From 41700f02480ad9ab14924974231cb9b6ec17cde5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 17 Dec 2024 23:48:57 +0000 Subject: [PATCH 01/91] WIP Signed-off-by: Austin Abro --- go.mod | 3 +++ go.sum | 15 +++++++++++ pkg/kube/client.go | 67 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 9d27e2b1f..d7500a674 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.31.3 oras.land/oras-go v1.2.5 + sigs.k8s.io/cli-utils v0.37.2 sigs.k8s.io/yaml v1.4.0 ) @@ -76,6 +77,7 @@ require ( github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -183,6 +185,7 @@ require ( k8s.io/component-base v0.31.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect diff --git a/go.sum b/go.sum index 654fc5178..a575e35cf 100644 --- a/go.sum +++ b/go.sum @@ -112,6 +112,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= @@ -136,6 +138,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -146,6 +150,7 @@ github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -443,6 +448,10 @@ go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93V go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -454,6 +463,8 @@ golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72 golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -630,6 +641,10 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg= +sigs.k8s.io/cli-utils v0.37.2/go.mod h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 4d93c91b9..f2bb06130 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -36,6 +36,13 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -44,7 +51,6 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" @@ -296,6 +302,65 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { return w.waitForResources(resources) } +// WaitForReady waits for all of the objects to reach a ready state. +func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error { + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + // TODO maybe a simpler way to transfer the objects + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.CurrentStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + // todo use err + var err error + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + } + return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + } + + return nil +} + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { cs, err := c.getKubeClient() From 6f7ac066ae8a487621c169a5e588ebd4a19df284 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 23 Dec 2024 22:29:22 +0000 Subject: [PATCH 02/91] extending factory to enable getting a watcher Signed-off-by: Austin Abro --- pkg/kube/client.go | 45 ++++++++++++++++++++++++++++++++--------- pkg/kube/client_test.go | 5 +++++ pkg/kube/factory.go | 6 ++++++ 3 files changed, 46 insertions(+), 10 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 8bcd4824f..a25a6fcc3 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -56,6 +57,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -289,17 +291,43 @@ func getResource(info *resource.Info) (runtime.Object, error) { // Wait waits up to the given timeout for the specified resources to be ready. func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - cs, err := c.getKubeClient() + // cs, err := c.getKubeClient() + // if err != nil { + // return err + // } + // checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + // w := waiter{ + // c: checker, + // log: c.Log, + // timeout: timeout, + // } + cfg, err := c.Factory.ToRESTConfig() if err != nil { return err } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := waiter{ - c: checker, - log: c.Log, - timeout: timeout, + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return err } - return w.waitForResources(resources) + // Not sure if I should use factory methods to get this http client or I should do this + // For example, I could likely use this as well, but it seems like I should use the factory methods instead + // httpClient, err := rest.HTTPClientFor(cfg) + // if err != nil { + // return err + // } + client, err := c.Factory.RESTClient() + if err != nil { + return err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + if err != nil { + return err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + // return sw, nil + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + return WaitForReady(ctx, sw, resources) } // WaitForReady waits for all of the objects to reach a ready state. @@ -319,7 +347,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re } resources = append(resources, obj) } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( @@ -346,7 +373,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re // Only check parent context error, otherwise we would error when desired status is achieved. if ctx.Err() != nil { - // todo use err var err error for _, id := range resources { rs := statusCollector.ResourceStatuses[id] @@ -357,7 +383,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re } return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) } - return nil } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index f2d6bcb59..7f3ba65be 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -453,12 +453,17 @@ func TestPerform(t *testing.T) { } } +// Likely it is not possible to get this test to work with kstatus given that it seems +// kstatus is not making constant get checks on the resources and is instead waiting for events +// Potentially the test could be reworked to make the pods after five seconds +// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") var created *time.Time c := newTestClient(t) + c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index f19d62dc3..b0b506282 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,9 +17,11 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" import ( + "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -33,6 +35,7 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. type Factory interface { + genericclioptions.RESTClientGetter // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig @@ -42,6 +45,9 @@ type Factory interface { // KubernetesClientSet gives you back an external clientset KubernetesClientSet() (*kubernetes.Clientset, error) + // Returns a RESTClient for accessing Kubernetes resources or an error. + RESTClient() (*restclient.RESTClient, error) + // NewBuilder returns an object that assists in loading objects from both disk and the server // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder From a61a35240e3e99af8386605de8cdbd9564051d2f Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 23 Dec 2024 22:55:09 +0000 Subject: [PATCH 03/91] understand it better Signed-off-by: Austin Abro --- pkg/kube/client.go | 1 + pkg/kube/interface.go | 1 + pkg/kube/kready.go | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 pkg/kube/kready.go diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a25a6fcc3..b38b4b094 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -301,6 +301,7 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { // log: c.Log, // timeout: timeout, // } + // w.waitForResources() cfg, err := c.Factory.ToRESTConfig() if err != nil { return err diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index ce42ed950..af3823a3e 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -33,6 +33,7 @@ type Interface interface { Create(resources ResourceList) (*Result, error) // Wait waits up to the given timeout for the specified resources to be ready. + // TODO introduce another interface for the waiting of the KubeClient Wait(resources ResourceList, timeout time.Duration) error // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. diff --git a/pkg/kube/kready.go b/pkg/kube/kready.go new file mode 100644 index 000000000..0752ba481 --- /dev/null +++ b/pkg/kube/kready.go @@ -0,0 +1,18 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + From 4c1758143fd5bfed4ed42fa73fd051ae6e90f642 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 26 Dec 2024 16:09:54 +0000 Subject: [PATCH 04/91] basic design up and balling Signed-off-by: Austin Abro --- pkg/action/action.go | 3 +- pkg/kube/client.go | 99 ++++++++++++++++++++--------------------- pkg/kube/client_test.go | 36 +++++++++++++-- pkg/kube/interface.go | 32 +++++++------ pkg/kube/kready.go | 80 +++++++++++++++++++++++++++++++++ pkg/kube/wait.go | 13 ++++++ 6 files changed, 193 insertions(+), 70 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 45f1a14e2..8fa3ae289 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,8 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc := kube.New(getter) + // TODO I don't love that this ends up using nil instead of a real watcher + kc := kube.New(getter, nil) kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b38b4b094..b1b1d4835 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -92,6 +92,11 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset + // Another potential option rather than having the waiter as a field + // would be to have a field that decides what type of waiter to use + // then instantiate it during the method + // of course the fields could take a waiter as well + waiter Waiter } func init() { @@ -105,14 +110,53 @@ func init() { } } +func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { + cfg, err := factory.ToRESTConfig() + if err != nil { + return nil, err + } + // factory.DynamicClient() may be a better choice here + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + // Not sure if I should use factory methods to get this http client or I should do this + // For example, I could likely use this as well, but it seems like I should use the factory methods instead + // httpClient, err := rest.HTTPClientFor(cfg) + // if err != nil { + // return err + // } + client, err := factory.RESTClient() + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + if err != nil { + return nil, err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + return sw, nil +} + // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter) *Client { +func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) + } + factory := cmdutil.NewFactory(getter) + if waiter == nil { + sw, err := getStatusWatcher(factory) + if err != nil { + // TODO, likely will move how the stats watcher is created so it doesn't need to be created + // unless it's going to be used + panic(err) + } + waiter = &kstatusWaiter{sw, nopLogger} } return &Client{ - Factory: cmdutil.NewFactory(getter), + Factory: factory, Log: nopLogger, + waiter: waiter, } } @@ -291,44 +335,7 @@ func getResource(info *resource.Info) (runtime.Object, error) { // Wait waits up to the given timeout for the specified resources to be ready. func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - // cs, err := c.getKubeClient() - // if err != nil { - // return err - // } - // checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - // w := waiter{ - // c: checker, - // log: c.Log, - // timeout: timeout, - // } - // w.waitForResources() - cfg, err := c.Factory.ToRESTConfig() - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return err - } - // Not sure if I should use factory methods to get this http client or I should do this - // For example, I could likely use this as well, but it seems like I should use the factory methods instead - // httpClient, err := rest.HTTPClientFor(cfg) - // if err != nil { - // return err - // } - client, err := c.Factory.RESTClient() - if err != nil { - return err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) - if err != nil { - return err - } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - // return sw, nil - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - return WaitForReady(ctx, sw, resources) + return c.waiter.Wait(resources, timeout) } // WaitForReady waits for all of the objects to reach a ready state. @@ -389,17 +396,7 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - cs, err := c.getKubeClient() - if err != nil { - return err - } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - w := waiter{ - c: checker, - log: c.Log, - timeout: timeout, - } - return w.waitForResources(resources) + return c.waiter.WaitWithJobs(resources, timeout) } // WaitForDelete wait up to the given timeout for the specified resources to be deleted. diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 7f3ba65be..b12897121 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -453,10 +454,10 @@ func TestPerform(t *testing.T) { } } -// Likely it is not possible to get this test to work with kstatus given that it seems +// Likely it is not possible to get this test to work with kstatus given that it seems // kstatus is not making constant get checks on the resources and is instead waiting for events // Potentially the test could be reworked to make the pods after five seconds -// would need this -> +// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") @@ -517,6 +518,15 @@ func TestWait(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -569,6 +579,15 @@ func TestWaitJob(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -623,6 +642,15 @@ func TestWaitDelete(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) @@ -649,7 +677,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c := New(nil) + c := New(nil, nil) resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -659,7 +687,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c = New(nil) + c = New(nil, nil) resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index af3823a3e..40880005a 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -32,16 +32,13 @@ type Interface interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) - // Wait waits up to the given timeout for the specified resources to be ready. - // TODO introduce another interface for the waiting of the KubeClient - Wait(resources ResourceList, timeout time.Duration) error - - // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. - WaitWithJobs(resources ResourceList, timeout time.Duration) error - // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) + // Update updates one or more resources or creates the resource + // if it doesn't exist. + Update(original, target ResourceList, force bool) (*Result, error) + // WatchUntilReady watches the resources given and waits until it is ready. // // This method is mainly for hook implementations. It watches for a resource to @@ -51,11 +48,12 @@ type Interface interface { // For Pods, "ready" means the Pod phase is marked "succeeded". // For all other kinds, it means the kind was created or modified without // error. + // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error - // Update updates one or more resources or creates the resource - // if it doesn't exist. - Update(original, target ResourceList, force bool) (*Result, error) + // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase + // and returns said phase (PodSucceeded or PodFailed qualify). + WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) // Build creates a resource list from a Reader. // @@ -65,12 +63,18 @@ type Interface interface { // Validates against OpenAPI schema if validate is true. Build(reader io.Reader, validate bool) (ResourceList, error) - // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase - // and returns said phase (PodSucceeded or PodFailed qualify). - WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) - // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error + Waiter +} + +// Waiter defines methods related to waiting for resource states. +type Waiter interface { + // Wait waits up to the given timeout for the specified resources to be ready. + Wait(resources ResourceList, timeout time.Duration) error + + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. + WaitWithJobs(resources ResourceList, timeout time.Duration) error } // InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers. diff --git a/pkg/kube/kready.go b/pkg/kube/kready.go index 0752ba481..c199eecc6 100644 --- a/pkg/kube/kready.go +++ b/pkg/kube/kready.go @@ -16,3 +16,83 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type kstatusWaiter struct { + // Add any necessary dependencies, e.g., Kubernetes API client. + sw watcher.StatusWatcher + log func(string, ...interface{}) +} + +func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + ctx := context.TODO() + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + // TODO maybe a simpler way to transfer the objects + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.CurrentStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + var err error + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + } + return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + } + return nil +} + +func (w *kstatusWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + // Implementation + panic("not implemented") +} diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index bdafc8255..de00aae47 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -44,6 +44,19 @@ type waiter struct { log func(string, ...interface{}) } +func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { + w.timeout = timeout + return w.waitForResources(resources) +} + +func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + // Implementation + // TODO this function doesn't make sense unless you pass a readyChecker to it + // TODO pass context instead + w.timeout = timeout + return w.waitForResources(resources) +} + // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached func (w *waiter) waitForResources(created ResourceList) error { From 4564b8f7121083b21721e3f098e5ab487b3b159a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 26 Dec 2024 17:26:03 +0000 Subject: [PATCH 05/91] make a working test Signed-off-by: Austin Abro --- go.mod | 3 +- pkg/kube/client.go | 75 ++--------- pkg/kube/client_test.go | 19 ++- pkg/kube/{kready.go => kwait.go} | 4 +- pkg/kube/kwait_test.go | 213 +++++++++++++++++++++++++++++++ 5 files changed, 238 insertions(+), 76 deletions(-) rename pkg/kube/{kready.go => kwait.go} (95%) create mode 100644 pkg/kube/kwait_test.go diff --git a/go.mod b/go.mod index feefb8932..e70781ac5 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( k8s.io/kubectl v0.31.3 oras.land/oras-go v1.2.5 sigs.k8s.io/cli-utils v0.37.2 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/yaml v1.4.0 ) @@ -128,6 +129,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/onsi/gomega v1.33.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -185,7 +187,6 @@ require ( k8s.io/component-base v0.31.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect - sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b1b1d4835..149017b17 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -37,12 +37,7 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" @@ -92,10 +87,12 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset - // Another potential option rather than having the waiter as a field - // would be to have a field that decides what type of waiter to use - // then instantiate it during the method - // of course the fields could take a waiter as well + // I see a couple different options for how waiter could be handled here + // - The waiter could be instantiated in New or at the start of each wait function // + // - The waiter could be completely separate from the client interface, + // I don't like that this causes consumers to need another interface on top of kube + // - The waiter could be bundled with the resource manager into a client object. The waiter doesn't need factory / + // Another option still would be to waiter Waiter } @@ -142,7 +139,7 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) - } + } factory := cmdutil.NewFactory(getter) if waiter == nil { sw, err := getStatusWatcher(factory) @@ -156,7 +153,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { return &Client{ Factory: factory, Log: nopLogger, - waiter: waiter, + waiter: waiter, } } @@ -338,62 +335,6 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { return c.waiter.Wait(resources, timeout) } -// WaitForReady waits for all of the objects to reach a ready state. -func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error { - cancelCtx, cancel := context.WithCancel(ctx) - defer cancel() - // TODO maybe a simpler way to transfer the objects - runtimeObjs := []runtime.Object{} - for _, resource := range resourceList { - runtimeObjs = append(runtimeObjs, resource.Object) - } - resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) - if err != nil { - return err - } - resources = append(resources, obj) - } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) - statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.CurrentStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) - <-done - - if statusCollector.Error != nil { - return statusCollector.Error - } - - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - var err error - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { - continue - } - err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) - } - return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) - } - return nil -} - // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { return c.waiter.WaitWithJobs(resources, timeout) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index b12897121..de61a3862 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -24,7 +24,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -519,14 +518,16 @@ func TestWait(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -580,14 +581,16 @@ func TestWaitJob(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -643,14 +646,16 @@ func TestWaitDelete(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/kready.go b/pkg/kube/kwait.go similarity index 95% rename from pkg/kube/kready.go rename to pkg/kube/kwait.go index c199eecc6..d74c913ea 100644 --- a/pkg/kube/kready.go +++ b/pkg/kube/kwait.go @@ -37,7 +37,8 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { - ctx := context.TODO() + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() cancelCtx, cancel := context.WithCancel(ctx) defer cancel() // TODO maybe a simpler way to transfer the objects @@ -62,6 +63,7 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs == nil { continue } + fmt.Println("this is the status of object", rs.Status) rss = append(rss, rs) } desired := status.CurrentStatus diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go new file mode 100644 index 000000000..1d9a69959 --- /dev/null +++ b/pkg/kube/kwait_test.go @@ -0,0 +1,213 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/yaml" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/testutil" +) + +var podCurrentYaml = ` +apiVersion: v1 +kind: Pod +metadata: + name: good-pod + namespace: ns +status: + conditions: + - type: Ready + status: "True" + phase: Running +` + +var podYaml = ` +apiVersion: v1 +kind: Pod +metadata: + name: in-progress-pod + namespace: ns +` + +func TestRunHealthChecks(t *testing.T) { + t.Parallel() + tests := []struct { + name string + podYamls []string + expectErrs []error + }{ + { + name: "Pod is ready", + podYamls: []string{podCurrentYaml}, + expectErrs: nil, + }, + { + name: "one of the pods never becomes ready", + podYamls: []string{podYaml, podCurrentYaml}, + // TODO, make this better + expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + // ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + // defer cancel() + pods := []runtime.Object{} + statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + for _, podYaml := range tt.podYamls { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + require.NoError(t, err) + pod := &unstructured.Unstructured{Object: m} + pods = append(pods, pod) + fmt.Println(pod.GetName()) + podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()) + require.NoError(t, err) + } + c.waiter = &kstatusWaiter{ + sw: statusWatcher, + log: c.Log, + } + + resourceList := ResourceList{} + for _, pod := range pods { + list, err := c.Build(objBody(pod), false) + if err != nil { + t.Fatal(err) + } + resourceList = append(resourceList, list...) + } + + err := c.Wait(resourceList, time.Second*5) + if tt.expectErrs != nil { + require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + require.NoError(t, err) + }) + } +} + +// func TestWait1(t *testing.T) { +// podList := newPodList("starfish", "otter", "squid") + +// var created *time.Time + +// c := newTestClient(t) +// c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() +// c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ +// NegotiatedSerializer: unstructuredSerializer, +// Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { +// p, m := req.URL.Path, req.Method +// t.Logf("got request %s %s", p, m) +// switch { +// case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET": +// pod := &podList.Items[0] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/api/v1/namespaces/default/pods/otter" && m == "GET": +// pod := &podList.Items[1] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/api/v1/namespaces/default/pods/squid" && m == "GET": +// pod := &podList.Items[2] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/namespaces/default/pods" && m == "POST": +// resources, err := c.Build(req.Body, false) +// if err != nil { +// t.Fatal(err) +// } +// now := time.Now() +// created = &now +// return newResponse(200, resources[0].Object) +// default: +// t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) +// return nil, nil +// } +// }), +// } +// cs, err := c.getKubeClient() +// require.NoError(t, err) +// checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) +// w := &waiter{ +// c: checker, +// log: c.Log, +// timeout: time.Second * 30, +// } +// c.waiter = w +// resources, err := c.Build(objBody(&podList), false) +// if err != nil { +// t.Fatal(err) +// } +// result, err := c.Create(resources) +// if err != nil { +// t.Fatal(err) +// } +// if len(result.Created) != 3 { +// t.Errorf("expected 3 resource created, got %d", len(result.Created)) +// } + +// if err := c.Wait(resources, time.Second*30); err != nil { +// t.Errorf("expected wait without error, got %s", err) +// } + +// if time.Since(*created) < time.Second*5 { +// t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created)) +// } +// } From ad1f1c02efda335320ec652c3a32cfbbc39b6337 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 13:25:02 +0000 Subject: [PATCH 06/91] cleanup test Signed-off-by: Austin Abro --- pkg/kube/kwait_test.go | 92 ------------------------------------------ 1 file changed, 92 deletions(-) diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 1d9a69959..1702f0990 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -83,8 +83,6 @@ func TestRunHealthChecks(t *testing.T) { fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), ) - // ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - // defer cancel() pods := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) for _, podYaml := range tt.podYamls { @@ -121,93 +119,3 @@ func TestRunHealthChecks(t *testing.T) { }) } } - -// func TestWait1(t *testing.T) { -// podList := newPodList("starfish", "otter", "squid") - -// var created *time.Time - -// c := newTestClient(t) -// c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() -// c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ -// NegotiatedSerializer: unstructuredSerializer, -// Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { -// p, m := req.URL.Path, req.Method -// t.Logf("got request %s %s", p, m) -// switch { -// case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET": -// pod := &podList.Items[0] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/api/v1/namespaces/default/pods/otter" && m == "GET": -// pod := &podList.Items[1] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/api/v1/namespaces/default/pods/squid" && m == "GET": -// pod := &podList.Items[2] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/namespaces/default/pods" && m == "POST": -// resources, err := c.Build(req.Body, false) -// if err != nil { -// t.Fatal(err) -// } -// now := time.Now() -// created = &now -// return newResponse(200, resources[0].Object) -// default: -// t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) -// return nil, nil -// } -// }), -// } -// cs, err := c.getKubeClient() -// require.NoError(t, err) -// checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) -// w := &waiter{ -// c: checker, -// log: c.Log, -// timeout: time.Second * 30, -// } -// c.waiter = w -// resources, err := c.Build(objBody(&podList), false) -// if err != nil { -// t.Fatal(err) -// } -// result, err := c.Create(resources) -// if err != nil { -// t.Fatal(err) -// } -// if len(result.Created) != 3 { -// t.Errorf("expected 3 resource created, got %d", len(result.Created)) -// } - -// if err := c.Wait(resources, time.Second*30); err != nil { -// t.Errorf("expected wait without error, got %s", err) -// } - -// if time.Since(*created) < time.Second*5 { -// t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created)) -// } -// } From 859ff9b54882c4344cc5564c6cd4f993a300e20c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 14:37:33 +0000 Subject: [PATCH 07/91] change structure of client Signed-off-by: Austin Abro --- pkg/kube/client.go | 21 +++------------------ pkg/kube/client_test.go | 9 +++------ pkg/kube/interface.go | 10 +++++----- pkg/kube/kwait_test.go | 2 +- 4 files changed, 12 insertions(+), 30 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 149017b17..9e31a64e1 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -87,13 +87,8 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset - // I see a couple different options for how waiter could be handled here - // - The waiter could be instantiated in New or at the start of each wait function // - // - The waiter could be completely separate from the client interface, - // I don't like that this causes consumers to need another interface on top of kube - // - The waiter could be bundled with the resource manager into a client object. The waiter doesn't need factory / - // Another option still would be to - waiter Waiter + ResourceManager + Waiter } func init() { @@ -153,7 +148,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { return &Client{ Factory: factory, Log: nopLogger, - waiter: waiter, + Waiter: waiter, } } @@ -330,16 +325,6 @@ func getResource(info *resource.Info) (runtime.Object, error) { return obj, nil } -// Wait waits up to the given timeout for the specified resources to be ready. -func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - return c.waiter.Wait(resources, timeout) -} - -// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. -func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - return c.waiter.WaitWithJobs(resources, timeout) -} - // WaitForDelete wait up to the given timeout for the specified resources to be deleted. func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { w := waiter{ diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index de61a3862..a6e095942 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -522,12 +522,11 @@ func TestWait(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -585,12 +584,11 @@ func TestWaitJob(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -650,12 +648,11 @@ func TestWaitDelete(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 40880005a..d2230b244 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -29,6 +29,11 @@ import ( // // A KubernetesClient must be concurrency safe. type Interface interface { + ResourceManager + Waiter +} + +type ResourceManager interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) @@ -38,7 +43,6 @@ type Interface interface { // Update updates one or more resources or creates the resource // if it doesn't exist. Update(original, target ResourceList, force bool) (*Result, error) - // WatchUntilReady watches the resources given and waits until it is ready. // // This method is mainly for hook implementations. It watches for a resource to @@ -50,11 +54,9 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error - // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase // and returns said phase (PodSucceeded or PodFailed qualify). WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) - // Build creates a resource list from a Reader. // // Reader must contain a YAML stream (one or more YAML documents separated @@ -62,10 +64,8 @@ type Interface interface { // // Validates against OpenAPI schema if validate is true. Build(reader io.Reader, validate bool) (ResourceList, error) - // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - Waiter } // Waiter defines methods related to waiting for resource states. diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 1702f0990..9854f2d60 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -96,7 +96,7 @@ func TestRunHealthChecks(t *testing.T) { err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()) require.NoError(t, err) } - c.waiter = &kstatusWaiter{ + c.Waiter = &kstatusWaiter{ sw: statusWatcher, log: c.Log, } From aacaa08be2b689e7c688f483ab0946dedac154ab Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 14:49:11 +0000 Subject: [PATCH 08/91] only emebed waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 1 - pkg/kube/interface.go | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 9e31a64e1..469a89b35 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -87,7 +87,6 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset - ResourceManager Waiter } diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index d2230b244..edc062c49 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -29,11 +29,6 @@ import ( // // A KubernetesClient must be concurrency safe. type Interface interface { - ResourceManager - Waiter -} - -type ResourceManager interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) @@ -66,6 +61,7 @@ type ResourceManager interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error + Waiter } // Waiter defines methods related to waiting for resource states. From 947425ee64b0047896ba9a96d130420c5ca60175 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 14:51:22 +0000 Subject: [PATCH 09/91] refactor new Signed-off-by: Austin Abro --- pkg/action/action.go | 6 ++++-- pkg/kube/client.go | 8 +++----- pkg/kube/client_test.go | 10 ++++++++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 8fa3ae289..8759597b4 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,8 +371,10 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - // TODO I don't love that this ends up using nil instead of a real watcher - kc := kube.New(getter, nil) + kc, err := kube.New(getter, nil) + if err != nil { + return err + } kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 469a89b35..a50655a40 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -130,7 +130,7 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { } // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { +func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, error) { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -138,9 +138,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if waiter == nil { sw, err := getStatusWatcher(factory) if err != nil { - // TODO, likely will move how the stats watcher is created so it doesn't need to be created - // unless it's going to be used - panic(err) + return nil, err } waiter = &kstatusWaiter{sw, nopLogger} } @@ -148,7 +146,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { Factory: factory, Log: nopLogger, Waiter: waiter, - } + }, nil } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index a6e095942..037719219 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -679,7 +679,10 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c := New(nil, nil) + c, err := New(nil, nil) + if err != nil { + t.Fatal(err) + } resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -689,7 +692,10 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c = New(nil, nil) + c, err = New(nil, nil) + if err != nil { + t.Fatal(err) + } resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) From 807cc925f532323fcb143b566d8e44498bcaac32 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 16:33:33 +0000 Subject: [PATCH 10/91] refactor test Signed-off-by: Austin Abro --- pkg/kube/client.go | 5 ++- pkg/kube/kwait.go | 5 +-- pkg/kube/kwait_test.go | 80 +++++++++++++++++++++++++++++++++--------- 3 files changed, 70 insertions(+), 20 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a50655a40..cbef8fece 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -140,7 +140,10 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err if err != nil { return nil, err } - waiter = &kstatusWaiter{sw, nopLogger} + waiter = &kstatusWaiter{ + sw: sw, + log: nopLogger, + pausedAsReady: true} } return &Client{ Factory: factory, diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index d74c913ea..6c1d5b748 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -32,8 +32,9 @@ import ( type kstatusWaiter struct { // Add any necessary dependencies, e.g., Kubernetes API client. - sw watcher.StatusWatcher - log func(string, ...interface{}) + sw watcher.StatusWatcher + log func(string, ...interface{}) + pausedAsReady bool } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 9854f2d60..372735462 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -18,12 +18,12 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "errors" - "fmt" "testing" "time" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/testutil" ) -var podCurrentYaml = ` +var podCurrent = ` apiVersion: v1 kind: Pod metadata: @@ -47,7 +47,7 @@ status: phase: Running ` -var podYaml = ` +var podNoStatus = ` apiVersion: v1 kind: Pod metadata: @@ -55,21 +55,62 @@ metadata: namespace: ns ` -func TestRunHealthChecks(t *testing.T) { +var jobNoStatus = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test + namespace: qual + generation: 1 +` + +var jobComplete = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test + namespace: qual + generation: 1 +status: + succeeded: 1 + active: 0 + conditions: + - type: Complete + status: "True" +` + +func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { + gvk := obj.GroupVersionKind() + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + require.NoError(t, err) + return mapping.Resource +} + +func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { name string - podYamls []string + objYamls []string expectErrs []error }{ + { + name: "Job is complete", + objYamls: []string{jobComplete}, + expectErrs: nil, + }, + { + name: "Job is not complete", + objYamls: []string{jobNoStatus}, + expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: test: Job not ready, status: InProgress")}, + }, { name: "Pod is ready", - podYamls: []string{podCurrentYaml}, + objYamls: []string{podCurrent}, expectErrs: nil, }, { name: "one of the pods never becomes ready", - podYamls: []string{podYaml, podCurrentYaml}, + objYamls: []string{podNoStatus, podCurrent}, // TODO, make this better expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, }, @@ -82,18 +123,22 @@ func TestRunHealthChecks(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), + schema.GroupVersionKind{ + Group: "batch", + Version: "v1", + Kind: "Job", + }, ) - pods := []runtime.Object{} + objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - for _, podYaml := range tt.podYamls { + for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) require.NoError(t, err) - pod := &unstructured.Unstructured{Object: m} - pods = append(pods, pod) - fmt.Println(pod.GetName()) - podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} - err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) require.NoError(t, err) } c.Waiter = &kstatusWaiter{ @@ -102,16 +147,17 @@ func TestRunHealthChecks(t *testing.T) { } resourceList := ResourceList{} - for _, pod := range pods { - list, err := c.Build(objBody(pod), false) + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) if err != nil { t.Fatal(err) } resourceList = append(resourceList, list...) } - err := c.Wait(resourceList, time.Second*5) + err := c.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { + //TODO remove require require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return } From a6e5466942df67dccea00fbaa7b2ed4e5a8e619d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 16:54:33 +0000 Subject: [PATCH 11/91] refactor test Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 8 +++++--- pkg/kube/kwait_test.go | 23 ++++++++++------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 6c1d5b748..639794322 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" + "errors" "fmt" "time" @@ -82,15 +83,16 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e // Only check parent context error, otherwise we would error when desired status is achieved. if ctx.Err() != nil { - var err error + errs := []error{} for _, id := range resources { rs := statusCollector.ResourceStatuses[id] if rs.Status == status.CurrentStatus { continue } - err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } - return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + errs = append(errs, ctx.Err()) + return errors.Join(errs...) } return nil } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 372735462..fd5cd0b57 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -101,7 +102,7 @@ func TestKWaitJob(t *testing.T) { { name: "Job is not complete", objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: test: Job not ready, status: InProgress")}, + expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, { name: "Pod is ready", @@ -109,10 +110,9 @@ func TestKWaitJob(t *testing.T) { expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objYamls: []string{podNoStatus, podCurrent}, - // TODO, make this better - expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, + name: "one of the pods never becomes ready", + objYamls: []string{podNoStatus, podCurrent}, + expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, } @@ -134,12 +134,12 @@ func TestKWaitJob(t *testing.T) { for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) - require.NoError(t, err) + assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} objs = append(objs, resource) gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - require.NoError(t, err) + assert.NoError(t, err) } c.Waiter = &kstatusWaiter{ sw: statusWatcher, @@ -149,19 +149,16 @@ func TestKWaitJob(t *testing.T) { resourceList := ResourceList{} for _, obj := range objs { list, err := c.Build(objBody(obj), false) - if err != nil { - t.Fatal(err) - } + assert.NoError(t, err) resourceList = append(resourceList, list...) } err := c.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { - //TODO remove require - require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return } - require.NoError(t, err) + assert.NoError(t, err) }) } } From 7b896df4d1089a7c6abded0caaf16fb84a2f90a7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:34:26 +0000 Subject: [PATCH 12/91] option to wait for jobs Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 24 +++++++++++++++++------- pkg/kube/kwait_test.go | 24 ++++++++++++++++-------- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 639794322..c1822d87c 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" @@ -39,6 +40,15 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + return w.wait(resourceList, timeout, false) +} + +func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { + // Implementation + return w.wait(resourceList, timeout, true) +} + +func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitWithJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() cancelCtx, cancel := context.WithCancel(ctx) @@ -46,6 +56,12 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { + switch AsVersioned(resource).(type) { + case *batchv1.Job: + if !waitWithJobs { + continue + } + } runtimeObjs = append(runtimeObjs, resource.Object) } resources := []object.ObjMetadata{} @@ -65,7 +81,6 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs == nil { continue } - fmt.Println("this is the status of object", rs.Status) rss = append(rss, rs) } desired := status.CurrentStatus @@ -89,15 +104,10 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs.Status == status.CurrentStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) } return nil } - -func (w *kstatusWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - // Implementation - panic("not implemented") -} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index fd5cd0b57..e595f9ed3 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -90,9 +90,10 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { name: "Job is complete", @@ -100,9 +101,16 @@ func TestKWaitJob(t *testing.T) { expectErrs: nil, }, { - name: "Job is not complete", - objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is not complete", + objYamls: []string{jobNoStatus}, + expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + waitForJobs: true, + }, + { + name: "Job is not ready, but we pass wait anyway", + objYamls: []string{jobNoStatus}, + expectErrs: nil, + waitForJobs: false, }, { name: "Pod is ready", @@ -141,7 +149,7 @@ func TestKWaitJob(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - c.Waiter = &kstatusWaiter{ + kwaiter := kstatusWaiter{ sw: statusWatcher, log: c.Log, } @@ -153,7 +161,7 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) } - err := c.Wait(resourceList, time.Second*3) + err := kwaiter.wait(resourceList, time.Second*3, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From 22af71f125ca467a109eff50e78c5b7aea0e8642 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:35:32 +0000 Subject: [PATCH 13/91] comments Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index c1822d87c..674552432 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -33,7 +33,6 @@ import ( ) type kstatusWaiter struct { - // Add any necessary dependencies, e.g., Kubernetes API client. sw watcher.StatusWatcher log func(string, ...interface{}) pausedAsReady bool @@ -44,7 +43,6 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e } func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { - // Implementation return w.wait(resourceList, timeout, true) } From e18f22071d036832f8a8573a99bd2955745faef8 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:43:18 +0000 Subject: [PATCH 14/91] paused as ready now working Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 11 +++++++--- pkg/kube/kwait_test.go | 50 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 674552432..936445037 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" @@ -46,7 +47,7 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur return w.wait(resourceList, timeout, true) } -func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitWithJobs bool) error { +func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitForJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() cancelCtx, cancel := context.WithCancel(ctx) @@ -54,9 +55,13 @@ func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, w // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { - switch AsVersioned(resource).(type) { + switch value := AsVersioned(resource).(type) { case *batchv1.Job: - if !waitWithJobs { + if !waitForJobs { + continue + } + case *appsv1.Deployment: + if w.pausedAsReady && value.Spec.Paused { continue } } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index e595f9ed3..8504025da 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -80,6 +82,31 @@ status: status: "True" ` +var pausedDeploymentYaml = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: ns-1 + generation: 1 +spec: + paused: true + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.19.6 + ports: + - containerPort: 80 +` + func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { gvk := obj.GroupVersionKind() mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) @@ -90,10 +117,11 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool + name string + objYamls []string + expectErrs []error + waitForJobs bool + pausedAsReady bool }{ { name: "Job is complete", @@ -122,6 +150,12 @@ func TestKWaitJob(t *testing.T) { objYamls: []string{podNoStatus, podCurrent}, expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, + { + name: "paused deployment passes", + objYamls: []string{pausedDeploymentYaml}, + expectErrs: nil, + pausedAsReady: true, + }, } for _, tt := range tests { @@ -131,11 +165,8 @@ func TestKWaitJob(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), - schema.GroupVersionKind{ - Group: "batch", - Version: "v1", - Kind: "Job", - }, + appsv1.SchemeGroupVersion.WithKind("Deployment"), + batchv1.SchemeGroupVersion.WithKind("Job"), ) objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) @@ -152,6 +183,7 @@ func TestKWaitJob(t *testing.T) { kwaiter := kstatusWaiter{ sw: statusWatcher, log: c.Log, + pausedAsReady: tt.pausedAsReady, } resourceList := ResourceList{} From b337790c102b812807a783bce5d2fbf74fc4f5cd Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:59:26 +0000 Subject: [PATCH 15/91] paused as ready Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 1 - pkg/kube/kwait_test.go | 11 ++++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 936445037..e72e4b93d 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -52,7 +52,6 @@ func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, w defer cancel() cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 8504025da..1bc80d8ee 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -18,13 +18,14 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "errors" + "log" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -166,7 +167,7 @@ func TestKWaitJob(t *testing.T) { fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), appsv1.SchemeGroupVersion.WithKind("Deployment"), - batchv1.SchemeGroupVersion.WithKind("Job"), + batchv1.SchemeGroupVersion.WithKind("Job"), ) objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) @@ -181,9 +182,9 @@ func TestKWaitJob(t *testing.T) { assert.NoError(t, err) } kwaiter := kstatusWaiter{ - sw: statusWatcher, - log: c.Log, - pausedAsReady: tt.pausedAsReady, + sw: statusWatcher, + log: log.Printf, + pausedAsReady: tt.pausedAsReady, } resourceList := ResourceList{} From 28a9183ee3fd271ac2b76f4df89170e3c9452fbb Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 18:39:09 +0000 Subject: [PATCH 16/91] context Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 12 +++++++----- pkg/kube/kwait_test.go | 6 +++++- pkg/kube/wait.go | 3 ++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index e72e4b93d..3d8cfb616 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -40,16 +40,18 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { - return w.wait(resourceList, timeout, false) + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + return w.wait(ctx, resourceList, false) } func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { - return w.wait(resourceList, timeout, true) -} - -func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitForJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + return w.wait(ctx, resourceList, true) +} + +func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 1bc80d8ee..9598ca216 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -17,6 +17,7 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" import ( + "context" "errors" "log" "testing" @@ -194,7 +195,10 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) } - err := kwaiter.wait(resourceList, time.Second*3, tt.waitForJobs) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + + err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index de00aae47..34eb55e7c 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -51,8 +51,9 @@ func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { // Implementation - // TODO this function doesn't make sense unless you pass a readyChecker to it + // TODO this function doesn't make sense unless you pass a readyChecker to it // TODO pass context instead + // checker := NewReadyChecker(cs, w.c.Log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) } From 265442c5eb2bedc3292e255e38f4b25e1b0463ce Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:22:14 +0000 Subject: [PATCH 17/91] simplify things Signed-off-by: Austin Abro --- pkg/kube/client.go | 27 +++++---------------------- pkg/kube/kwait.go | 7 +++---- pkg/kube/kwait_test.go | 9 ++++----- 3 files changed, 12 insertions(+), 31 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index cbef8fece..a5441f399 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -38,7 +38,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -52,7 +51,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -102,26 +100,11 @@ func init() { } func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { - cfg, err := factory.ToRESTConfig() + dynamicClient, err := factory.DynamicClient() if err != nil { return nil, err } - // factory.DynamicClient() may be a better choice here - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - // Not sure if I should use factory methods to get this http client or I should do this - // For example, I could likely use this as well, but it seems like I should use the factory methods instead - // httpClient, err := rest.HTTPClientFor(cfg) - // if err != nil { - // return err - // } - client, err := factory.RESTClient() - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + restMapper, err := factory.ToRESTMapper() if err != nil { return nil, err } @@ -141,9 +124,9 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err return nil, err } waiter = &kstatusWaiter{ - sw: sw, - log: nopLogger, - pausedAsReady: true} + sw: sw, + log: nopLogger, + } } return &Client{ Factory: factory, diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 3d8cfb616..d0dcc9b60 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -34,9 +34,8 @@ import ( ) type kstatusWaiter struct { - sw watcher.StatusWatcher - log func(string, ...interface{}) - pausedAsReady bool + sw watcher.StatusWatcher + log func(string, ...interface{}) } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { @@ -62,7 +61,7 @@ func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, wai continue } case *appsv1.Deployment: - if w.pausedAsReady && value.Spec.Paused { + if value.Spec.Paused { continue } } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 9598ca216..527d10a05 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -183,9 +183,8 @@ func TestKWaitJob(t *testing.T) { assert.NoError(t, err) } kwaiter := kstatusWaiter{ - sw: statusWatcher, - log: log.Printf, - pausedAsReady: tt.pausedAsReady, + sw: statusWatcher, + log: log.Printf, } resourceList := ResourceList{} @@ -195,8 +194,8 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { From 9b63459becb190ad9f4ebe43235f40ed720e310d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:49:32 +0000 Subject: [PATCH 18/91] save state while I change up tests Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 6 +++++ pkg/kube/kwait_test.go | 54 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index d0dcc9b60..f173a074e 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -50,6 +50,12 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur return w.wait(ctx, resourceList, true) } +func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { + _, cancel := context.WithCancel(ctx) + defer cancel() + return nil +} + func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 527d10a05..2301c373d 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -29,6 +29,7 @@ import ( batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -116,6 +117,59 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } +func TestKWaitForDelete(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objs []runtime.Object + expectErrs []error + waitForJobs bool + pausedAsReady bool + }{ + { + name: "Pod is deleted", + objs: []runtime.Object{ + &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "ns"}}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + appsv1.SchemeGroupVersion.WithKind("Deployment"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + kwaiter := kstatusWaiter{ + sw: statusWatcher, + log: log.Printf, + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + resourceList := ResourceList{} + for _, obj := range tt.objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + // gvr := getGVR(t, fakeMapper, obj.) + // err = fakeClient.Tracker().Create(gvr, obj, ) + // assert.NoError(t, err) + // resourceList = append(resourceList, list...) + } + err := kwaiter.waitForDelete(ctx, resourceList) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } + +} + func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { From 4dbdd7ce10cfb5f4d1de4dda2a65b27a83f93c0c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:55:13 +0000 Subject: [PATCH 19/91] wait for delete working Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 53 ++++++++++++++++++++++++++++++++++++++- pkg/kube/kwait_test.go | 56 ++++++++++++++++++++++++------------------ 2 files changed, 84 insertions(+), 25 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index f173a074e..ae7fcbe43 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -51,7 +51,58 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur } func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { - _, cancel := context.WithCancel(ctx) + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.NotFoundStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + } + errs = append(errs, ctx.Err()) + return errors.Join(errs...) + } + return nil defer cancel() return nil } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 2301c373d..f910a4a9b 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -29,7 +29,6 @@ import ( batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -120,17 +119,15 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { - name string - objs []runtime.Object - expectErrs []error - waitForJobs bool - pausedAsReady bool + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { - name: "Pod is deleted", - objs: []runtime.Object{ - &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "ns"}}, - }, + name: "wait for pod to be deleted", + objYamls: []string{podCurrent}, + expectErrs: nil, }, } for _, tt := range tests { @@ -150,14 +147,27 @@ func TestKWaitForDelete(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() + objs := []runtime.Object{} + for _, podYaml := range tt.objYamls { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + assert.NoError(t, err) + go func(){ + time.Sleep(2 * time.Second) + err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + assert.NoError(t, err) + }() + } resourceList := ResourceList{} - for _, obj := range tt.objs { + for _, obj := range objs { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) - // gvr := getGVR(t, fakeMapper, obj.) - // err = fakeClient.Tracker().Create(gvr, obj, ) - // assert.NoError(t, err) - // resourceList = append(resourceList, list...) + resourceList = append(resourceList, list...) } err := kwaiter.waitForDelete(ctx, resourceList) if tt.expectErrs != nil { @@ -173,11 +183,10 @@ func TestKWaitForDelete(t *testing.T) { func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool - pausedAsReady bool + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { name: "Job is complete", @@ -207,10 +216,9 @@ func TestKWaitJob(t *testing.T) { expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, { - name: "paused deployment passes", - objYamls: []string{pausedDeploymentYaml}, - expectErrs: nil, - pausedAsReady: true, + name: "paused deployment passes", + objYamls: []string{pausedDeploymentYaml}, + expectErrs: nil, }, } From db90b174846d96cce02d5c50993eb56262a1b681 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 15:01:52 +0000 Subject: [PATCH 20/91] unknown status Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index ae7fcbe43..587c41c49 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -94,17 +94,20 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource errs := []error{} for _, id := range resources { rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { + if rs.Status == status.NotFoundStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + if rs.Status == status.UnknownStatus { + errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + continue + } + + errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) } return nil - defer cancel() - return nil } func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { From 4b59583670e40a556448f2a3627b100c88166a3f Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:05:31 +0000 Subject: [PATCH 21/91] delete wait and get completed phase Signed-off-by: Austin Abro --- pkg/kube/client.go | 32 -------------------------------- pkg/kube/fake/fake.go | 34 ++++++++++++---------------------- pkg/kube/fake/printer.go | 6 ------ pkg/kube/interface.go | 4 ---- pkg/kube/kwait.go | 5 ++--- 5 files changed, 14 insertions(+), 67 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a5441f399..5b466ea6f 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -821,35 +821,3 @@ func scrubValidationError(err error) error { } return err } - -// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase -// and returns said phase (PodSucceeded or PodFailed qualify). -func (c *Client) WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) { - client, err := c.getKubeClient() - if err != nil { - return v1.PodUnknown, err - } - to := int64(timeout) - watcher, err := client.CoreV1().Pods(c.namespace()).Watch(context.Background(), metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", name), - TimeoutSeconds: &to, - }) - if err != nil { - return v1.PodUnknown, err - } - - for event := range watcher.ResultChan() { - p, ok := event.Object.(*v1.Pod) - if !ok { - return v1.PodUnknown, fmt.Errorf("%s not a pod", name) - } - switch p.Status.Phase { - case v1.PodFailed: - return v1.PodFailed, nil - case v1.PodSucceeded: - return v1.PodSucceeded, nil - } - } - - return v1.PodUnknown, err -} diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index 267020d57..84e375346 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -21,7 +21,6 @@ import ( "io" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" @@ -34,19 +33,18 @@ import ( // delegates all its calls to `PrintingKubeClient` type FailingKubeClient struct { PrintingKubeClient - CreateError error - GetError error - WaitError error - DeleteError error - DeleteWithPropagationError error - WatchUntilReadyError error - UpdateError error - BuildError error - BuildTableError error - BuildDummy bool - BuildUnstructuredError error - WaitAndGetCompletedPodPhaseError error - WaitDuration time.Duration + CreateError error + GetError error + WaitError error + DeleteError error + DeleteWithPropagationError error + WatchUntilReadyError error + UpdateError error + BuildError error + BuildTableError error + BuildDummy bool + BuildUnstructuredError error + WaitDuration time.Duration } // Create returns the configured error if set or prints @@ -133,14 +131,6 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, return f.PrintingKubeClient.BuildTable(r, false) } -// WaitAndGetCompletedPodPhase returns the configured error if set or prints -func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) { - if f.WaitAndGetCompletedPodPhaseError != nil { - return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError - } - return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d) -} - // DeleteWithPropagationPolicy returns the configured error if set or prints func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { if f.DeleteWithPropagationError != nil { diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index cc2c84b40..0fb03c113 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -21,7 +21,6 @@ import ( "strings" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" @@ -111,11 +110,6 @@ func (p *PrintingKubeClient) BuildTable(_ io.Reader, _ bool) (kube.ResourceList, return []*resource.Info{}, nil } -// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase. -func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) { - return v1.PodSucceeded, nil -} - // DeleteWithPropagationPolicy implements KubeClient delete. // // It only prints out the content to be deleted. diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index edc062c49..6cf33c515 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -20,7 +20,6 @@ import ( "io" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -49,9 +48,6 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error - // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase - // and returns said phase (PodSucceeded or PodFailed qualify). - WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) // Build creates a resource list from a Reader. // // Reader must contain a YAML stream (one or more YAML documents separated diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 587c41c49..1eb1c2053 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -99,10 +99,9 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource } if rs.Status == status.UnknownStatus { errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - continue + } else { + errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } - - errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) From 2cb999d72b0051341775861fbf2eca23cec7f3aa Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:28:59 +0000 Subject: [PATCH 22/91] go fmt Signed-off-by: Austin Abro --- pkg/kube/kwait_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index f910a4a9b..1e67bfa75 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -132,7 +132,7 @@ func TestKWaitForDelete(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() + // t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -157,11 +157,11 @@ func TestKWaitForDelete(t *testing.T) { gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) - go func(){ - time.Sleep(2 * time.Second) - err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) - assert.NoError(t, err) - }() + go func() { + time.Sleep(2 * time.Second) + err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + assert.NoError(t, err) + }() } resourceList := ResourceList{} for _, obj := range objs { @@ -180,7 +180,7 @@ func TestKWaitForDelete(t *testing.T) { } -func TestKWaitJob(t *testing.T) { +func TestKWait(t *testing.T) { t.Parallel() tests := []struct { name string @@ -224,7 +224,7 @@ func TestKWaitJob(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() + // t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( From 4dd6e19b1d3b2c1d6993f61292dd77d5c2bf4105 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:45:18 +0000 Subject: [PATCH 23/91] provide path for creating new legacy waiter Signed-off-by: Austin Abro --- pkg/kube/wait.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index b4cb85080..cbec8fa59 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -34,26 +34,33 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/util/wait" ) type waiter struct { - c ReadyChecker - timeout time.Duration - log func(string, ...interface{}) + c ReadyChecker + timeout time.Duration + log func(string, ...interface{}) + kubeClient *kubernetes.Clientset +} + +func (w *waiter) NewLegacyWaiter(kubeClient *kubernetes.Clientset, log func(string, ...interface{})) *waiter { + return &waiter{ + log: log, + kubeClient: kubeClient, + } } func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { + w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout return w.waitForResources(resources) } func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - // Implementation - // TODO this function doesn't make sense unless you pass a readyChecker to it - // TODO pass context instead - // checker := NewReadyChecker(cs, w.c.Log, PausedAsReady(true), CheckJobs(true)) + w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) } From cb6d48e6ae553ddd95ac838ae45fb7c0aabbfa71 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 15:11:05 +0000 Subject: [PATCH 24/91] status wait Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- pkg/kube/{kwait.go => statuswait.go} | 10 +++++----- pkg/kube/{kwait_test.go => statuswait_test.go} | 12 ++++++------ 3 files changed, 12 insertions(+), 12 deletions(-) rename pkg/kube/{kwait.go => statuswait.go} (92%) rename pkg/kube/{kwait_test.go => statuswait_test.go} (97%) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 45d842c4a..91b09eb65 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -123,7 +123,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err if err != nil { return nil, err } - waiter = &kstatusWaiter{ + waiter = &statusWaiter{ sw: sw, log: nopLogger, } diff --git a/pkg/kube/kwait.go b/pkg/kube/statuswait.go similarity index 92% rename from pkg/kube/kwait.go rename to pkg/kube/statuswait.go index 1eb1c2053..d58e34cdc 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/statuswait.go @@ -33,24 +33,24 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -type kstatusWaiter struct { +type statusWaiter struct { sw watcher.StatusWatcher log func(string, ...interface{}) } -func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { +func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() return w.wait(ctx, resourceList, false) } -func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { +func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() return w.wait(ctx, resourceList, true) } -func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { +func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} @@ -109,7 +109,7 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource return nil } -func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/statuswait_test.go similarity index 97% rename from pkg/kube/kwait_test.go rename to pkg/kube/statuswait_test.go index 1e67bfa75..31211d226 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/statuswait_test.go @@ -116,7 +116,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func TestKWaitForDelete(t *testing.T) { +func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { name string @@ -132,7 +132,7 @@ func TestKWaitForDelete(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // t.Parallel() + t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -141,7 +141,7 @@ func TestKWaitForDelete(t *testing.T) { batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - kwaiter := kstatusWaiter{ + kwaiter := statusWaiter{ sw: statusWatcher, log: log.Printf, } @@ -180,7 +180,7 @@ func TestKWaitForDelete(t *testing.T) { } -func TestKWait(t *testing.T) { +func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { name string @@ -224,7 +224,7 @@ func TestKWait(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // t.Parallel() + t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -244,7 +244,7 @@ func TestKWait(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - kwaiter := kstatusWaiter{ + kwaiter := statusWaiter{ sw: statusWatcher, log: log.Printf, } From 86338215b7aff34bab669c9842c19aab771c5d6b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 15:42:56 +0000 Subject: [PATCH 25/91] ability to create different waiters Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/kube/client.go | 43 +++++++++++++++++++++++++++++++---------- pkg/kube/client_test.go | 35 ++++++++------------------------- pkg/kube/wait.go | 7 ------- 4 files changed, 42 insertions(+), 45 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index e8e0a997a..7edb4a1ae 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, nil) + kc, err := kube.New(getter, kube.StatusWaiter) if err != nil { return err } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 91b09eb65..ce22f265a 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -88,6 +88,13 @@ type Client struct { Waiter } +type WaitStrategy int + +const ( + StatusWaiter WaitStrategy = iota + LegacyWaiter +) + func init() { // Add CRDs to the scheme. They are missing by default. if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { @@ -112,21 +119,37 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { return sw, nil } -// New creates a new Client. -func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, error) { - if getter == nil { - getter = genericclioptions.NewConfigFlags(true) - } - factory := cmdutil.NewFactory(getter) - if waiter == nil { +func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { + switch strategy { + case LegacyWaiter: + kc, err := factory.KubernetesClientSet() + if err != nil { + return nil, err + } + return &waiter{kubeClient: kc, log: log}, nil + case StatusWaiter: sw, err := getStatusWatcher(factory) if err != nil { return nil, err } - waiter = &statusWaiter{ + return &statusWaiter{ sw: sw, - log: nopLogger, - } + log: log, + }, nil + default: + return nil, errors.New("unknown wait strategy") + } +} + +// New creates a new Client. +func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, error) { + if getter == nil { + getter = genericclioptions.NewConfigFlags(true) + } + factory := cmdutil.NewFactory(getter) + waiter, err := NewWaiter(ws, factory, nopLogger) + if err != nil { + return nil, err } return &Client{ Factory: factory, diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 037719219..3ab415a48 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -453,10 +453,6 @@ func TestPerform(t *testing.T) { } } -// Likely it is not possible to get this test to work with kstatus given that it seems -// kstatus is not making constant get checks on the resources and is instead waiting for events -// Potentially the test could be reworked to make the pods after five seconds -// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") @@ -517,16 +513,11 @@ func TestWait(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -579,16 +570,11 @@ func TestWaitJob(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -643,16 +629,11 @@ func TestWaitDelete(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) @@ -679,7 +660,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, nil) + c, err := New(nil, StatusWaiter) if err != nil { t.Fatal(err) } @@ -692,7 +673,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, nil) + c, err = New(nil, StatusWaiter) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index cbec8fa59..0ee4504cb 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -46,13 +46,6 @@ type waiter struct { kubeClient *kubernetes.Clientset } -func (w *waiter) NewLegacyWaiter(kubeClient *kubernetes.Clientset, log func(string, ...interface{})) *waiter { - return &waiter{ - log: log, - kubeClient: kubeClient, - } -} - func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout From 4c97d1276ca765bc9ba181a6a280b25b75a713dd Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 12:31:43 +0000 Subject: [PATCH 26/91] helm waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 4 ++-- pkg/kube/statuswait.go | 4 ++++ pkg/kube/statuswait_test.go | 2 +- pkg/kube/wait.go | 14 +++++++------- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index ce22f265a..fe830747d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -126,7 +126,7 @@ func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...inter if err != nil { return nil, err } - return &waiter{kubeClient: kc, log: log}, nil + return &HelmWaiter{kubeClient: kc, log: log}, nil case StatusWaiter: sw, err := getStatusWatcher(factory) if err != nil { @@ -333,7 +333,7 @@ func getResource(info *resource.Info) (runtime.Object, error) { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { - w := waiter{ + w := HelmWaiter{ log: c.Log, timeout: timeout, } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index d58e34cdc..bbc92292d 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -51,6 +51,8 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura } func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { + deadline, _ := ctx.Deadline() + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} @@ -110,6 +112,8 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { + deadline, _ := ctx.Deadline() + w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 31211d226..b018691cd 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -143,7 +143,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) kwaiter := statusWaiter{ sw: statusWatcher, - log: log.Printf, + log: t.Logf, } ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 0ee4504cb..044bbbe1d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -39,20 +39,20 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -type waiter struct { +type HelmWaiter struct { c ReadyChecker timeout time.Duration log func(string, ...interface{}) kubeClient *kubernetes.Clientset } -func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { +func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout return w.waitForResources(resources) } -func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { +func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) @@ -60,7 +60,7 @@ func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) err // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *waiter) waitForResources(created ResourceList) error { +func (w *HelmWaiter) waitForResources(created ResourceList) error { w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) ctx, cancel := context.WithTimeout(context.Background(), w.timeout) @@ -94,7 +94,7 @@ func (w *waiter) waitForResources(created ResourceList) error { }) } -func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { +func (w *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } @@ -109,12 +109,12 @@ func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { return true } -func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *waiter) waitForDeletedResources(deleted ResourceList) error { +func (w *HelmWaiter) waitForDeletedResources(deleted ResourceList) error { w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) ctx, cancel := context.WithTimeout(context.Background(), w.timeout) From b8bdcc3a2b866296c2639ef683d55a777ef66403 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 12:33:26 +0000 Subject: [PATCH 27/91] Helm waiter Signed-off-by: Austin Abro --- pkg/kube/wait.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 044bbbe1d..e74753e57 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -39,6 +39,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) +// HelmWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 +// Helm 4 now uses the StatusWaiter interface instead type HelmWaiter struct { c ReadyChecker timeout time.Duration From ac9012577a8fccd13371966539fb953d4ff043ea Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:06:54 +0000 Subject: [PATCH 28/91] status function Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 50 +++++++++++++------------------------ pkg/kube/statuswait_test.go | 3 +-- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bbc92292d..bec38f7c9 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -69,22 +69,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.NotFoundStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done if statusCollector.Error != nil { @@ -140,22 +125,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.CurrentStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done if statusCollector.Error != nil { @@ -177,3 +147,19 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } return nil } + +func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { + return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + } +} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index b018691cd..822204dfe 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -19,7 +19,6 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" "errors" - "log" "testing" "time" @@ -246,7 +245,7 @@ func TestStatusWait(t *testing.T) { } kwaiter := statusWaiter{ sw: statusWatcher, - log: log.Printf, + log: t.Logf, } resourceList := ResourceList{} From 6b68a004400cab1f50cd3fa2861585e3fceb4eca Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:30:29 +0000 Subject: [PATCH 29/91] change error messages Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 6 ++-- pkg/kube/statuswait_test.go | 63 +++++++++++++++++++++++-------------- 2 files changed, 42 insertions(+), 27 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bec38f7c9..8cd8bcfc2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -85,9 +85,9 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL continue } if rs.Status == status.UnknownStatus { - errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("cannot determine resource state, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } else { - errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } } errs = append(errs, ctx.Err()) @@ -140,7 +140,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait if rs.Status == status.CurrentStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 822204dfe..ecd18e183 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -119,20 +119,29 @@ func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { name string - objYamls []string + objToCreate []string + toDelete []string expectErrs []error - waitForJobs bool }{ { - name: "wait for pod to be deleted", - objYamls: []string{podCurrent}, - expectErrs: nil, + name: "wait for pod to be deleted", + objToCreate: []string{podCurrent}, + toDelete: []string{podCurrent}, + expectErrs: nil, + }, + { + name: "error when not all objects are deleted", + objToCreate: []string{jobComplete, podCurrent}, + toDelete: []string{jobComplete}, + expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) + timeout := time.Second * 3 + timeToDeletePod := time.Second * 2 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -140,35 +149,42 @@ func TestStatusWaitForDelete(t *testing.T) { batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - kwaiter := statusWaiter{ + statusWaiter := statusWaiter{ sw: statusWatcher, log: t.Logf, } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - objs := []runtime.Object{} - for _, podYaml := range tt.objYamls { + createdObjs := []runtime.Object{} + for _, objYaml := range tt.objToCreate { m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) + err := yaml.Unmarshal([]byte(objYaml), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) + createdObjs = append(createdObjs, resource) gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) + } + for _, objYaml := range tt.toDelete { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(objYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + gvr := getGVR(t, fakeMapper, resource) go func() { - time.Sleep(2 * time.Second) + time.Sleep(timeToDeletePod) err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) assert.NoError(t, err) }() } resourceList := ResourceList{} - for _, obj := range objs { + for _, obj := range createdObjs { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) resourceList = append(resourceList, list...) } - err := kwaiter.waitForDelete(ctx, resourceList) + err := statusWaiter.waitForDelete(ctx, resourceList) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return @@ -195,7 +211,7 @@ func TestStatusWait(t *testing.T) { { name: "Job is not complete", objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, waitForJobs: true, }, { @@ -212,7 +228,7 @@ func TestStatusWait(t *testing.T) { { name: "one of the pods never becomes ready", objYamls: []string{podNoStatus, podCurrent}, - expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, }, { name: "paused deployment passes", @@ -231,8 +247,13 @@ func TestStatusWait(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + statusWaiter := statusWaiter{ + sw: statusWatcher, + log: t.Logf, + } + objs := []runtime.Object{} + for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) @@ -243,11 +264,6 @@ func TestStatusWait(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - kwaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, - } - resourceList := ResourceList{} for _, obj := range objs { list, err := c.Build(objBody(obj), false) @@ -257,8 +273,7 @@ func TestStatusWait(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() - - err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) + err := statusWaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From e6c6a40fe0fed670eaaaf60ada1643a0946ac3e0 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:58:34 +0000 Subject: [PATCH 30/91] general error message Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 8cd8bcfc2..8268598e6 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -84,11 +84,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL if rs.Status == status.NotFoundStatus { continue } - if rs.Status == status.UnknownStatus { - errs = append(errs, fmt.Errorf("cannot determine resource state, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - } else { - errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - } + errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) From 8ce1876192b12db58993a993e5f307a1a17c3f08 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 14:12:34 +0000 Subject: [PATCH 31/91] get rid of ext interface Signed-off-by: Austin Abro --- pkg/action/hooks.go | 8 ++------ pkg/action/uninstall.go | 6 ++---- pkg/kube/client.go | 9 --------- pkg/kube/interface.go | 6 ------ pkg/kube/statuswait.go | 6 ++++++ pkg/kube/statuswait_test.go | 4 +--- pkg/kube/wait.go | 19 ++++++++----------- 7 files changed, 19 insertions(+), 39 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index ecca1d997..c32b9b3ce 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -22,7 +22,6 @@ import ( "github.com/pkg/errors" - "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/release" helmtime "helm.sh/helm/v4/pkg/time" ) @@ -138,11 +137,8 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return errors.New(joinErrors(errs)) } - //wait for resources until they are deleted to avoid conflicts - if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { - if err := kubeClient.WaitForDelete(resources, timeout); err != nil { - return err - } + if err := cfg.KubeClient.WaitForDelete(resources, timeout); err != nil { + return err } } return nil diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index dda7d6978..75d999976 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -131,10 +131,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) res.Info = kept if u.Wait { - if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok { - if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { - errs = append(errs, err) - } + if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) } } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index fe830747d..968e1b951 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -331,15 +331,6 @@ func getResource(info *resource.Info) (runtime.Object, error) { return obj, nil } -// WaitForDelete wait up to the given timeout for the specified resources to be deleted. -func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { - w := HelmWaiter{ - log: c.Log, - timeout: timeout, - } - return w.waitForDeletedResources(resources) -} - func (c *Client) namespace() string { if c.Namespace != "" { return c.Namespace diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 6cf33c515..30be37f7c 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -67,12 +67,7 @@ type Waiter interface { // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. WaitWithJobs(resources ResourceList, timeout time.Duration) error -} -// InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers. -// -// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface. -type InterfaceExt interface { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. WaitForDelete(resources ResourceList, timeout time.Duration) error } @@ -108,6 +103,5 @@ type InterfaceResources interface { } var _ Interface = (*Client)(nil) -var _ InterfaceExt = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 8268598e6..b1c39948c 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -50,6 +50,12 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura return w.wait(ctx, resourceList, true) } +func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + return w.waitForDelete(ctx, resourceList) +} + func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { deadline, _ := ctx.Deadline() w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index ecd18e183..0084606cf 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -153,8 +153,6 @@ func TestStatusWaitForDelete(t *testing.T) { sw: statusWatcher, log: t.Logf, } - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() createdObjs := []runtime.Object{} for _, objYaml := range tt.objToCreate { m := make(map[string]interface{}) @@ -184,7 +182,7 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) resourceList = append(resourceList, list...) } - err := statusWaiter.waitForDelete(ctx, resourceList) + err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index e74753e57..97fa8b3e1 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -43,29 +43,26 @@ import ( // Helm 4 now uses the StatusWaiter interface instead type HelmWaiter struct { c ReadyChecker - timeout time.Duration log func(string, ...interface{}) kubeClient *kubernetes.Clientset } func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) - w.timeout = timeout - return w.waitForResources(resources) + return w.waitForResources(resources, timeout) } func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) - w.timeout = timeout - return w.waitForResources(resources) + return w.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *HelmWaiter) waitForResources(created ResourceList) error { - w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) +func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { + w.log("beginning wait for %d resources with timeout of %v", len(created), timeout) - ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() numberOfErrors := make([]int, len(created)) @@ -116,10 +113,10 @@ func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *HelmWaiter) waitForDeletedResources(deleted ResourceList) error { - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) +func (w *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) - ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) { From c26b44f65172b2d6e41e4ce8f0024c70c595ff6a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 15:21:11 +0000 Subject: [PATCH 32/91] update names Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/kube/client.go | 8 ++++---- pkg/kube/client_test.go | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 7edb4a1ae..0157ce1cc 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWaiter) + kc, err := kube.New(getter, kube.StatusWaiterStrategy) if err != nil { return err } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 968e1b951..daa484b69 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -91,8 +91,8 @@ type Client struct { type WaitStrategy int const ( - StatusWaiter WaitStrategy = iota - LegacyWaiter + StatusWaiterStrategy WaitStrategy = iota + LegacyWaiterStrategy ) func init() { @@ -121,13 +121,13 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { switch strategy { - case LegacyWaiter: + case LegacyWaiterStrategy: kc, err := factory.KubernetesClientSet() if err != nil { return nil, err } return &HelmWaiter{kubeClient: kc, log: log}, nil - case StatusWaiter: + case StatusWaiterStrategy: sw, err := getStatusWatcher(factory) if err != nil { return nil, err diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 3ab415a48..50fc65cef 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -513,7 +513,7 @@ func TestWait(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -570,7 +570,7 @@ func TestWaitJob(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -629,7 +629,7 @@ func TestWaitDelete(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -660,7 +660,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWaiter) + c, err := New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) } @@ -673,7 +673,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWaiter) + c, err = New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) } From 649475265df89f5b514dcd95bcf90d4b32a215f3 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:25:49 +0000 Subject: [PATCH 33/91] implement logger Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 22 ++++++++++++++++++++++ pkg/kube/statuswait_test.go | 8 ++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index b1c39948c..bb92ae74e 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -75,6 +75,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) + go logResource(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done @@ -127,6 +128,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) + go logResource(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done @@ -165,3 +167,23 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. } } } + +func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + for _, id := range resources { + rs := sc.ResourceStatuses[id] + if rs.Status != desiredStatus { + log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s\n", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) + // only log one resource to not overwhelm the logs + break + } + } + } + } +} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0084606cf..0d635ad79 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" "errors" + "fmt" "testing" "time" @@ -114,6 +115,9 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured require.NoError(t, err) return mapping.Resource } +func testLogger(message string, args ...interface{}) { + fmt.Printf(message, args...) +} func TestStatusWaitForDelete(t *testing.T) { t.Parallel() @@ -151,7 +155,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: t.Logf, + log: testLogger, } createdObjs := []runtime.Object{} for _, objYaml := range tt.objToCreate { @@ -248,7 +252,7 @@ func TestStatusWait(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: t.Logf, + log: testLogger, } objs := []runtime.Object{} From 71434c0b388a7bf8a1bdf3302779199becc3ce4b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:26:20 +0000 Subject: [PATCH 34/91] implement logger Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0d635ad79..945131a5e 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -144,8 +144,8 @@ func TestStatusWaitForDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) - timeout := time.Second * 3 - timeToDeletePod := time.Second * 2 + timeout := time.Second * 2 + timeUntilPodDelete := time.Second * 1 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -175,7 +175,7 @@ func TestStatusWaitForDelete(t *testing.T) { resource := &unstructured.Unstructured{Object: m} gvr := getGVR(t, fakeMapper, resource) go func() { - time.Sleep(timeToDeletePod) + time.Sleep(timeUntilPodDelete) err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) assert.NoError(t, err) }() From e9d98543644b7b59b17c38a0af4ca500ab7e2644 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:50:40 +0000 Subject: [PATCH 35/91] introduce test for status wait Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 8 ++++---- pkg/kube/statuswait_test.go | 33 +++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bb92ae74e..a4590aa42 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -75,7 +75,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResource(ctx, resources, statusCollector, status.NotFoundStatus, w.log) + go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done @@ -128,7 +128,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResource(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) + go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done @@ -168,7 +168,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. } } -func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { +func logResourceStatus(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { @@ -179,7 +179,7 @@ func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collec for _, id := range resources { rs := sc.ResourceStatuses[id] if rs.Status != desiredStatus { - log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s\n", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) + log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) // only log one resource to not overwhelm the logs break } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 945131a5e..e94e13313 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -35,7 +35,11 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -115,8 +119,29 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured require.NoError(t, err) return mapping.Resource } -func testLogger(message string, args ...interface{}) { - fmt.Printf(message, args...) + +func TestStatusLogger(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500) + defer cancel() + readyPod := object.ObjMetadata{ + Name: "readyPod", + GroupKind: schema.GroupKind{Kind: "Pod"}, + } + notReadyPod := object.ObjMetadata{ + Name: "notReadyPod", + GroupKind: schema.GroupKind{Kind: "Pod"}, + } + objs := []object.ObjMetadata{readyPod, notReadyPod} + resourceStatusCollector := collector.NewResourceStatusCollector(objs) + resourceStatusCollector.ResourceStatuses[readyPod] = &event.ResourceStatus{ + Identifier: readyPod, + Status: status.CurrentStatus, + } + expectedMessage := "waiting for resource, name: notReadyPod, kind: Pod, desired status: Current, actual status: Unknown" + testLogger := func(message string, args ...interface{}) { + assert.Equal(t, expectedMessage, fmt.Sprintf(message, args...)) + } + logResourceStatus(ctx, objs, resourceStatusCollector, status.CurrentStatus, testLogger) } func TestStatusWaitForDelete(t *testing.T) { @@ -155,7 +180,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: testLogger, + log: t.Logf, } createdObjs := []runtime.Object{} for _, objYaml := range tt.objToCreate { @@ -252,7 +277,7 @@ func TestStatusWait(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: testLogger, + log: t.Logf, } objs := []runtime.Object{} From 674ab0d4f6b0fc66b656ae98bab0d829eac9c5d2 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:55:30 +0000 Subject: [PATCH 36/91] t.Parrallel Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index e94e13313..c3aa61a69 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -121,6 +121,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured } func TestStatusLogger(t *testing.T) { + t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500) defer cancel() readyPod := object.ObjMetadata{ From eaa6e14546ba3bd58150df6f407594330247d2f9 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:57:32 +0000 Subject: [PATCH 37/91] test cleanup Signed-off-by: Austin Abro --- pkg/kube/client_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 50fc65cef..abe74022d 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -459,7 +459,6 @@ func TestWait(t *testing.T) { var created *time.Time c := newTestClient(t) - c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { @@ -659,7 +658,7 @@ func TestWaitDelete(t *testing.T) { } func TestReal(t *testing.T) { - t.Skip("This is a live test, comment this line to run") + // t.Skip("This is a live test, comment this line to run") c, err := New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) From d07f546003c0113ab65214c2a0f36727fc1d3c23 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:02:50 +0000 Subject: [PATCH 38/91] get rid of rest client Signed-off-by: Austin Abro --- pkg/kube/factory.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 3b1ec1d6b..78c8323fd 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -21,7 +21,6 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -45,9 +44,6 @@ type Factory interface { // KubernetesClientSet gives you back an external clientset KubernetesClientSet() (*kubernetes.Clientset, error) - // Returns a RESTClient for accessing Kubernetes resources or an error. - RESTClient() (*restclient.RESTClient, error) - // NewBuilder returns an object that assists in loading objects from both disk and the server // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder From f9736d9022d10b0203bd1a5479f5aadc42b93b6e Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:06:02 +0000 Subject: [PATCH 39/91] renames Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 92 ++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index c3aa61a69..d853e0012 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/testutil" ) -var podCurrent = ` +var podCurrentManifest = ` apiVersion: v1 kind: Pod metadata: @@ -56,7 +56,7 @@ status: phase: Running ` -var podNoStatus = ` +var podNoStatusManifest = ` apiVersion: v1 kind: Pod metadata: @@ -64,7 +64,7 @@ metadata: namespace: ns ` -var jobNoStatus = ` +var jobNoStatusManifest = ` apiVersion: batch/v1 kind: Job metadata: @@ -73,7 +73,7 @@ metadata: generation: 1 ` -var jobComplete = ` +var jobCompleteManifest = ` apiVersion: batch/v1 kind: Job metadata: @@ -88,7 +88,7 @@ status: status: "True" ` -var pausedDeploymentYaml = ` +var pausedDeploymentManifest = ` apiVersion: apps/v1 kind: Deployment metadata: @@ -148,22 +148,22 @@ func TestStatusLogger(t *testing.T) { func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { - name string - objToCreate []string - toDelete []string - expectErrs []error + name string + manifestsToCreate []string + manifestsToDelete []string + expectErrs []error }{ { - name: "wait for pod to be deleted", - objToCreate: []string{podCurrent}, - toDelete: []string{podCurrent}, - expectErrs: nil, + name: "wait for pod to be deleted", + manifestsToCreate: []string{podCurrentManifest}, + manifestsToDelete: []string{podCurrentManifest}, + expectErrs: nil, }, { - name: "error when not all objects are deleted", - objToCreate: []string{jobComplete, podCurrent}, - toDelete: []string{jobComplete}, - expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + name: "error when not all objects are deleted", + manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, + manifestsToDelete: []string{jobCompleteManifest}, + expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { @@ -184,9 +184,9 @@ func TestStatusWaitForDelete(t *testing.T) { log: t.Logf, } createdObjs := []runtime.Object{} - for _, objYaml := range tt.objToCreate { + for _, manifest := range tt.manifestsToCreate { m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(objYaml), &m) + err := yaml.Unmarshal([]byte(manifest), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} createdObjs = append(createdObjs, resource) @@ -194,9 +194,9 @@ func TestStatusWaitForDelete(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - for _, objYaml := range tt.toDelete { + for _, manifest := range tt.manifestsToDelete { m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(objYaml), &m) + err := yaml.Unmarshal([]byte(manifest), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} gvr := getGVR(t, fakeMapper, resource) @@ -226,42 +226,42 @@ func TestStatusWaitForDelete(t *testing.T) { func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool + name string + objManifests []string + expectErrs []error + waitForJobs bool }{ { - name: "Job is complete", - objYamls: []string{jobComplete}, - expectErrs: nil, + name: "Job is complete", + objManifests: []string{jobCompleteManifest}, + expectErrs: nil, }, { - name: "Job is not complete", - objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, - waitForJobs: true, + name: "Job is not complete", + objManifests: []string{jobNoStatusManifest}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + waitForJobs: true, }, { - name: "Job is not ready, but we pass wait anyway", - objYamls: []string{jobNoStatus}, - expectErrs: nil, - waitForJobs: false, + name: "Job is not ready, but we pass wait anyway", + objManifests: []string{jobNoStatusManifest}, + expectErrs: nil, + waitForJobs: false, }, { - name: "Pod is ready", - objYamls: []string{podCurrent}, - expectErrs: nil, + name: "Pod is ready", + objManifests: []string{podCurrentManifest}, + expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objYamls: []string{podNoStatus, podCurrent}, - expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "one of the pods never becomes ready", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, }, { - name: "paused deployment passes", - objYamls: []string{pausedDeploymentYaml}, - expectErrs: nil, + name: "paused deployment passes", + objManifests: []string{pausedDeploymentManifest}, + expectErrs: nil, }, } @@ -282,7 +282,7 @@ func TestStatusWait(t *testing.T) { } objs := []runtime.Object{} - for _, podYaml := range tt.objYamls { + for _, podYaml := range tt.objManifests { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) assert.NoError(t, err) From 8fe66998bf9b32c103c2eddbbd6583433dbdb470 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:13:59 +0000 Subject: [PATCH 40/91] refactor obj logic Signed-off-by: Austin Abro --- pkg/kube/client_test.go | 2 +- pkg/kube/statuswait.go | 18 +++++------------- pkg/kube/wait.go | 2 +- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index abe74022d..f63070fe1 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -658,7 +658,7 @@ func TestWaitDelete(t *testing.T) { } func TestReal(t *testing.T) { - // t.Skip("This is a live test, comment this line to run") + t.Skip("This is a live test, comment this line to run") c, err := New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index a4590aa42..a0378aaf5 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -24,7 +24,6 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" @@ -61,13 +60,9 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - runtimeObjs := []runtime.Object{} - for _, resource := range resourceList { - runtimeObjs = append(runtimeObjs, resource.Object) - } resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) + for _, resource := range resourceList { + obj, err := object.RuntimeToObjMeta(resource.Object) if err != nil { return err } @@ -104,7 +99,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - runtimeObjs := []runtime.Object{} + resources := []object.ObjMetadata{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { case *batchv1.Job: @@ -116,16 +111,13 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait continue } } - runtimeObjs = append(runtimeObjs, resource.Object) - } - resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) + obj, err := object.RuntimeToObjMeta(resource.Object) if err != nil { return err } resources = append(resources, obj) } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 97fa8b3e1..525373e4d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -40,7 +40,7 @@ import ( ) // HelmWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 -// Helm 4 now uses the StatusWaiter interface instead +// Helm 4 now uses the StatusWaiter implementation instead type HelmWaiter struct { c ReadyChecker log func(string, ...interface{}) From 9894d3ae78d7d2d2119c9de7f2d17454908c8fbe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:19:39 +0000 Subject: [PATCH 41/91] shorten interface Signed-off-by: Austin Abro --- pkg/kube/factory.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 78c8323fd..013cd7b73 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,7 +17,7 @@ limitations under the License. package kube // import "helm.sh/helm/v4/pkg/kube" import ( - "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -34,7 +34,9 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. type Factory interface { - genericclioptions.RESTClientGetter + // ToRESTMapper returns a restmapper + ToRESTMapper() (meta.RESTMapper, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig From c2dc44deb99d21320f3d6f4c58777a87c6d0de5b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 14:59:30 +0000 Subject: [PATCH 42/91] use dynamic rest mapper Signed-off-by: Austin Abro --- pkg/kube/client.go | 11 ++++++++++- pkg/kube/factory.go | 4 ++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index daa484b69..b607ea3ef 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -38,6 +38,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -107,11 +108,19 @@ func init() { } func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { + cfg, err := factory.ToRESTConfig() + if err != nil { + return nil, err + } dynamicClient, err := factory.DynamicClient() if err != nil { return nil, err } - restMapper, err := factory.ToRESTMapper() + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) if err != nil { return nil, err } diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 013cd7b73..7f21c85c6 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -21,6 +21,7 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -37,6 +38,9 @@ type Factory interface { // ToRESTMapper returns a restmapper ToRESTMapper() (meta.RESTMapper, error) + // ToRESTConfig returns restconfig + ToRESTConfig() (*rest.Config, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig From a859742e288fd546a3412ed0674d2c4b693e8206 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:00:26 +0000 Subject: [PATCH 43/91] remove rest mapper Signed-off-by: Austin Abro --- pkg/kube/factory.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 7f21c85c6..1d237c307 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,7 +17,6 @@ limitations under the License. package kube // import "helm.sh/helm/v4/pkg/kube" import ( - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -35,9 +34,6 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. type Factory interface { - // ToRESTMapper returns a restmapper - ToRESTMapper() (meta.RESTMapper, error) - // ToRESTConfig returns restconfig ToRESTConfig() (*rest.Config, error) From 816a78685304b45b15e4ae397e75a1760f8d54a0 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:01:33 +0000 Subject: [PATCH 44/91] go mod tidy Signed-off-by: Austin Abro --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3274e6b79..65372cdda 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( k8s.io/kubectl v0.32.0 oras.land/oras-go v1.2.6 sigs.k8s.io/cli-utils v0.37.2 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/yaml v1.4.0 ) @@ -185,7 +186,6 @@ require ( k8s.io/component-base v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect From e56a6e678f534fea6c7efb331fa3b4a0d9e591eb Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:03:21 +0000 Subject: [PATCH 45/91] diff Signed-off-by: Austin Abro --- pkg/kube/interface.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 30be37f7c..f8e3c2ee2 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -34,9 +34,6 @@ type Interface interface { // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) - // Update updates one or more resources or creates the resource - // if it doesn't exist. - Update(original, target ResourceList, force bool) (*Result, error) // WatchUntilReady watches the resources given and waits until it is ready. // // This method is mainly for hook implementations. It watches for a resource to @@ -48,6 +45,11 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error + + // Update updates one or more resources or creates the resource + // if it doesn't exist. + Update(original, target ResourceList, force bool) (*Result, error) + // Build creates a resource list from a Reader. // // Reader must contain a YAML stream (one or more YAML documents separated From 4e12f9d5301f83ab05b9df06c25a1d4e2c7fa2f1 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:38:23 +0000 Subject: [PATCH 46/91] simplify messages Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index a0378aaf5..534aece8d 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -40,24 +40,25 @@ type statusWaiter struct { func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) return w.wait(ctx, resourceList, false) } func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) return w.wait(ctx, resourceList, true) } func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) return w.waitForDelete(ctx, resourceList) } func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { - deadline, _ := ctx.Deadline() - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} @@ -94,9 +95,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { - deadline, _ := ctx.Deadline() - w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} From cf51d714e8cc27ebad0d44d19e69252ef86e5e94 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 15 Jan 2025 17:33:35 +0000 Subject: [PATCH 47/91] go fmt Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 534aece8d..7ac4706ee 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -95,7 +95,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} From f1b642cb0d227ce6d661ccd636c7cfb6392e93fe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 27 Jan 2025 16:15:39 +0000 Subject: [PATCH 48/91] unexport newWaiter function Signed-off-by: Austin Abro --- pkg/kube/client.go | 57 ++++++++++++++++++----------------------- pkg/kube/client_test.go | 12 ++++----- 2 files changed, 31 insertions(+), 38 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b607ea3ef..e3fdb8b3b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -107,43 +107,35 @@ func init() { } } -func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { - cfg, err := factory.ToRESTConfig() - if err != nil { - return nil, err - } - dynamicClient, err := factory.DynamicClient() - if err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(cfg) - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) - if err != nil { - return nil, err - } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - return sw, nil -} - -func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { +func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { case LegacyWaiterStrategy: - kc, err := factory.KubernetesClientSet() + kc, err := c.Factory.KubernetesClientSet() if err != nil { return nil, err } - return &HelmWaiter{kubeClient: kc, log: log}, nil + return &HelmWaiter{kubeClient: kc, log: c.Log}, nil case StatusWaiterStrategy: - sw, err := getStatusWatcher(factory) + cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err } + dynamicClient, err := c.Factory.DynamicClient() + if err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + if err != nil { + return nil, err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) return &statusWaiter{ sw: sw, - log: log, + log: c.Log, }, nil default: return nil, errors.New("unknown wait strategy") @@ -156,15 +148,16 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e getter = genericclioptions.NewConfigFlags(true) } factory := cmdutil.NewFactory(getter) - waiter, err := NewWaiter(ws, factory, nopLogger) + c := &Client{ + Factory: factory, + Log: nopLogger, + } + var err error + c.Waiter, err = c.newWaiter(ws) if err != nil { return nil, err } - return &Client{ - Factory: factory, - Log: nopLogger, - Waiter: waiter, - }, nil + return c, nil } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index f63070fe1..cdf75938e 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -512,11 +512,11 @@ func TestWait(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -569,11 +569,11 @@ func TestWaitJob(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -628,11 +628,11 @@ func TestWaitDelete(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) From a8f53f98ee2206dababbbc0bb8f1037c529488e7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 15:22:14 +0000 Subject: [PATCH 49/91] WIP custom status reader Signed-off-by: Austin Abro --- pkg/kube/client.go | 6 +- pkg/kube/job_status_reader.go | 120 +++++++++++++++++++++++++++++ pkg/kube/job_status_reader_test.go | 79 +++++++++++++++++++ 3 files changed, 204 insertions(+), 1 deletion(-) create mode 100644 pkg/kube/job_status_reader.go create mode 100644 pkg/kube/job_status_reader_test.go diff --git a/pkg/kube/client.go b/pkg/kube/client.go index e3fdb8b3b..b4164a8ff 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -59,6 +59,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -133,6 +134,9 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { return nil, err } sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + newCustomJobStatusReader := NewCustomJobStatusReader(restMapper) + customSR := statusreaders.NewStatusReader(restMapper, newCustomJobStatusReader) + sw.StatusReader = customSR return &statusWaiter{ sw: sw, log: c.Log, @@ -148,7 +152,7 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e getter = genericclioptions.NewConfigFlags(true) } factory := cmdutil.NewFactory(getter) - c := &Client{ + c := &Client{ Factory: factory, Log: nopLogger, } diff --git a/pkg/kube/job_status_reader.go b/pkg/kube/job_status_reader.go new file mode 100644 index 000000000..f6eb8d3d9 --- /dev/null +++ b/pkg/kube/job_status_reader.go @@ -0,0 +1,120 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type customJobStatusReader struct { + genericStatusReader engine.StatusReader +} + +func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader { + genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions) + return &customJobStatusReader{ + genericStatusReader: genericStatusReader, + } +} + +func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool { + return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() +} + +func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatus(ctx, reader, resource) +} + +func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource) +} + +// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go +// Modified to return Current status only when the Job has completed as opposed to when it's in progress. +func jobConditions(u *unstructured.Unstructured) (*status.Result, error) { + obj := u.UnstructuredContent() + + parallelism := status.GetIntField(obj, ".spec.parallelism", 1) + completions := status.GetIntField(obj, ".spec.completions", parallelism) + succeeded := status.GetIntField(obj, ".status.succeeded", 0) + failed := status.GetIntField(obj, ".status.failed", 0) + + // Conditions + // https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24 + objc, err := status.GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + for _, c := range objc.Status.Conditions { + switch c.Type { + case "Complete": + if c.Status == corev1.ConditionTrue { + message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions) + return &status.Result{ + Status: status.CurrentStatus, + Message: message, + Conditions: []status.Condition{}, + }, nil + } + case "Failed": + message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions) + if c.Status == corev1.ConditionTrue { + return &status.Result{ + Status: status.FailedStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Reason: "JobFailed", + Message: message, + }, + }, + }, nil + } + } + } + + message := "Job in progress" + return &status.Result{ + Status: status.InProgressStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: "JobInProgress", + Message: message, + }, + }, + }, nil +} diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go new file mode 100644 index 000000000..af372c8b3 --- /dev/null +++ b/pkg/kube/job_status_reader_test.go @@ -0,0 +1,79 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +import ( + "testing" + + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + // If the incoming object is already unstructured, perform a deep copy first + // otherwise DefaultUnstructuredConverter ends up returning the inner map without + // making a copy. + if _, ok := obj.(runtime.Unstructured); ok { + obj = obj.DeepCopyObject() + } + rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: rawMap}, nil +} + +func TestJobConditions(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{}, + } + + t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result) + }) + + t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { + job.Status = batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + } + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.CurrentStatus, result.Status) + }) +} From a5909993231c0826a7c5c139241d0e053ce9d03e Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 19:53:42 +0000 Subject: [PATCH 50/91] switch client Signed-off-by: Austin Abro --- pkg/kube/client.go | 11 +++-------- pkg/kube/statuswait.go | 33 +++++++++++++++++++-------------- pkg/kube/statuswait_test.go | 22 ++++++++++++---------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b4164a8ff..3753998ff 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -37,7 +37,6 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" @@ -59,7 +58,6 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -133,13 +131,10 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { if err != nil { return nil, err } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - newCustomJobStatusReader := NewCustomJobStatusReader(restMapper) - customSR := statusreaders.NewStatusReader(restMapper, newCustomJobStatusReader) - sw.StatusReader = customSR return &statusWaiter{ - sw: sw, - log: c.Log, + restMapper: restMapper, + client: dynamicClient, + log: c.Log, }, nil default: return nil, errors.New("unknown wait strategy") diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 7ac4706ee..1aa424c4c 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,42 +23,51 @@ import ( "time" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/dynamic" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" ) type statusWaiter struct { - sw watcher.StatusWatcher - log func(string, ...interface{}) + client dynamic.Interface + restMapper meta.RESTMapper + log func(string, ...interface{}) } func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) - return w.wait(ctx, resourceList, false) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) - return w.wait(ctx, resourceList, true) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + newCustomJobStatusReader := NewCustomJobStatusReader(w.restMapper) + customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) + sw.StatusReader = customSR + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) - return w.waitForDelete(ctx, resourceList) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + return w.waitForDelete(ctx, resourceList, sw) } -func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { +func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} @@ -69,7 +78,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } resources = append(resources, obj) } - eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) @@ -95,16 +104,12 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { - case *batchv1.Job: - if !waitForJobs { - continue - } case *appsv1.Deployment: if value.Spec.Paused { continue @@ -117,7 +122,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait resources = append(resources, obj) } - eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index d853e0012..f3694953c 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -178,10 +177,10 @@ func TestStatusWaitForDelete(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, + restMapper: fakeMapper, + client: fakeClient, + log: t.Logf, } createdObjs := []runtime.Object{} for _, manifest := range tt.manifestsToCreate { @@ -275,10 +274,10 @@ func TestStatusWait(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, + client: fakeClient, + restMapper: fakeMapper, + log: t.Logf, } objs := []runtime.Object{} @@ -299,9 +298,12 @@ func TestStatusWait(t *testing.T) { resourceList = append(resourceList, list...) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() - err := statusWaiter.wait(ctx, resourceList, tt.waitForJobs) + var err error + if tt.waitForJobs { + err = statusWaiter.Wait(resourceList, time.Second*3) + } else { + err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) + } if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From bbe3246f0ada5dab5cc9c02873e40810e40c33fe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:41:14 +0000 Subject: [PATCH 51/91] tests passing Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 14 ++--- pkg/kube/statuswait_test.go | 82 ++++++++++++++++++++++++++++-- 2 files changed, 85 insertions(+), 11 deletions(-) diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index af372c8b3..60760efb9 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -53,13 +53,13 @@ func TestJobConditions(t *testing.T) { Status: batchv1.JobStatus{}, } - t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result) - }) + // t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + // us, err := toUnstructured(job) + // assert.NoError(t, err) + // result, err := jobConditions(us) + // assert.NoError(t, err) + // assert.Equal(t, status.InProgressStatus, result) + // }) t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { job.Status = batchv1.JobStatus{ diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index f3694953c..c028f8fd0 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -72,6 +72,80 @@ metadata: generation: 1 ` +var jobReadyManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: sleep-job + namespace: default + uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + resourceVersion: "568" + generation: 1 + creationTimestamp: 2025-02-06T16:34:20-05:00 + labels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + batch.kubernetes.io/job-name: sleep-job + controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + job-name: sleep-job + annotations: + kubectl.kubernetes.io/last-applied-configuration: "{\"apiVersion\":\"batch/v1\",\"kind\":\"Job\",\"metadata\":{\"annotations\":{},\"name\":\"sleep-job\",\"namespace\":\"default\"},\"spec\":{\"template\":{\"metadata\":{\"name\":\"sleep-job\"},\"spec\":{\"containers\":[{\"command\":[\"sh\",\"-c\",\"sleep 100\"],\"image\":\"busybox\",\"name\":\"sleep\"}],\"restartPolicy\":\"Never\"}}}}\n" + managedFields: + - manager: kubectl-client-side-apply + operation: Update + apiVersion: batch/v1 + time: 2025-02-06T16:34:20-05:00 + fieldsType: FieldsV1 + fieldsV1: {} + - manager: k3s + operation: Update + apiVersion: batch/v1 + time: 2025-02-06T16:34:23-05:00 + fieldsType: FieldsV1 + fieldsV1: {} + subresource: status +spec: + parallelism: 1 + completions: 1 + backoffLimit: 6 + selector: + matchLabels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + manualSelector: false + template: + metadata: + name: sleep-job + labels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + batch.kubernetes.io/job-name: sleep-job + controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + job-name: sleep-job + spec: + containers: + - name: sleep + image: busybox + command: + - sh + - -c + - sleep 100 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Never + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + completionMode: NonIndexed + suspend: false + podReplacementPolicy: TerminatingOrFailed +status: + startTime: 2025-02-06T16:34:20-05:00 + active: 1 + terminating: 0 + uncountedTerminatedPods: {} + ready: 1 +` + var jobCompleteManifest = ` apiVersion: batch/v1 kind: Job @@ -242,8 +316,8 @@ func TestStatusWait(t *testing.T) { waitForJobs: true, }, { - name: "Job is not ready, but we pass wait anyway", - objManifests: []string{jobNoStatusManifest}, + name: "Job is not ready but we pass wait anyway", + objManifests: []string{jobReadyManifest}, expectErrs: nil, waitForJobs: false, }, @@ -300,9 +374,9 @@ func TestStatusWait(t *testing.T) { var err error if tt.waitForJobs { - err = statusWaiter.Wait(resourceList, time.Second*3) - } else { err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) + } else { + err = statusWaiter.Wait(resourceList, time.Second*3) } if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) From 59ef43e399375b773c1c42fe51befacbbb62e0f3 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:41:43 +0000 Subject: [PATCH 52/91] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index c028f8fd0..06aa36c09 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -81,28 +81,6 @@ metadata: uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 resourceVersion: "568" generation: 1 - creationTimestamp: 2025-02-06T16:34:20-05:00 - labels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - batch.kubernetes.io/job-name: sleep-job - controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - job-name: sleep-job - annotations: - kubectl.kubernetes.io/last-applied-configuration: "{\"apiVersion\":\"batch/v1\",\"kind\":\"Job\",\"metadata\":{\"annotations\":{},\"name\":\"sleep-job\",\"namespace\":\"default\"},\"spec\":{\"template\":{\"metadata\":{\"name\":\"sleep-job\"},\"spec\":{\"containers\":[{\"command\":[\"sh\",\"-c\",\"sleep 100\"],\"image\":\"busybox\",\"name\":\"sleep\"}],\"restartPolicy\":\"Never\"}}}}\n" - managedFields: - - manager: kubectl-client-side-apply - operation: Update - apiVersion: batch/v1 - time: 2025-02-06T16:34:20-05:00 - fieldsType: FieldsV1 - fieldsV1: {} - - manager: k3s - operation: Update - apiVersion: batch/v1 - time: 2025-02-06T16:34:23-05:00 - fieldsType: FieldsV1 - fieldsV1: {} - subresource: status spec: parallelism: 1 completions: 1 From b9cbc93003d1d55399dccf13da396d6011a9b9cc Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:45:07 +0000 Subject: [PATCH 53/91] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 39 ------------------------------------- 1 file changed, 39 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 06aa36c09..ba4e79a58 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -78,49 +78,10 @@ kind: Job metadata: name: sleep-job namespace: default - uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - resourceVersion: "568" generation: 1 -spec: - parallelism: 1 - completions: 1 - backoffLimit: 6 - selector: - matchLabels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - manualSelector: false - template: - metadata: - name: sleep-job - labels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - batch.kubernetes.io/job-name: sleep-job - controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - job-name: sleep-job - spec: - containers: - - name: sleep - image: busybox - command: - - sh - - -c - - sleep 100 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Never - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - completionMode: NonIndexed - suspend: false - podReplacementPolicy: TerminatingOrFailed status: startTime: 2025-02-06T16:34:20-05:00 active: 1 - terminating: 0 - uncountedTerminatedPods: {} ready: 1 ` From 0314135290d69e35c4f3c70330cc212ae0186a7c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 14:34:37 +0000 Subject: [PATCH 54/91] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 73 ++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 5 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index ba4e79a58..9e3b696d5 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -76,7 +76,7 @@ var jobReadyManifest = ` apiVersion: batch/v1 kind: Job metadata: - name: sleep-job + name: ready-not-complete namespace: default generation: 1 status: @@ -182,8 +182,8 @@ func TestStatusWaitForDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) - timeout := time.Second * 2 - timeUntilPodDelete := time.Second * 1 + timeout := time.Second + timeUntilPodDelete := time.Millisecond * 500 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -232,7 +232,6 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) }) } - } func TestStatusWait(t *testing.T) { @@ -314,7 +313,7 @@ func TestStatusWait(t *testing.T) { var err error if tt.waitForJobs { err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) - } else { + } else { err = statusWaiter.Wait(resourceList, time.Second*3) } if tt.expectErrs != nil { @@ -325,3 +324,67 @@ func TestStatusWait(t *testing.T) { }) } } + +func TestWaitForJobComplete(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + expectErrs []error + }{ + { + name: "Job is complete", + objManifests: []string{jobCompleteManifest}, + }, + { + name: "Job is not ready", + objManifests: []string{jobNoStatusManifest}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + log: t.Logf, + } + objs := []runtime.Object{} + for _, podYaml := range tt.objManifests { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + assert.NoError(t, err) + } + resourceList := ResourceList{} + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + + err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} From cc83b7c2e6b9403e7347990d11f329abd0fd4403 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 15:01:53 +0000 Subject: [PATCH 55/91] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 9e3b696d5..131224e8b 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -242,11 +242,6 @@ func TestStatusWait(t *testing.T) { expectErrs []error waitForJobs bool }{ - { - name: "Job is complete", - objManifests: []string{jobCompleteManifest}, - expectErrs: nil, - }, { name: "Job is not complete", objManifests: []string{jobNoStatusManifest}, @@ -254,7 +249,7 @@ func TestStatusWait(t *testing.T) { waitForJobs: true, }, { - name: "Job is not ready but we pass wait anyway", + name: "Job is ready but not complete", objManifests: []string{jobReadyManifest}, expectErrs: nil, waitForJobs: false, @@ -310,12 +305,7 @@ func TestStatusWait(t *testing.T) { resourceList = append(resourceList, list...) } - var err error - if tt.waitForJobs { - err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) - } else { - err = statusWaiter.Wait(resourceList, time.Second*3) - } + err := statusWaiter.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From f49a7e134a4da7967be9f65bfa1f91a159889252 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 15:14:25 +0000 Subject: [PATCH 56/91] start watch until ready Signed-off-by: Austin Abro --- pkg/kube/client.go | 157 ----------------------------------------- pkg/kube/interface.go | 24 +++---- pkg/kube/statuswait.go | 5 ++ pkg/kube/wait.go | 157 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 174 insertions(+), 169 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 3753998ff..8dca1c51b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -18,7 +18,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -27,11 +26,9 @@ import ( "reflect" "strings" "sync" - "time" jsonpatch "github.com/evanphx/json-patch" "github.com/pkg/errors" - batch "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" @@ -39,23 +36,18 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - cachetools "k8s.io/client-go/tools/cache" - watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -524,52 +516,6 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa return res, nil } -func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error { - return func(info *resource.Info) error { - return c.watchUntilReady(t, info) - } -} - -// WatchUntilReady watches the resources given and waits until it is ready. -// -// This method is mainly for hook implementations. It watches for a resource to -// hit a particular milestone. The milestone depends on the Kind. -// -// For most kinds, it checks to see if the resource is marked as Added or Modified -// by the Kubernetes event stream. For some kinds, it does more: -// -// - Jobs: A job is marked "Ready" when it has successfully completed. This is -// ascertained by watching the Status fields in a job's output. -// - Pods: A pod is marked "Ready" when it has successfully completed. This is -// ascertained by watching the status.phase field in a pod's output. -// -// Handling for other kinds will be added as necessary. -func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): - // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 - return perform(resources, c.watchTimeout(timeout)) -} - -func perform(infos ResourceList, fn func(*resource.Info) error) error { - var result error - - if len(infos) == 0 { - return ErrNoObjectsVisited - } - - errs := make(chan error) - go batchPerform(infos, fn, errs) - - for range infos { - err := <-errs - if err != nil { - result = multierror.Append(result, err) - } - } - - return result -} - // getManagedFieldsManager returns the manager string. If one was set it will be returned. // Otherwise, one is calculated based on the name of the binary. func getManagedFieldsManager() string { @@ -721,109 +667,6 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } -func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { - kind := info.Mapping.GroupVersionKind.Kind - switch kind { - case "Job", "Pod": - default: - return nil - } - - c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) - - // Use a selector on the name of the resource. This should be unique for the - // given version and kind - selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) - if err != nil { - return err - } - lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) - - // What we watch for depends on the Kind. - // - For a Job, we watch for completion. - // - For all else, we watch until Ready. - // In the future, we might want to add some special logic for types - // like Ingress, Volume, etc. - - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() - _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { - // Make sure the incoming object is versioned as we use unstructured - // objects when we build manifests - obj := convertWithMapper(e.Object, info.Mapping) - switch e.Type { - case watch.Added, watch.Modified: - // For things like a secret or a config map, this is the best indicator - // we get. We care mostly about jobs, where what we want to see is - // the status go into a good state. For other types, like ReplicaSet - // we don't really do anything to support these as hooks. - c.Log("Add/Modify event for %s: %v", info.Name, e.Type) - switch kind { - case "Job": - return c.waitForJob(obj, info.Name) - case "Pod": - return c.waitForPodSuccess(obj, info.Name) - } - return true, nil - case watch.Deleted: - c.Log("Deleted event for %s", info.Name) - return true, nil - case watch.Error: - // Handle error and return with an error. - c.Log("Error event for %s", info.Name) - return true, errors.Errorf("failed to deploy %s", info.Name) - default: - return false, nil - } - }) - return err -} - -// waitForJob is a helper that waits for a job to complete. -// -// This operates on an event returned from a watcher. -func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*batch.Job) - if !ok { - return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) - } - - for _, c := range o.Status.Conditions { - if c.Type == batch.JobComplete && c.Status == "True" { - return true, nil - } else if c.Type == batch.JobFailed && c.Status == "True" { - return true, errors.Errorf("job %s failed: %s", name, c.Reason) - } - } - - c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) - return false, nil -} - -// waitForPodSuccess is a helper that waits for a pod to complete. -// -// This operates on an event returned from a watcher. -func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*v1.Pod) - if !ok { - return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) - } - - switch o.Status.Phase { - case v1.PodSucceeded: - c.Log("Pod %s succeeded", o.Name) - return true, nil - case v1.PodFailed: - return true, errors.Errorf("pod %s failed", o.Name) - case v1.PodPending: - c.Log("Pod %s pending", o.Name) - case v1.PodRunning: - c.Log("Pod %s running", o.Name) - } - - return false, nil -} - // scrubValidationError removes kubectl info from the message. func scrubValidationError(err error) error { if err == nil { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index f8e3c2ee2..0e6da1094 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -34,18 +34,6 @@ type Interface interface { // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) - // WatchUntilReady watches the resources given and waits until it is ready. - // - // This method is mainly for hook implementations. It watches for a resource to - // hit a particular milestone. The milestone depends on the Kind. - // - // For Jobs, "ready" means the Job ran to completion (exited without error). - // For Pods, "ready" means the Pod phase is marked "succeeded". - // For all other kinds, it means the kind was created or modified without - // error. - // TODO: Is watch until ready really behavior we want over the resources actually being ready? - WatchUntilReady(resources ResourceList, timeout time.Duration) error - // Update updates one or more resources or creates the resource // if it doesn't exist. Update(original, target ResourceList, force bool) (*Result, error) @@ -72,6 +60,18 @@ type Waiter interface { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. WaitForDelete(resources ResourceList, timeout time.Duration) error + + // WatchUntilReady watches the resources given and waits until it is ready. + // + // This method is mainly for hook implementations. It watches for a resource to + // hit a particular milestone. The milestone depends on the Kind. + // + // For Jobs, "ready" means the Job ran to completion (exited without error). + // For Pods, "ready" means the Pod phase is marked "succeeded". + // For all other kinds, it means the kind was created or modified without + // error. + // TODO: Is watch until ready really behavior we want over the resources actually being ready? + WatchUntilReady(resources ResourceList, timeout time.Duration) error } // InterfaceDeletionPropagation is introduced to avoid breaking backwards compatibility for Interface implementers. diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 1aa424c4c..2e27917bc 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -40,6 +40,11 @@ type statusWaiter struct { log func(string, ...interface{}) } +func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { + panic("todo") + return nil +} + func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 525373e4d..fdb3c9087 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -22,19 +22,27 @@ import ( "net/http" "time" + multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" + cachetools "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + batch "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/util/wait" ) @@ -177,3 +185,152 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er return selector, errors.Wrap(err, "invalid label selector") } + +func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { + return func(info *resource.Info) error { + return hw.watchUntilReady(t, info) + } +} + +// WatchUntilReady watches the resources given and waits until it is ready. +// +// This method is mainly for hook implementations. It watches for a resource to +// hit a particular milestone. The milestone depends on the Kind. +// +// For most kinds, it checks to see if the resource is marked as Added or Modified +// by the Kubernetes event stream. For some kinds, it does more: +// +// - Jobs: A job is marked "Ready" when it has successfully completed. This is +// ascertained by watching the Status fields in a job's output. +// - Pods: A pod is marked "Ready" when it has successfully completed. This is +// ascertained by watching the status.phase field in a pod's output. +// +// Handling for other kinds will be added as necessary. +func (hw *HelmWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { + // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): + // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 + return perform(resources, hw.watchTimeout(timeout)) +} + +func perform(infos ResourceList, fn func(*resource.Info) error) error { + var result error + + if len(infos) == 0 { + return ErrNoObjectsVisited + } + + errs := make(chan error) + go batchPerform(infos, fn, errs) + + for range infos { + err := <-errs + if err != nil { + result = multierror.Append(result, err) + } + } + + return result +} + +func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { + kind := info.Mapping.GroupVersionKind.Kind + switch kind { + case "Job", "Pod": + default: + return nil + } + + hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) + + // Use a selector on the name of the resource. This should be unique for the + // given version and kind + selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) + if err != nil { + return err + } + lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) + + // What we watch for depends on the Kind. + // - For a Job, we watch for completion. + // - For all else, we watch until Ready. + // In the future, we might want to add some special logic for types + // like Ingress, Volume, etc. + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) + defer cancel() + _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { + // Make sure the incoming object is versioned as we use unstructured + // objects when we build manifests + obj := convertWithMapper(e.Object, info.Mapping) + switch e.Type { + case watch.Added, watch.Modified: + // For things like a secret or a config map, this is the best indicator + // we get. We care mostly about jobs, where what we want to see is + // the status go into a good state. For other types, like ReplicaSet + // we don't really do anything to support these as hooks. + hw.log("Add/Modify event for %s: %v", info.Name, e.Type) + switch kind { + case "Job": + return hw.waitForJob(obj, info.Name) + case "Pod": + return hw.waitForPodSuccess(obj, info.Name) + } + return true, nil + case watch.Deleted: + hw.log("Deleted event for %s", info.Name) + return true, nil + case watch.Error: + // Handle error and return with an error. + hw.log("Error event for %s", info.Name) + return true, errors.Errorf("failed to deploy %s", info.Name) + default: + return false, nil + } + }) + return err +} + +// waitForJob is a helper that waits for a job to complete. +// +// This operates on an event returned from a watcher. +func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*batch.Job) + if !ok { + return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) + } + + for _, c := range o.Status.Conditions { + if c.Type == batch.JobComplete && c.Status == "True" { + return true, nil + } else if c.Type == batch.JobFailed && c.Status == "True" { + return true, errors.Errorf("job %s failed: %s", name, c.Reason) + } + } + + hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + return false, nil +} + +// waitForPodSuccess is a helper that waits for a pod to complete. +// +// This operates on an event returned from a watcher. +func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*v1.Pod) + if !ok { + return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) + } + + switch o.Status.Phase { + case v1.PodSucceeded: + c.log("Pod %s succeeded", o.Name) + return true, nil + case v1.PodFailed: + return true, errors.Errorf("pod %s failed", o.Name) + case v1.PodPending: + c.log("Pod %s pending", o.Name) + case v1.PodRunning: + c.log("Pod %s running", o.Name) + } + + return false, nil +} From 187e99d299817e824a5bc5e5b3e3345a87e3ee96 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 17:18:27 +0000 Subject: [PATCH 57/91] custom status readers look good Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 14 ++-- pkg/kube/pod_status_reader.go | 110 +++++++++++++++++++++++++++++ pkg/kube/pod_status_reader_test.go | 66 +++++++++++++++++ pkg/kube/statuswait.go | 2 +- 4 files changed, 184 insertions(+), 8 deletions(-) create mode 100644 pkg/kube/pod_status_reader.go create mode 100644 pkg/kube/pod_status_reader_test.go diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index 60760efb9..98b994030 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -53,13 +53,13 @@ func TestJobConditions(t *testing.T) { Status: batchv1.JobStatus{}, } - // t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - // us, err := toUnstructured(job) - // assert.NoError(t, err) - // result, err := jobConditions(us) - // assert.NoError(t, err) - // assert.Equal(t, status.InProgressStatus, result) - // }) + t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result.Status) + }) t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { job.Status = batchv1.JobStatus{ diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go new file mode 100644 index 000000000..25e427966 --- /dev/null +++ b/pkg/kube/pod_status_reader.go @@ -0,0 +1,110 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type customPodStatusReader struct { + genericStatusReader engine.StatusReader +} + +func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader { + genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions) + return &customJobStatusReader{ + genericStatusReader: genericStatusReader, + } +} + +func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool { + return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() +} + +func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatus(ctx, reader, resource) +} + +func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource) +} + +func podConditions(u *unstructured.Unstructured) (*status.Result, error) { + obj := u.UnstructuredContent() + phase := status.GetStringField(obj, ".status.phase", "") + switch phase { + case string(v1.PodSucceeded): + message := fmt.Sprintf("pod %s succeeded", u.GetName()) + return &status.Result{ + Status: status.CurrentStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Message: message, + }, + }, + }, nil + case string(v1.PodFailed): + message := fmt.Sprintf("pod %s failed", u.GetName()) + return &status.Result{ + Status: status.FailedStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Reason: "PodFailed", + Message: message, + }, + }, + }, nil + case string(v1.PodPending): + case string(v1.PodRunning): + } + + message := "Pod in progress" + return &status.Result{ + Status: status.InProgressStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: "PodInProgress", + Message: message, + }, + }, + }, nil +} diff --git a/pkg/kube/pod_status_reader_test.go b/pkg/kube/pod_status_reader_test.go new file mode 100644 index 000000000..2604ef026 --- /dev/null +++ b/pkg/kube/pod_status_reader_test.go @@ -0,0 +1,66 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +func TestPodConditions(t *testing.T) { + t.Parallel() + + //TODO add some more tests here and parallelize + + t.Run("pod without status returns in progress", func(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{}, + } + us, err := toUnstructured(pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result.Status) + }) + + t.Run("pod succeeded returns Current status", func(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + } + us, err := toUnstructured(pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.CurrentStatus, result.Status) + }) +} diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 2e27917bc..16751abba 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -41,7 +41,7 @@ type statusWaiter struct { } func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - panic("todo") + return nil } From d1cc9b39a33e335c56e68e1305d27bc036363406 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 9 Feb 2025 15:32:46 +0000 Subject: [PATCH 58/91] tests for status reader Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 93 ++++++++++++++++-------- pkg/kube/pod_status_reader.go | 8 +-- pkg/kube/pod_status_reader_test.go | 110 ++++++++++++++++++++--------- 3 files changed, 145 insertions(+), 66 deletions(-) diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index 98b994030..cd0dcedeb 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -30,7 +30,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" ) -func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { +func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) { + t.Helper() // If the incoming object is already unstructured, perform a deep copy first // otherwise DefaultUnstructuredConverter ends up returning the inner map without // making a copy. @@ -45,35 +46,69 @@ func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { } func TestJobConditions(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "job", + t.Parallel() + tests := []struct { + name string + job *batchv1.Job + expectedStatus status.Status + }{ + { + name: "job without Complete condition returns InProgress status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-no-condition", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{}, + }, + expectedStatus: status.InProgressStatus, }, - Spec: batchv1.JobSpec{}, - Status: batchv1.JobStatus{}, - } - - t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result.Status) - }) - - t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { - job.Status = batchv1.JobStatus{ - Conditions: []batchv1.JobCondition{ - { - Type: batchv1.JobComplete, - Status: corev1.ConditionTrue, + { + name: "job with Complete condition as True returns Current status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-complete", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + expectedStatus: status.CurrentStatus, + }, + { + name: "job with Failed condition as True returns Failed status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-failed", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + }, + }, }, }, - } - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.CurrentStatus, result.Status) - }) + expectedStatus: status.FailedStatus, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + us, err := toUnstructured(t, tc.job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, tc.expectedStatus, result.Status) + }) + } } diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go index 25e427966..752f73ac1 100644 --- a/pkg/kube/pod_status_reader.go +++ b/pkg/kube/pod_status_reader.go @@ -62,8 +62,8 @@ func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader func podConditions(u *unstructured.Unstructured) (*status.Result, error) { obj := u.UnstructuredContent() phase := status.GetStringField(obj, ".status.phase", "") - switch phase { - case string(v1.PodSucceeded): + switch v1.PodPhase(phase) { + case v1.PodSucceeded: message := fmt.Sprintf("pod %s succeeded", u.GetName()) return &status.Result{ Status: status.CurrentStatus, @@ -76,7 +76,7 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case string(v1.PodFailed): + case v1.PodFailed: message := fmt.Sprintf("pod %s failed", u.GetName()) return &status.Result{ Status: status.FailedStatus, @@ -90,8 +90,6 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case string(v1.PodPending): - case string(v1.PodRunning): } message := "Pod in progress" diff --git a/pkg/kube/pod_status_reader_test.go b/pkg/kube/pod_status_reader_test.go index 2604ef026..bb08f041a 100644 --- a/pkg/kube/pod_status_reader_test.go +++ b/pkg/kube/pod_status_reader_test.go @@ -28,39 +28,85 @@ import ( ) func TestPodConditions(t *testing.T) { - t.Parallel() - - //TODO add some more tests here and parallelize - - t.Run("pod without status returns in progress", func(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", + tests := []struct { + name string + pod *v1.Pod + expectedStatus status.Status + }{ + { + name: "pod without status returns in progress", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{}, - } - us, err := toUnstructured(pod) - assert.NoError(t, err) - result, err := podConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result.Status) - }) - - t.Run("pod succeeded returns Current status", func(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", + expectedStatus: status.InProgressStatus, + }, + { + name: "pod succeeded returns current status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + }, + expectedStatus: status.CurrentStatus, + }, + { + name: "pod failed returns failed status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + }, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Phase: v1.PodSucceeded, + expectedStatus: status.FailedStatus, + }, + { + name: "pod pending returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, }, - } - us, err := toUnstructured(pod) - assert.NoError(t, err) - result, err := podConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.CurrentStatus, result.Status) - }) + expectedStatus: status.InProgressStatus, + }, + { + name: "pod running returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-running"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + }, + expectedStatus: status.InProgressStatus, + }, + { + name: "pod with unknown phase returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodUnknown, + }, + }, + expectedStatus: status.InProgressStatus, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + us, err := toUnstructured(t, tc.pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, tc.expectedStatus, result.Status) + }) + } } From 14391dea5bf98c54ca0f9d87c82a5328f4bff063 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:06:16 +0000 Subject: [PATCH 59/91] pods and jobs working Signed-off-by: Austin Abro --- pkg/kube/pod_status_reader.go | 12 ++- pkg/kube/statuswait.go | 75 +++++++++++------- pkg/kube/statuswait_test.go | 141 ++++++++++++++++++++++++++-------- 3 files changed, 159 insertions(+), 69 deletions(-) diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go index 752f73ac1..c44af542e 100644 --- a/pkg/kube/pod_status_reader.go +++ b/pkg/kube/pod_status_reader.go @@ -22,9 +22,7 @@ import ( "context" "fmt" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -42,13 +40,13 @@ type customPodStatusReader struct { func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader { genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions) - return &customJobStatusReader{ + return &customPodStatusReader{ genericStatusReader: genericStatusReader, } } func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool { - return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() + return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind() } func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { @@ -62,8 +60,8 @@ func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader func podConditions(u *unstructured.Unstructured) (*status.Result, error) { obj := u.UnstructuredContent() phase := status.GetStringField(obj, ".status.phase", "") - switch v1.PodPhase(phase) { - case v1.PodSucceeded: + switch corev1.PodPhase(phase) { + case corev1.PodSucceeded: message := fmt.Sprintf("pod %s succeeded", u.GetName()) return &status.Result{ Status: status.CurrentStatus, @@ -76,7 +74,7 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case v1.PodFailed: + case corev1.PodFailed: message := fmt.Sprintf("pod %s failed", u.GetName()) return &status.Result{ Status: status.FailedStatus, diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 16751abba..4aff42ff2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -20,13 +20,16 @@ import ( "context" "errors" "fmt" + "sort" "time" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" "sigs.k8s.io/cli-utils/pkg/kstatus/status" @@ -40,9 +43,32 @@ type statusWaiter struct { log func(string, ...interface{}) } -func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - - return nil +func alwaysReady(u *unstructured.Unstructured) (*status.Result, error) { + return &status.Result{ + Status: status.CurrentStatus, + Message: "Resource is current", + }, nil +} + +func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + jobSR := NewCustomJobStatusReader(w.restMapper) + podSR := NewCustomPodStatusReader(w.restMapper) + // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks + genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) + + sr := &statusreaders.DelegatingStatusReader{ + StatusReaders: []engine.StatusReader{ + jobSR, + podSR, + genericSR, + }, + } + sw.StatusReader = sr + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { @@ -85,8 +111,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log)) <-done if statusCollector.Error != nil { @@ -129,8 +154,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log)) <-done if statusCollector.Error != nil { @@ -153,38 +177,33 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w return nil } -func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { - return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} +func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { + return func(statusCollector *collector.ResourceStatusCollector, e event.Event) { + var rss []*event.ResourceStatus + var nonDesiredResources []*event.ResourceStatus for _, rs := range statusCollector.ResourceStatuses { if rs == nil { continue } rss = append(rss, rs) + if rs.Status != desired { + nonDesiredResources = append(nonDesiredResources, rs) + } } + if aggregator.AggregateStatus(rss, desired) == desired { cancel() return } - } -} -func logResourceStatus(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - for _, id := range resources { - rs := sc.ResourceStatuses[id] - if rs.Status != desiredStatus { - log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) - // only log one resource to not overwhelm the logs - break - } - } + if len(nonDesiredResources) > 0 { + // Log only the first resource so the user knows what they're waiting for without being overwhelmed + sort.Slice(nonDesiredResources, func(i, j int) bool { + return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name + }) + first := nonDesiredResources[0] + logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s", + first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 131224e8b..df16bf7e9 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -17,9 +17,7 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" import ( - "context" "errors" - "fmt" "testing" "time" @@ -35,10 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -46,7 +40,7 @@ var podCurrentManifest = ` apiVersion: v1 kind: Pod metadata: - name: good-pod + name: current-pod namespace: ns status: conditions: @@ -100,11 +94,21 @@ status: status: "True" ` +var podCompleteManifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: good-pod + namespace: ns +status: + phase: Succeeded +` + var pausedDeploymentManifest = ` apiVersion: apps/v1 kind: Deployment metadata: - name: nginx + name: paused namespace: ns-1 generation: 1 spec: @@ -125,6 +129,30 @@ spec: - containerPort: 80 ` +var notReadyDeploymentManifest = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: not-ready + namespace: ns-1 + generation: 1 +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.19.6 + ports: + - containerPort: 80 +` + func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { gvk := obj.GroupVersionKind() mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) @@ -132,31 +160,6 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func TestStatusLogger(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500) - defer cancel() - readyPod := object.ObjMetadata{ - Name: "readyPod", - GroupKind: schema.GroupKind{Kind: "Pod"}, - } - notReadyPod := object.ObjMetadata{ - Name: "notReadyPod", - GroupKind: schema.GroupKind{Kind: "Pod"}, - } - objs := []object.ObjMetadata{readyPod, notReadyPod} - resourceStatusCollector := collector.NewResourceStatusCollector(objs) - resourceStatusCollector.ResourceStatuses[readyPod] = &event.ResourceStatus{ - Identifier: readyPod, - Status: status.CurrentStatus, - } - expectedMessage := "waiting for resource, name: notReadyPod, kind: Pod, desired status: Current, actual status: Unknown" - testLogger := func(message string, args ...interface{}) { - assert.Equal(t, expectedMessage, fmt.Sprintf(message, args...)) - } - logResourceStatus(ctx, objs, resourceStatusCollector, status.CurrentStatus, testLogger) -} - func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -175,7 +178,7 @@ func TestStatusWaitForDelete(t *testing.T) { name: "error when not all objects are deleted", manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, manifestsToDelete: []string{jobCompleteManifest}, - expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { @@ -378,3 +381,73 @@ func TestWaitForJobComplete(t *testing.T) { }) } } + +func TestWatchForReady(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + expectErrs []error + }{ + { + name: "succeeds if pod and job are complete", + objManifests: []string{jobCompleteManifest, podCompleteManifest}, + }, + { + name: "succeeds even when a resource that's not a pod or job is complete", + objManifests: []string{notReadyDeploymentManifest}, + }, + { + name: "Fails if job is not complete", + objManifests: []string{jobReadyManifest}, + expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "Fails if pod is not complete", + objManifests: []string{podCurrentManifest}, + expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + appsv1.SchemeGroupVersion.WithKind("Deployment"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + log: t.Logf, + } + objs := []runtime.Object{} + for _, podYaml := range tt.objManifests { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + assert.NoError(t, err) + } + resourceList := ResourceList{} + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + + err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} From f866409c508c4b5430f0943b95f25ffbfd931c3b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:13:18 +0000 Subject: [PATCH 60/91] move statusreaders to it's own package Signed-off-by: Austin Abro --- {pkg/kube => internal/statusreaders}/job_status_reader.go | 2 +- .../statusreaders}/job_status_reader_test.go | 2 +- {pkg/kube => internal/statusreaders}/pod_status_reader.go | 4 +--- .../statusreaders}/pod_status_reader_test.go | 3 +-- pkg/kube/statuswait.go | 7 ++++--- 5 files changed, 8 insertions(+), 10 deletions(-) rename {pkg/kube => internal/statusreaders}/job_status_reader.go (99%) rename {pkg/kube => internal/statusreaders}/job_status_reader_test.go (99%) rename {pkg/kube => internal/statusreaders}/pod_status_reader.go (95%) rename {pkg/kube => internal/statusreaders}/pod_status_reader_test.go (95%) diff --git a/pkg/kube/job_status_reader.go b/internal/statusreaders/job_status_reader.go similarity index 99% rename from pkg/kube/job_status_reader.go rename to internal/statusreaders/job_status_reader.go index f6eb8d3d9..d493d9e13 100644 --- a/pkg/kube/job_status_reader.go +++ b/internal/statusreaders/job_status_reader.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube +package statusreaders // This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go diff --git a/pkg/kube/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go similarity index 99% rename from pkg/kube/job_status_reader_test.go rename to internal/statusreaders/job_status_reader_test.go index cd0dcedeb..70e4ee29a 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube +package statusreaders // This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( diff --git a/pkg/kube/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go similarity index 95% rename from pkg/kube/pod_status_reader.go rename to internal/statusreaders/pod_status_reader.go index c44af542e..d3daf7cc3 100644 --- a/pkg/kube/pod_status_reader.go +++ b/internal/statusreaders/pod_status_reader.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube - -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +package statusreaders import ( "context" diff --git a/pkg/kube/pod_status_reader_test.go b/internal/statusreaders/pod_status_reader_test.go similarity index 95% rename from pkg/kube/pod_status_reader_test.go rename to internal/statusreaders/pod_status_reader_test.go index bb08f041a..a151f1aed 100644 --- a/pkg/kube/pod_status_reader_test.go +++ b/internal/statusreaders/pod_status_reader_test.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube +package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( "testing" diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 4aff42ff2..eaa473cd4 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,6 +23,7 @@ import ( "sort" "time" + helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -55,8 +56,8 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D defer cancel() w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) - jobSR := NewCustomJobStatusReader(w.restMapper) - podSR := NewCustomPodStatusReader(w.restMapper) + jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) + podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) @@ -84,7 +85,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) - newCustomJobStatusReader := NewCustomJobStatusReader(w.restMapper) + newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) From 7207565e1284e2b597ffad5179d67487ab9478c1 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:31:43 +0000 Subject: [PATCH 61/91] lint Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 7 +++--- pkg/kube/wait.go | 50 +++++++++++++++++++++--------------------- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index eaa473cd4..0729d0d1b 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,7 +23,6 @@ import ( "sort" "time" - helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -36,6 +35,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" + + helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) type statusWaiter struct { @@ -44,7 +45,7 @@ type statusWaiter struct { log func(string, ...interface{}) } -func alwaysReady(u *unstructured.Unstructured) (*status.Result, error) { +func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { return &status.Result{ Status: status.CurrentStatus, Message: "Resource is current", @@ -179,7 +180,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w } func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { - return func(statusCollector *collector.ResourceStatusCollector, e event.Event) { + return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus for _, rs := range statusCollector.ResourceStatuses { diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index fdb3c9087..83b352201 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -27,6 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + batch "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -42,7 +43,6 @@ import ( "k8s.io/client-go/kubernetes" cachetools "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" - batch "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/util/wait" ) @@ -55,20 +55,20 @@ type HelmWaiter struct { kubeClient *kubernetes.Clientset } -func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { - w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) - return w.waitForResources(resources, timeout) +func (hw *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { + hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) + return hw.waitForResources(resources, timeout) } -func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) - return w.waitForResources(resources, timeout) +func (hw *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) + return hw.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { - w.log("beginning wait for %d resources with timeout of %v", len(created), timeout) +func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { + hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -81,15 +81,15 @@ func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duratio return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { waitRetries := 30 for i, v := range created { - ready, err := w.c.IsReady(ctx, v) + ready, err := hw.c.IsReady(ctx, v) - if waitRetries > 0 && w.isRetryableError(err, v) { + if waitRetries > 0 && hw.isRetryableError(err, v) { numberOfErrors[i]++ if numberOfErrors[i] > waitRetries { - w.log("Max number of retries reached") + hw.log("Max number of retries reached") return false, err } - w.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) + hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) return false, nil } numberOfErrors[i] = 0 @@ -101,28 +101,28 @@ func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duratio }) } -func (w *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { +func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } - w.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) + hw.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) if ev, ok := err.(*apierrors.StatusError); ok { statusCode := ev.Status().Code - retryable := w.isRetryableHTTPStatusCode(statusCode) - w.log("Status code received: %d. Retryable error? %t", statusCode, retryable) + retryable := hw.isRetryableHTTPStatusCode(statusCode) + hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable) return retryable } - w.log("Retryable error? %t", true) + hw.log("Retryable error? %t", true) return true } -func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (hw *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) +func (hw *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { + hw.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -314,7 +314,7 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // waitForPodSuccess is a helper that waits for a pod to complete. // // This operates on an event returned from a watcher. -func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { +func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*v1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) @@ -322,14 +322,14 @@ func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, e switch o.Status.Phase { case v1.PodSucceeded: - c.log("Pod %s succeeded", o.Name) + hw.log("Pod %s succeeded", o.Name) return true, nil case v1.PodFailed: return true, errors.Errorf("pod %s failed", o.Name) case v1.PodPending: - c.log("Pod %s pending", o.Name) + hw.log("Pod %s pending", o.Name) case v1.PodRunning: - c.log("Pod %s running", o.Name) + hw.log("Pod %s running", o.Name) } return false, nil From bd3b5ee5d05391a63ced7c32cba05caa62c8d968 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:51:14 +0000 Subject: [PATCH 62/91] comment Signed-off-by: Austin Abro --- pkg/kube/interface.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 0e6da1094..7af8ebca6 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -70,7 +70,6 @@ type Waiter interface { // For Pods, "ready" means the Pod phase is marked "succeeded". // For all other kinds, it means the kind was created or modified without // error. - // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error } From 2b03c527f19f47039116143417d0e58422b3e789 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 20:38:28 +0000 Subject: [PATCH 63/91] set command line flags Signed-off-by: Austin Abro --- cmd/helm/flags.go | 44 ++++++++++++++++++++++++++++++++++++++ cmd/helm/install.go | 4 ++-- cmd/helm/rollback.go | 2 +- cmd/helm/upgrade.go | 2 +- pkg/action/action.go | 2 +- pkg/action/install.go | 14 +++++++++--- pkg/action/install_test.go | 7 +++--- pkg/action/rollback.go | 9 ++++++-- pkg/action/upgrade.go | 20 ++++++++++++----- pkg/action/upgrade_test.go | 11 +++++----- pkg/kube/client.go | 8 +++---- pkg/kube/client_test.go | 4 ++-- 12 files changed, 98 insertions(+), 29 deletions(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index 3d159babd..c2e5e295d 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -32,6 +32,7 @@ import ( "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/helmpath" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/postrender" "helm.sh/helm/v4/pkg/repo" ) @@ -51,6 +52,49 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) { f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line") } +func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { + cmd.Flags().Var( + newWaitValue(wait), + "wait", + "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Options are (true, false, watcher, and legacy)", + ) + // Sets the strategy to use the watcher strategy if `--wait` is used without an argument + cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) +} + +type waitValue kube.WaitStrategy + +func newWaitValue(ws *kube.WaitStrategy) *waitValue { + return (*waitValue)(ws) +} + +func (ws *waitValue) String() string { + if ws == nil { + return "" + } + return string(*ws) +} + +func (ws *waitValue) Set(s string) error { + switch s { + case string(kube.StatusWatcherStrategy), string(kube.LegacyWaiterStrategy): + *ws = waitValue(s) + return nil + case "true": + *ws = waitValue(kube.StatusWatcherStrategy) + return nil + case "false": + *ws = "" + return nil + default: + return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyWaiterStrategy) + } +} + +func (ws *waitValue) Type() string { + return "WaitStrategy" +} + func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) { f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used") f.BoolVar(&c.Verify, "verify", false, "verify the package before using it") diff --git a/cmd/helm/install.go b/cmd/helm/install.go index ec651140c..16545b6ae 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -190,8 +190,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production") - f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") + f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)") f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release") @@ -209,6 +208,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) + AddWaitFlag(cmd, &client.Wait) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { requiredArgs := 2 diff --git a/cmd/helm/rollback.go b/cmd/helm/rollback.go index a65f30a1f..83d3089e2 100644 --- a/cmd/helm/rollback.go +++ b/cmd/helm/rollback.go @@ -81,10 +81,10 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") + AddWaitFlag(cmd, &client.Wait) return cmd } diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go index 7b4267894..e5e485eae 100644 --- a/cmd/helm/upgrade.go +++ b/cmd/helm/upgrade.go @@ -278,7 +278,6 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart") f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") @@ -295,6 +294,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { addValueOptionsFlags(f, valueOpts) bindOutputFlag(cmd, &outfmt) bindPostRenderFlag(cmd, &client.PostRenderer) + AddWaitFlag(cmd, &client.Wait) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 2 { diff --git a/pkg/action/action.go b/pkg/action/action.go index 0157ce1cc..a2d7523a5 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWaiterStrategy) + kc, err := kube.New(getter, kube.StatusWatcherStrategy) if err != nil { return err } diff --git a/pkg/action/install.go b/pkg/action/install.go index ef3f0fdc7..61b5ebd33 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -79,7 +79,7 @@ type Install struct { HideSecret bool DisableHooks bool Replace bool - Wait bool + Wait kube.WaitStrategy WaitForJobs bool Devel bool DependencyUpdate bool @@ -157,6 +157,10 @@ func (i *Install) GetRegistryClient() *registry.Client { return i.ChartPathOptions.registryClient } +func (i *Install) shouldWait() bool { + return i.Wait != "" +} + func (i *Install) installCRDs(crds []chart.CRD) error { // We do these one file at a time in the order they were read. totalItems := []*resource.Info{} @@ -289,7 +293,11 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - i.Wait = i.Wait || i.Atomic + if !i.shouldWait() { + if i.Atomic { + i.Wait = "watcher" + } + } caps, err := i.cfg.getCapabilities() if err != nil { @@ -465,7 +473,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - if i.Wait { + if i.shouldWait() { if i.WaitForJobs { err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) } else { diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 9f738f0bc..6377cfda5 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -34,6 +34,7 @@ import ( "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/chart" "helm.sh/helm/v4/pkg/chartutil" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/release" "helm.sh/helm/v4/pkg/storage/driver" @@ -407,7 +408,7 @@ func TestInstallRelease_Wait(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} goroutines := runtime.NumGoroutine() @@ -426,7 +427,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, cancel := context.WithCancel(context.Background()) @@ -449,7 +450,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy instAction.WaitForJobs = true vals := map[string]interface{}{} diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 12dee35ce..8ec134832 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "helm.sh/helm/v4/pkg/chartutil" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/release" helmtime "helm.sh/helm/v4/pkg/time" ) @@ -37,7 +38,7 @@ type Rollback struct { Version int Timeout time.Duration - Wait bool + Wait kube.WaitStrategy WaitForJobs bool DisableHooks bool DryRun bool @@ -89,6 +90,10 @@ func (r *Rollback) Run(name string) error { return nil } +func (r *Rollback) shouldWait() bool { + return !(r.Wait == "") +} + // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { @@ -223,7 +228,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } } - if r.Wait { + if r.shouldWait() { if r.WaitForJobs { if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index f3e9a33bc..8d103ab6b 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -64,8 +64,8 @@ type Upgrade struct { SkipCRDs bool // Timeout is the timeout for this operation Timeout time.Duration - // Wait determines whether the wait operation should be performed after the upgrade is requested. - Wait bool + // Wait determines whether the wait operation should be performed and what type of wait. + Wait kube.WaitStrategy // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. @@ -155,7 +155,11 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - u.Wait = u.Wait || u.Atomic + if !u.shouldWait() { + if u.Atomic { + u.Wait = kube.StatusWatcherStrategy + } + } if err := chartutil.ValidateReleaseName(name); err != nil { return nil, errors.Errorf("release name is invalid: %s", name) @@ -186,6 +190,10 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return res, nil } +func (u *Upgrade) shouldWait() bool { + return u.Wait != "" +} + // isDryRun returns true if Upgrade is set to run as a DryRun func (u *Upgrade) isDryRun() bool { if u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true" { @@ -443,7 +451,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele } } - if u.Wait { + if u.shouldWait() { u.cfg.Log( "waiting for release %s resources (created: %d updated: %d deleted: %d)", upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) @@ -526,7 +534,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - rollin.Wait = true + if !u.shouldWait() { + rollin.Wait = kube.StatusWatcherStrategy + } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.Recreate = u.Recreate diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index 5437490cb..93c54560a 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -24,6 +24,7 @@ import ( "time" "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/storage/driver" "github.com/stretchr/testify/assert" @@ -52,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) { rel.Info.Status = release.StatusDeployed req.NoError(upAction.cfg.Releases.Create(rel)) - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, done := context.WithCancel(context.Background()) @@ -82,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} res, err := upAction.Run(rel.Name, buildChart(), vals) @@ -104,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy upAction.WaitForJobs = true vals := map[string]interface{}{} @@ -128,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { failer.WaitError = fmt.Errorf("I timed out") failer.DeleteError = fmt.Errorf("I tried to delete nil") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy upAction.CleanupOnFail = true vals := map[string]interface{}{} @@ -395,7 +396,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx := context.Background() diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 8dca1c51b..ba7794ac4 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -80,11 +80,11 @@ type Client struct { Waiter } -type WaitStrategy int +type WaitStrategy string const ( - StatusWaiterStrategy WaitStrategy = iota - LegacyWaiterStrategy + StatusWatcherStrategy WaitStrategy = "watcher" + LegacyWaiterStrategy WaitStrategy = "legacy" ) func init() { @@ -106,7 +106,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { return nil, err } return &HelmWaiter{kubeClient: kc, log: c.Log}, nil - case StatusWaiterStrategy: + case StatusWatcherStrategy: cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index cdf75938e..4c8719f98 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -659,7 +659,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWaiterStrategy) + c, err := New(nil, StatusWatcherStrategy) if err != nil { t.Fatal(err) } @@ -672,7 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWaiterStrategy) + c, err = New(nil, StatusWatcherStrategy) if err != nil { t.Fatal(err) } From f2dd2c91093eeecff6747f9c85a9757ccb6d4b80 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:10:06 +0000 Subject: [PATCH 64/91] add hook only waiter Signed-off-by: Austin Abro --- cmd/helm/flags.go | 9 ++++--- cmd/helm/install.go | 2 +- cmd/helm/uninstall.go | 2 +- cmd/helm/upgrade.go | 2 +- pkg/action/install.go | 20 +++++++------- pkg/action/rollback.go | 28 +++++++++----------- pkg/action/uninstall.go | 8 +++--- pkg/action/uninstall_test.go | 5 ++-- pkg/action/upgrade.go | 35 +++++++++---------------- pkg/kube/client.go | 51 ++++++++++++++++++++++-------------- pkg/kube/client_test.go | 6 ++--- pkg/kube/statuswait.go | 20 ++++++++++++++ 12 files changed, 103 insertions(+), 85 deletions(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index c2e5e295d..d1f0fec58 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -54,7 +54,7 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) { func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( - newWaitValue(wait), + newWaitValue(kube.HookOnlyStrategy, wait), "wait", "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Options are (true, false, watcher, and legacy)", ) @@ -64,7 +64,8 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { type waitValue kube.WaitStrategy -func newWaitValue(ws *kube.WaitStrategy) *waitValue { +func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue { + *ws = defaultValue return (*waitValue)(ws) } @@ -77,7 +78,7 @@ func (ws *waitValue) String() string { func (ws *waitValue) Set(s string) error { switch s { - case string(kube.StatusWatcherStrategy), string(kube.LegacyWaiterStrategy): + case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy): *ws = waitValue(s) return nil case "true": @@ -87,7 +88,7 @@ func (ws *waitValue) Set(s string) error { *ws = "" return nil default: - return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyWaiterStrategy) + return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) } } diff --git a/cmd/helm/install.go b/cmd/helm/install.go index 16545b6ae..649c5c8b8 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -198,7 +198,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored") f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema") - f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used") + f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation") diff --git a/cmd/helm/uninstall.go b/cmd/helm/uninstall.go index 9c5e25c87..3504fd322 100644 --- a/cmd/helm/uninstall.go +++ b/cmd/helm/uninstall.go @@ -76,10 +76,10 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout") f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") + AddWaitFlag(cmd, &client.Wait) return cmd } diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go index e5e485eae..092f6bdcc 100644 --- a/cmd/helm/upgrade.go +++ b/cmd/helm/upgrade.go @@ -279,7 +279,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") - f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used") + f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") diff --git a/pkg/action/install.go b/pkg/action/install.go index 61b5ebd33..a12dee11d 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -293,9 +293,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if !i.shouldWait() { + if i.Wait == kube.HookOnlyStrategy { if i.Atomic { - i.Wait = "watcher" + i.Wait = kube.StatusWatcherStrategy } } @@ -473,15 +473,13 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - if i.shouldWait() { - if i.WaitForJobs { - err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) - } else { - err = i.cfg.KubeClient.Wait(resources, i.Timeout) - } - if err != nil { - return rel, err - } + if i.WaitForJobs { + err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) + } else { + err = i.cfg.KubeClient.Wait(resources, i.Timeout) + } + if err != nil { + return rel, err } if !i.DisableHooks { diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 8ec134832..8cb8b4ed4 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -228,21 +228,19 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } } - if r.shouldWait() { - if r.WaitForJobs { - if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { - targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) - r.cfg.recordRelease(currentRelease) - r.cfg.recordRelease(targetRelease) - return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) - } - } else { - if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { - targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) - r.cfg.recordRelease(currentRelease) - r.cfg.recordRelease(targetRelease) - return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) - } + if r.WaitForJobs { + if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } else { + if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) } } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 75d999976..0a03f2180 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -41,7 +41,7 @@ type Uninstall struct { DryRun bool IgnoreNotFound bool KeepHistory bool - Wait bool + Wait kube.WaitStrategy DeletionPropagation string Timeout time.Duration Description string @@ -130,10 +130,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } res.Info = kept - if u.Wait { - if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { - errs = append(errs, err) - } + if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) } if !u.DisableHooks { diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index eca9e6ad8..1c67cab7f 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/release" ) @@ -82,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = true + unAction.Wait = kube.StatusWatcherStrategy rel := releaseStub() rel.Name = "come-fail-away" @@ -113,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = false + unAction.Wait = kube.HookOnlyStrategy unAction.DeletionPropagation = "foreground" rel := releaseStub() diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 8d103ab6b..671426a27 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -155,7 +155,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if !u.shouldWait() { + if u.Wait == kube.HookOnlyStrategy { if u.Atomic { u.Wait = kube.StatusWatcherStrategy } @@ -190,10 +190,6 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return res, nil } -func (u *Upgrade) shouldWait() bool { - return u.Wait != "" -} - // isDryRun returns true if Upgrade is set to run as a DryRun func (u *Upgrade) isDryRun() bool { if u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true" { @@ -451,22 +447,17 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele } } - if u.shouldWait() { - u.cfg.Log( - "waiting for release %s resources (created: %d updated: %d deleted: %d)", - upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) - if u.WaitForJobs { - if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { - u.cfg.recordRelease(originalRelease) - u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) - return - } - } else { - if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { - u.cfg.recordRelease(originalRelease) - u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) - return - } + if u.WaitForJobs { + if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } else { + if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return } } @@ -534,7 +525,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - if !u.shouldWait() { + if u.Wait == kube.HookOnlyStrategy { rollin.Wait = kube.StatusWatcherStrategy } rollin.WaitForJobs = u.WaitForJobs diff --git a/pkg/kube/client.go b/pkg/kube/client.go index ba7794ac4..de28c3421 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -84,7 +84,8 @@ type WaitStrategy string const ( StatusWatcherStrategy WaitStrategy = "watcher" - LegacyWaiterStrategy WaitStrategy = "legacy" + LegacyStrategy WaitStrategy = "legacy" + HookOnlyStrategy WaitStrategy = "noop" ) func init() { @@ -98,36 +99,46 @@ func init() { } } +func (c *Client) newStatusWatcher() (*statusWaiter, error) { + cfg, err := c.Factory.ToRESTConfig() + if err != nil { + return nil, err + } + dynamicClient, err := c.Factory.DynamicClient() + if err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + if err != nil { + return nil, err + } + return &statusWaiter{ + restMapper: restMapper, + client: dynamicClient, + log: c.Log, + }, nil +} + func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { - case LegacyWaiterStrategy: + case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() if err != nil { return nil, err } return &HelmWaiter{kubeClient: kc, log: c.Log}, nil case StatusWatcherStrategy: - cfg, err := c.Factory.ToRESTConfig() - if err != nil { - return nil, err - } - dynamicClient, err := c.Factory.DynamicClient() - if err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(cfg) - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + return c.newStatusWatcher() + case HookOnlyStrategy: + sw, err := c.newStatusWatcher() if err != nil { return nil, err } - return &statusWaiter{ - restMapper: restMapper, - client: dynamicClient, - log: c.Log, - }, nil + return &hookOnlyWaiter{sw: sw}, nil default: return nil, errors.New("unknown wait strategy") } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 4c8719f98..8c8f89cdb 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -513,7 +513,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -570,7 +570,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -629,7 +629,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 0729d0d1b..4a0dcd0d2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -209,3 +209,23 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func } } } + +type hookOnlyWaiter struct { + sw *statusWaiter +} + +func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { + return w.sw.WatchUntilReady(resourceList, timeout) +} + +func (w *hookOnlyWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + return nil +} + +func (w *hookOnlyWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { + return nil +} + +func (w *hookOnlyWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { + return nil +} From 978d5a33181c5f102a2d70e5b1a9f756e9e3dc61 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:11:48 +0000 Subject: [PATCH 65/91] lint Signed-off-by: Austin Abro --- pkg/kube/wait.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 83b352201..a7e3a1c7e 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -27,10 +27,8 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" - batch "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -294,15 +292,15 @@ func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info // // This operates on an event returned from a watcher. func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*batch.Job) + o, ok := obj.(*batchv1.Job) if !ok { return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) } for _, c := range o.Status.Conditions { - if c.Type == batch.JobComplete && c.Status == "True" { + if c.Type == batchv1.JobComplete && c.Status == "True" { return true, nil - } else if c.Type == batch.JobFailed && c.Status == "True" { + } else if c.Type == batchv1.JobFailed && c.Status == "True" { return true, errors.Errorf("job %s failed: %s", name, c.Reason) } } @@ -315,20 +313,20 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // // This operates on an event returned from a watcher. func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*v1.Pod) + o, ok := obj.(*corev1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) } switch o.Status.Phase { - case v1.PodSucceeded: + case corev1.PodSucceeded: hw.log("Pod %s succeeded", o.Name) return true, nil - case v1.PodFailed: + case corev1.PodFailed: return true, errors.Errorf("pod %s failed", o.Name) - case v1.PodPending: + case corev1.PodPending: hw.log("Pod %s pending", o.Name) - case v1.PodRunning: + case corev1.PodRunning: hw.log("Pod %s running", o.Name) } From 7fde4962a85fb58c09e5412a7114592ca27a3d6a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:33:15 +0000 Subject: [PATCH 66/91] set waiter in functions Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/action/install.go | 7 +++---- pkg/action/rollback.go | 8 ++++---- pkg/action/uninstall.go | 5 +++++ pkg/action/upgrade.go | 8 ++++++++ pkg/kube/client.go | 13 +++++++++++-- pkg/kube/client_test.go | 4 ++-- pkg/kube/fake/fake.go | 4 ++++ pkg/kube/fake/printer.go | 4 ++++ pkg/kube/interface.go | 2 ++ 10 files changed, 44 insertions(+), 13 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index a2d7523a5..d067c67ea 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWatcherStrategy) + kc, err := kube.New(getter) if err != nil { return err } diff --git a/pkg/action/install.go b/pkg/action/install.go index a12dee11d..a589aaf04 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -157,10 +157,6 @@ func (i *Install) GetRegistryClient() *registry.Client { return i.ChartPathOptions.registryClient } -func (i *Install) shouldWait() bool { - return i.Wait != "" -} - func (i *Install) installCRDs(crds []chart.CRD) error { // We do these one file at a time in the order they were read. totalItems := []*resource.Info{} @@ -298,6 +294,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma i.Wait = kube.StatusWatcherStrategy } } + if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } caps, err := i.cfg.getCapabilities() if err != nil { diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 8cb8b4ed4..804bdbd58 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -61,6 +61,10 @@ func (r *Rollback) Run(name string) error { return err } + if err := r.cfg.KubeClient.SetWaiter(r.Wait); err != nil { + return fmt.Errorf("failed to set kube client waiter: %w", err) + } + r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Log("preparing rollback of %s", name) @@ -90,10 +94,6 @@ func (r *Rollback) Run(name string) error { return nil } -func (r *Rollback) shouldWait() bool { - return !(r.Wait == "") -} - // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 0a03f2180..f21551bbf 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,6 +17,7 @@ limitations under the License. package action import ( + "fmt" "strings" "time" @@ -60,6 +61,10 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, err } + if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } + if u.DryRun { // In the dry run case, just see if the release exists r, err := u.cfg.releaseContent(name, 0) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 671426a27..626c1e6ad 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -160,6 +160,9 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. u.Wait = kube.StatusWatcherStrategy } } + if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } if err := chartutil.ValidateReleaseName(name); err != nil { return nil, errors.Errorf("release name is invalid: %s", name) @@ -528,6 +531,11 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e if u.Wait == kube.HookOnlyStrategy { rollin.Wait = kube.StatusWatcherStrategy } + // TODO pretty sure this is unnecessary as the waiter is already set if atomic at the start of upgrade + werr := u.cfg.KubeClient.SetWaiter(u.Wait) + if werr != nil { + return rel, errors.Wrapf(herr, "an error occurred while creating the waiter. original upgrade error: %s", err) + } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.Recreate = u.Recreate diff --git a/pkg/kube/client.go b/pkg/kube/client.go index de28c3421..425152006 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -144,8 +144,17 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { } } +func (c *Client) SetWaiter(ws WaitStrategy) error { + var err error + c.Waiter, err = c.newWaiter(ws) + if err != nil { + return err + } + return nil +} + // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, error) { +func New(getter genericclioptions.RESTClientGetter) (*Client, error) { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -155,7 +164,7 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e Log: nopLogger, } var err error - c.Waiter, err = c.newWaiter(ws) + c.Waiter, err = c.newWaiter(HookOnlyStrategy) if err != nil { return nil, err } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 8c8f89cdb..a5ad2b1eb 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -659,7 +659,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWatcherStrategy) + c, err := New(nil) if err != nil { t.Fatal(err) } @@ -672,7 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWatcherStrategy) + c, err = New(nil) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index ceca3c113..d722320f8 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -139,6 +139,10 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } +func (f *FailingKubeClient) SetWaiter(ws kube.WaitStrategy) error { + return nil +} + func createDummyResourceList() kube.ResourceList { var resInfo resource.Info resInfo.Name = "dummyName" diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 0b957d725..3c0430aa1 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -121,6 +121,10 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } +func (f *PrintingKubeClient) SetWaiter(ws kube.WaitStrategy) error { + return nil +} + func bufferize(resources kube.ResourceList) io.Reader { var builder strings.Builder for _, info := range resources { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 7af8ebca6..fc74a9833 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -47,6 +47,8 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error + // Set Waiter sets the Kube.Waiter + SetWaiter(ws WaitStrategy) error Waiter } From 5d1225549755832468972e4491991014441946f7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 17 Feb 2025 14:53:34 +0000 Subject: [PATCH 67/91] wait for delete Signed-off-by: Austin Abro --- pkg/action/uninstall_test.go | 2 +- pkg/kube/fake/fake.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 1c67cab7f..5d2b33bdf 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -100,7 +100,7 @@ func TestUninstallRelease_Wait(t *testing.T) { }` unAction.cfg.Releases.Create(rel) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("U timed out") + failer.WaitForDeleteError = fmt.Errorf("U timed out") unAction.cfg.KubeClient = failer res, err := unAction.Run(rel.Name) is.Error(err) diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index d722320f8..087fa89cb 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -36,6 +36,7 @@ type FailingKubeClient struct { CreateError error GetError error WaitError error + WaitForDeleteError error DeleteError error DeleteWithPropagationError error WatchUntilReadyError error @@ -82,8 +83,8 @@ func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Dur // WaitForDelete returns the configured error if set or prints func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { - if f.WaitError != nil { - return f.WaitError + if f.WaitForDeleteError != nil { + return f.WaitForDeleteError } return f.PrintingKubeClient.WaitForDelete(resources, d) } From ecd531657778daf3fde3777e39fbf628fa9eb4a6 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:50:04 +0000 Subject: [PATCH 68/91] lint Signed-off-by: Austin Abro --- cmd/helm/install.go | 2 +- pkg/kube/fake/fake.go | 2 +- pkg/kube/statuswait.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/helm/install.go b/cmd/helm/install.go index 649c5c8b8..4d72be966 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -190,7 +190,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production") - f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)") f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release") diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index 087fa89cb..c4322733a 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -140,7 +140,7 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } -func (f *FailingKubeClient) SetWaiter(ws kube.WaitStrategy) error { +func (f *FailingKubeClient) SetWaiter(_ kube.WaitStrategy) error { return nil } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 4a0dcd0d2..3c1e90a36 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -218,14 +218,14 @@ func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time return w.sw.WatchUntilReady(resourceList, timeout) } -func (w *hookOnlyWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error { return nil } -func (w *hookOnlyWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error { return nil } -func (w *hookOnlyWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error { return nil } From efde8304059b791ef48afaf602d5cc4c7a537f3d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:53:06 +0000 Subject: [PATCH 69/91] better name Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 425152006..ff062a172 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -85,7 +85,7 @@ type WaitStrategy string const ( StatusWatcherStrategy WaitStrategy = "watcher" LegacyStrategy WaitStrategy = "legacy" - HookOnlyStrategy WaitStrategy = "noop" + HookOnlyStrategy WaitStrategy = "hookOnly" ) func init() { From ea87c49d1b6ef516d95226ce7fffd610d99ca16c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:53:47 +0000 Subject: [PATCH 70/91] print Signed-off-by: Austin Abro --- pkg/kube/fake/printer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 3c0430aa1..82649b202 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -121,7 +121,7 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (f *PrintingKubeClient) SetWaiter(ws kube.WaitStrategy) error { +func (p *PrintingKubeClient) SetWaiter(_ kube.WaitStrategy) error { return nil } From 5d31fb09d2110242dd91ff64e9b8e161f38e15d4 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:55:13 +0000 Subject: [PATCH 71/91] better help text Signed-off-by: Austin Abro --- cmd/helm/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index d1f0fec58..c73bab63f 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -56,7 +56,7 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Options are (true, false, watcher, and legacy)", + "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are true, false, watcher, and legacy", ) // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) From b79dfd09b0b9a8173ec23e5a72b3a0c444863dee Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 03:51:38 +0000 Subject: [PATCH 72/91] refactor Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 9 +++-- pkg/kube/statuswait_test.go | 70 +++++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 3c1e90a36..baf5814b1 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -187,6 +187,11 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func if rs == nil { continue } + // If a resource is already deleted before waiting has started, it will show as unknown + // this check ensures we don't wait forever for a resource that is already deleted + if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { + continue + } rss = append(rss, rs) if rs.Status != desired { nonDesiredResources = append(nonDesiredResources, rs) @@ -199,12 +204,12 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func } if len(nonDesiredResources) > 0 { - // Log only the first resource so the user knows what they're waiting for without being overwhelmed + // Log a single resource so the user knows what they're waiting for without an overwhelming amount of output sort.Slice(nonDesiredResources, func(i, j int) bool { return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s", + logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n", first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status) } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index df16bf7e9..2b10dfef1 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -160,6 +160,18 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } +func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtime.Object { + objects := []runtime.Object{} + for _, manifest := range manifests { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(manifest), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objects = append(objects, resource) + } + return objects +} + func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -190,7 +202,6 @@ func TestStatusWaitForDelete(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), - appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWaiter := statusWaiter{ @@ -198,31 +209,25 @@ func TestStatusWaitForDelete(t *testing.T) { client: fakeClient, log: t.Logf, } - createdObjs := []runtime.Object{} - for _, manifest := range tt.manifestsToCreate { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(manifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - createdObjs = append(createdObjs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + objsToCreate := getUnstructuredObjsFromManifests(t, tt.manifestsToCreate) + for _, objToCreate := range objsToCreate { + u := objToCreate.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) } - for _, manifest := range tt.manifestsToDelete { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(manifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - gvr := getGVR(t, fakeMapper, resource) + objsToDelete := getUnstructuredObjsFromManifests(t, tt.manifestsToDelete) + for _, objToDelete := range objsToDelete { + u := objToDelete.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) go func() { time.Sleep(timeUntilPodDelete) - err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) assert.NoError(t, err) }() } resourceList := ResourceList{} - for _, obj := range createdObjs { + for _, obj := range objsToCreate { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) resourceList = append(resourceList, list...) @@ -237,6 +242,35 @@ func TestStatusWaitForDelete(t *testing.T) { } } +func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { + t.Parallel() + c := newTestClient(t) + timeout := time.Second + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + statusWaiter := statusWaiter{ + restMapper: fakeMapper, + client: fakeClient, + log: t.Logf, + } + createdObjs := []runtime.Object{} + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podCurrentManifest), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + createdObjs = append(createdObjs, resource) + resourceList := ResourceList{} + for _, obj := range createdObjs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + err = statusWaiter.WaitForDelete(resourceList, timeout) + assert.NoError(t, err) +} + func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { From 75292c5e04e2e6684e6470b59e920e31a23d3492 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 04:05:12 +0000 Subject: [PATCH 73/91] refactor Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 40 +++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 2b10dfef1..d6d7f5e36 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -160,7 +160,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtime.Object { +func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object { objects := []runtime.Object{} for _, manifest := range manifests { m := make(map[string]interface{}) @@ -172,6 +172,16 @@ func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtim return objects } +func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList { + resourceList := ResourceList{} + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + return resourceList +} + func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -209,14 +219,14 @@ func TestStatusWaitForDelete(t *testing.T) { client: fakeClient, log: t.Logf, } - objsToCreate := getUnstructuredObjsFromManifests(t, tt.manifestsToCreate) + objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { u := objToCreate.(*unstructured.Unstructured) gvr := getGVR(t, fakeMapper, u) err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) } - objsToDelete := getUnstructuredObjsFromManifests(t, tt.manifestsToDelete) + objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete) for _, objToDelete := range objsToDelete { u := objToDelete.(*unstructured.Unstructured) gvr := getGVR(t, fakeMapper, u) @@ -226,12 +236,7 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) }() } - resourceList := ResourceList{} - for _, obj := range objsToCreate { - list, err := c.Build(objBody(obj), false) - assert.NoError(t, err) - resourceList = append(resourceList, list...) - } + resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -255,19 +260,10 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { client: fakeClient, log: t.Logf, } - createdObjs := []runtime.Object{} - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podCurrentManifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - createdObjs = append(createdObjs, resource) - resourceList := ResourceList{} - for _, obj := range createdObjs { - list, err := c.Build(objBody(obj), false) - assert.NoError(t, err) - resourceList = append(resourceList, list...) - } - err = statusWaiter.WaitForDelete(resourceList, timeout) + // Don't create the object to test that the wait for delete works when the object doesn't exist + objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + resourceList := getResourceListFromRuntimeObjs(t, c, objManifest) + err := statusWaiter.WaitForDelete(resourceList, timeout) assert.NoError(t, err) } From 4f33e5c97fe4d38e9bcb742b053d9848f84b8e63 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 04:08:51 +0000 Subject: [PATCH 74/91] test refactoring Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 63 ++++++++++--------------------------- 1 file changed, 16 insertions(+), 47 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index d6d7f5e36..0e88f1bbe 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -319,25 +319,14 @@ func TestStatusWait(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) } - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -384,24 +373,14 @@ func TestWaitForJobComplete(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) } - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -424,7 +403,7 @@ func TestWatchForReady(t *testing.T) { objManifests: []string{jobCompleteManifest, podCompleteManifest}, }, { - name: "succeeds even when a resource that's not a pod or job is complete", + name: "succeeds when a resource that's not a pod or job is not ready", objManifests: []string{notReadyDeploymentManifest}, }, { @@ -454,24 +433,14 @@ func TestWatchForReady(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) } - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) From 5a254dae2138e830403685527129a46be74c9b8a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 14:42:14 +0000 Subject: [PATCH 75/91] cleanup Signed-off-by: Austin Abro --- pkg/kube/client.go | 186 ++++++++++----------------------------------- 1 file changed, 42 insertions(+), 144 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index d174614db..333c0ec65 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -687,150 +688,47 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } -// func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { -// kind := info.Mapping.GroupVersionKind.Kind -// switch kind { -// case "Job", "Pod": -// default: -// return nil -// } - -// c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) - -// // Use a selector on the name of the resource. This should be unique for the -// // given version and kind -// selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) -// if err != nil { -// return err -// } -// lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) - -// // What we watch for depends on the Kind. -// // - For a Job, we watch for completion. -// // - For all else, we watch until Ready. -// // In the future, we might want to add some special logic for types -// // like Ingress, Volume, etc. - -// ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) -// defer cancel() -// _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { -// // Make sure the incoming object is versioned as we use unstructured -// // objects when we build manifests -// obj := convertWithMapper(e.Object, info.Mapping) -// switch e.Type { -// case watch.Added, watch.Modified: -// // For things like a secret or a config map, this is the best indicator -// // we get. We care mostly about jobs, where what we want to see is -// // the status go into a good state. For other types, like ReplicaSet -// // we don't really do anything to support these as hooks. -// c.Log("Add/Modify event for %s: %v", info.Name, e.Type) -// switch kind { -// case "Job": -// return c.waitForJob(obj, info.Name) -// case "Pod": -// return c.waitForPodSuccess(obj, info.Name) -// } -// return true, nil -// case watch.Deleted: -// c.Log("Deleted event for %s", info.Name) -// return true, nil -// case watch.Error: -// // Handle error and return with an error. -// c.Log("Error event for %s", info.Name) -// return true, errors.Errorf("failed to deploy %s", info.Name) -// default: -// return false, nil -// } -// }) -// return err -// } - -// // waitForJob is a helper that waits for a job to complete. -// // -// // This operates on an event returned from a watcher. -// func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { -// o, ok := obj.(*batch.Job) -// if !ok { -// return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) -// } - -// for _, c := range o.Status.Conditions { -// if c.Type == batch.JobComplete && c.Status == "True" { -// return true, nil -// } else if c.Type == batch.JobFailed && c.Status == "True" { -// return true, errors.Errorf("job %s failed: %s", name, c.Reason) -// } -// } - -// c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) -// return false, nil -// } - -// // waitForPodSuccess is a helper that waits for a pod to complete. -// // -// // This operates on an event returned from a watcher. -// func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { -// o, ok := obj.(*v1.Pod) -// if !ok { -// return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) -// } - -// switch o.Status.Phase { -// case v1.PodSucceeded: -// c.Log("Pod %s succeeded", o.Name) -// return true, nil -// case v1.PodFailed: -// return true, errors.Errorf("pod %s failed", o.Name) -// case v1.PodPending: -// c.Log("Pod %s pending", o.Name) -// case v1.PodRunning: -// c.Log("Pod %s running", o.Name) -// } - -// return false, nil -// } - -// // GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions -// func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { -// podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) -// if err != nil { -// return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err) -// } -// return podList, nil -// } - -// // OutputContainerLogsForPodList is a helper that outputs logs for a list of pods -// func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error { -// for _, pod := range podList.Items { -// for _, container := range pod.Spec.Containers { -// options := &v1.PodLogOptions{ -// Container: container.Name, -// } -// request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options) -// err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name)) -// if err2 != nil { -// return err2 -// } -// } -// } -// return nil -// } - -// func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error { -// readCloser, err := request.Stream(context.Background()) -// if err != nil { -// return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName) -// } -// defer readCloser.Close() -// _, err = io.Copy(writer, readCloser) -// if err != nil { -// return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) -// } -// if err != nil { -// return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName) -// } -// return nil -// } +// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions +func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { + podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) + if err != nil { + return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err) + } + return podList, nil +} + +// OutputContainerLogsForPodList is a helper that outputs logs for a list of pods +func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error { + for _, pod := range podList.Items { + for _, container := range pod.Spec.Containers { + options := &v1.PodLogOptions{ + Container: container.Name, + } + request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options) + err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name)) + if err2 != nil { + return err2 + } + } + } + return nil +} + +func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error { + readCloser, err := request.Stream(context.Background()) + if err != nil { + return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName) + } + defer readCloser.Close() + _, err = io.Copy(writer, readCloser) + if err != nil { + return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) + } + if err != nil { + return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName) + } + return nil +} // scrubValidationError removes kubectl info from the message. func scrubValidationError(err error) error { From a18589c4d8d8d7f71e26397d51e41ff967038ca2 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 14:42:52 +0000 Subject: [PATCH 76/91] fmt Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index baf5814b1..bc3958848 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -188,7 +188,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func continue } // If a resource is already deleted before waiting has started, it will show as unknown - // this check ensures we don't wait forever for a resource that is already deleted + // this check ensures we don't wait forever for a resource that is already deleted if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { continue } From 29c250c233c3efecc2dc7f2a8c1b9810299de5d8 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 16:09:30 +0000 Subject: [PATCH 77/91] add back interface log check Signed-off-by: Austin Abro --- pkg/kube/interface.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 64d954853..d6ac823f1 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -118,5 +118,6 @@ type InterfaceResources interface { } var _ Interface = (*Client)(nil) +var _ InterfaceLogs = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) From 5ecca2ed143187ba4c8e3dde44c9ec5f1627ce6b Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:53:00 -0500 Subject: [PATCH 78/91] Apply suggestions from code review Co-authored-by: Scott Rigby Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- internal/statusreaders/job_status_reader.go | 5 +++-- internal/statusreaders/job_status_reader_test.go | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/internal/statusreaders/job_status_reader.go b/internal/statusreaders/job_status_reader.go index d493d9e13..e11843f6d 100644 --- a/internal/statusreaders/job_status_reader.go +++ b/internal/statusreaders/job_status_reader.go @@ -1,5 +1,8 @@ /* Copyright The Helm Authors. +This file was initially copied and modified from + https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,8 +19,6 @@ limitations under the License. package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go - import ( "context" "fmt" diff --git a/internal/statusreaders/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go index 70e4ee29a..5f07be91c 100644 --- a/internal/statusreaders/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -1,5 +1,8 @@ /* Copyright The Helm Authors. +This file was initially copied and modified from + https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +19,6 @@ limitations under the License. package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( "testing" From 68f72e5c3fb9c2bc6486a29a6eb5522a05ea8f81 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 3 Mar 2025 19:56:00 +0000 Subject: [PATCH 79/91] hook only strategy when wait=false Signed-off-by: Austin Abro --- pkg/cmd/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 10c7e9714..044b19e04 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -85,7 +85,7 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": - *ws = "" + *ws = waitValue(kube.HookOnlyStrategy) return nil default: return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) From 8d964588cd3b54b470510ee9663eedba25c6186b Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Tue, 4 Mar 2025 17:47:25 -0500 Subject: [PATCH 80/91] Update internal/statusreaders/job_status_reader.go Co-authored-by: Scott Rigby Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- internal/statusreaders/job_status_reader.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/statusreaders/job_status_reader.go b/internal/statusreaders/job_status_reader.go index e11843f6d..3cd9ac7ac 100644 --- a/internal/statusreaders/job_status_reader.go +++ b/internal/statusreaders/job_status_reader.go @@ -29,11 +29,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" ) type customJobStatusReader struct { From 24dc64382292cbc3ad30743f6d2c63fbfcd810d5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 22:56:11 +0000 Subject: [PATCH 81/91] restmapper Signed-off-by: Austin Abro --- go.mod | 49 ++++---- go.sum | 117 ++++++++---------- .../statusreaders/job_status_reader_test.go | 2 +- internal/statusreaders/pod_status_reader.go | 10 +- .../statusreaders/pod_status_reader_test.go | 2 +- pkg/kube/statuswait.go | 16 +-- pkg/kube/statuswait_test.go | 2 +- 7 files changed, 95 insertions(+), 103 deletions(-) diff --git a/go.mod b/go.mod index 1d318ea25..3e4c81cdc 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/evanphx/json-patch v5.9.11+incompatible + github.com/fluxcd/cli-utils v0.36.0-flux.12 github.com/foxcpp/go-mockdns v1.1.0 github.com/gobwas/glob v0.2.3 github.com/gofrs/flock v0.12.1 @@ -46,7 +47,6 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.2 oras.land/oras-go/v2 v2.5.0 - sigs.k8s.io/cli-utils v0.37.2 sigs.k8s.io/controller-runtime v0.20.1 sigs.k8s.io/yaml v1.4.0 ) @@ -73,30 +73,30 @@ require ( github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-errors/errors v1.4.2 // indirect + github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -110,7 +110,7 @@ require ( github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect @@ -123,13 +123,13 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.35.1 // indirect + github.com/onsi/gomega v1.36.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect @@ -142,10 +142,11 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect @@ -158,31 +159,31 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect go.opentelemetry.io/otel/sdk v1.32.0 // indirect go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/grpc v1.68.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.4 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/component-base v0.32.2 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect ) diff --git a/go.sum b/go.sum index 57fbc6117..9fbf29bfc 100644 --- a/go.sum +++ b/go.sum @@ -60,7 +60,6 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= @@ -81,26 +80,28 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fluxcd/cli-utils v0.36.0-flux.12 h1:8cD6SmaKa/lGo0KCu0XWiGrXJMLMBQwSsnoP0cG+Gjw= +github.com/fluxcd/cli-utils v0.36.0-flux.12/go.mod h1:Nb/zMqsJAzjz4/HIsEc2LTqxC6eC0rV26t4hkJT/F9o= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -113,12 +114,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= @@ -141,8 +140,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -150,8 +149,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf h1:BvBLUD2hkvLI3dJTJMiopAq8/wp43AAZKTP7qdpptbU= +github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -160,8 +159,8 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= @@ -197,11 +196,8 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -214,8 +210,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -257,10 +253,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -288,8 +284,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -323,17 +319,12 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -350,14 +341,16 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -382,16 +375,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -416,8 +409,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -431,10 +424,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -488,8 +481,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -498,8 +491,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -510,8 +503,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -542,25 +535,23 @@ k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us= k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg= -sigs.k8s.io/cli-utils v0.37.2/go.mod h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco= sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= -sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= -sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= +sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/statusreaders/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go index 5f07be91c..6e9ed5a79 100644 --- a/internal/statusreaders/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" ) func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) { diff --git a/internal/statusreaders/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go index d3daf7cc3..c074c3487 100644 --- a/internal/statusreaders/pod_status_reader.go +++ b/internal/statusreaders/pod_status_reader.go @@ -25,11 +25,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" ) type customPodStatusReader struct { diff --git a/internal/statusreaders/pod_status_reader_test.go b/internal/statusreaders/pod_status_reader_test.go index a151f1aed..ba0d1f1bb 100644 --- a/internal/statusreaders/pod_status_reader_test.go +++ b/internal/statusreaders/pod_status_reader_test.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" ) func TestPodConditions(t *testing.T) { diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bc3958848..22242b40f 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,18 +23,18 @@ import ( "sort" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/watcher" + "github.com/fluxcd/cli-utils/pkg/object" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/cli-utils/pkg/object" helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0e88f1bbe..fee325ddc 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -33,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" - "sigs.k8s.io/cli-utils/pkg/testutil" ) var podCurrentManifest = ` From 3a296aacade3e66a68e3aac88fc3abf0ef2f81a5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 23:15:02 +0000 Subject: [PATCH 82/91] rest mapper Signed-off-by: Austin Abro --- internal/client/client.go | 369 ++++++++++++++++++++++++++++++++++++++ pkg/kube/client.go | 5 +- 2 files changed, 372 insertions(+), 2 deletions(-) create mode 100644 internal/client/client.go diff --git a/internal/client/client.go b/internal/client/client.go new file mode 100644 index 000000000..b55ddb3f8 --- /dev/null +++ b/internal/client/client.go @@ -0,0 +1,369 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + "net/http" + "sort" + "strings" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +/* +Adapted from controller-runtime v0.19 before The Kubernetes Aggregated Discovery was enabled +in controller-runtime v0.20 which broke the preferred version discovery in the RESTMapper. +https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go +*/ + +// NewLazyRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewLazyRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +type mapper struct { + mapper meta.RESTMapper + client discovery.DiscoveryInterface + knownGroups map[string]*restmapper.APIGroupResources + apiGroups map[string]*metav1.APIGroup + + // mutex to provide thread-safe mapper reloading. + mu sync.RWMutex +} + +// KindFor implements Mapper.KindFor. +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err + } + res, err = m.getMapper().KindFor(resource) + } + + return res, err +} + +// KindsFor implements Mapper.KindsFor. +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err + } + res, err = m.getMapper().KindsFor(resource) + } + + return res, err +} + +// ResourceFor implements Mapper.ResourceFor. +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err + } + res, err = m.getMapper().ResourceFor(input) + } + + return res, err +} + +// ResourcesFor implements Mapper.ResourcesFor. +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err + } + res, err = m.getMapper().ResourcesFor(input) + } + + return res, err +} + +// RESTMapping implements Mapper.RESTMapping. +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMapping(gk, versions...) + } + + return res, err +} + +// RESTMappings implements Mapper.RESTMappings. +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMappings(gk, versions...) + } + + return res, err +} + +// ResourceSingularizer implements Mapper.ResourceSingularizer. +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper +} + +// addKnownGroupAndReload reloads the mapper with updated information about missing API group. +// versions can be specified for partial updates, for instance for v1beta1 version only. +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } + + // If no specific versions are set by user, we will scan all available ones for the API group. + // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls + // this data will be taken from cache. + if len(versions) == 0 { + apiGroup, err := m.findAPIGroupByName(groupName) + if err != nil { + return err + } + if apiGroup != nil { + for _, version := range apiGroup.Versions { + versions = append(versions, version.Version) + } + } + } + + m.mu.Lock() + defer m.mu.Unlock() + + // Create or fetch group resources from cache. + groupResources := &restmapper.APIGroupResources{ + Group: metav1.APIGroup{Name: groupName}, + VersionedResources: make(map[string][]metav1.APIResource), + } + + // Update information for group resources about versioned resources. + // The number of API calls is equal to the number of versions: /apis//. + // If we encounter a missing API version (NotFound error), we will remove the group from + // the m.apiGroups and m.knownGroups caches. + // If this happens, in the next call the group will be added back to apiGroups + // and only the existing versions will be loaded in knownGroups. + groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) + if err != nil { + return fmt.Errorf("failed to get API group resources: %w", err) + } + + if _, ok := m.knownGroups[groupName]; ok { + groupResources = m.knownGroups[groupName] + } + + // Update information for group resources about the API group by adding new versions. + // Ignore the versions that are already registered. + for groupVersion, resources := range groupVersionResources { + version := groupVersion.Version + + groupResources.VersionedResources[version] = resources.APIResources + found := false + for _, v := range groupResources.Group.Versions { + if v.Version == version { + found = true + break + } + } + + if !found { + groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(), + Version: version, + }) + } + } + + // Update data in the cache. + m.knownGroups[groupName] = groupResources + + // Finally, update the group with received information and regenerate the mapper. + updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) + for _, agr := range m.knownGroups { + updatedGroupResources = append(updatedGroupResources, agr) + } + + m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) + return nil +} + +// findAPIGroupByNameLocked returns API group by its name. +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { + // Looking in the cache first. + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil + } + } + + // Update the cache if nothing was found. + apiGroups, err := m.client.ServerGroups() + if err != nil { + return nil, fmt.Errorf("failed to get server groups: %w", err) + } + if len(apiGroups.Groups) == 0 { + return nil, fmt.Errorf("received an empty API groups list") + } + + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() + + // Looking in the cache again. + m.mu.RLock() + defer m.mu.RUnlock() + + // Don't return an error here if the API group is not present. + // The reloaded RESTMapper will take care of returning a NoMatchError. + return m.apiGroups[groupName], nil +} + +// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. +// This method might modify the cache so it needs to be called under the lock. +func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + for _, version := range versions { + groupVersion := schema.GroupVersion{Group: groupName, Version: version} + + apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) + if apierrors.IsNotFound(err) { + // If the version is not found, we remove the group from the cache + // so it gets refreshed on the next call. + if m.isAPIGroupCached(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCached(groupVersion) { + delete(m.knownGroups, groupName) + } + continue + } else if err != nil { + failedGroups[groupVersion] = err + } + + if apiResourceList != nil { + // even in case of error, some fallback might have been returned. + groupVersionResources[groupVersion] = apiResourceList + } + } + + if len(failedGroups) > 0 { + err := ErrResourceDiscoveryFailed(failedGroups) + return nil, &err + } + + return groupVersionResources, nil +} + +// isGroupVersionCached checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. +func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} + +// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. +// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for +// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. +type ErrResourceDiscoveryFailed map[schema.GroupVersion]error + +// Error implements the error interface. +func (e *ErrResourceDiscoveryFailed) Error() string { + subErrors := []string{} + for k, v := range *e { + subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(subErrors) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) +} + +func (e *ErrResourceDiscoveryFailed) Unwrap() []error { + subErrors := []error{} + for gv, err := range *e { + if apierrors.IsNotFound(err) { + err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} + } + subErrors = append(subErrors, err) + } + return subErrors +} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 333c0ec65..582c05c58 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -35,7 +35,6 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,6 +50,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" + + helmClient "helm.sh/helm/v4/internal/client" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -113,7 +114,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + restMapper, err := helmClient.NewLazyRESTMapper(cfg, httpClient) if err != nil { return nil, err } From ddc7baaacac3b1aeaf5f4dd4e3e029cac40282ee Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 23:17:40 +0000 Subject: [PATCH 83/91] copyright things Signed-off-by: Austin Abro --- internal/client/client.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/client/client.go b/internal/client/client.go index b55ddb3f8..cb4ddb60e 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -1,4 +1,7 @@ /* +Copyright The Helm Authors. +This file was initially copied and modified from + https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); From 600947b32e6557ab6f5ebf44fb754abbb5e63d2a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:27:09 +0000 Subject: [PATCH 84/91] client->restmapper Signed-off-by: Austin Abro --- internal/{client/client.go => restmapper/restmapper.go} | 2 +- pkg/kube/client.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename internal/{client/client.go => restmapper/restmapper.go} (99%) diff --git a/internal/client/client.go b/internal/restmapper/restmapper.go similarity index 99% rename from internal/client/client.go rename to internal/restmapper/restmapper.go index cb4ddb60e..85b7c2a69 100644 --- a/internal/client/client.go +++ b/internal/restmapper/restmapper.go @@ -17,7 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package client +package restmapper import ( "fmt" diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 582c05c58..1244882aa 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -51,7 +51,7 @@ import ( "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - helmClient "helm.sh/helm/v4/internal/client" + helmRestmapper "helm.sh/helm/v4/internal/restmapper" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -114,7 +114,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := helmClient.NewLazyRESTMapper(cfg, httpClient) + restMapper, err := helmRestmapper.NewLazyRESTMapper(cfg, httpClient) if err != nil { return nil, err } From 2948279fb90bcb0d22e9f160f1f96b424ce74b7d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:29:47 +0000 Subject: [PATCH 85/91] cleanup if statement Signed-off-by: Austin Abro --- pkg/action/install.go | 6 ++---- pkg/action/upgrade.go | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index c96e1a0ff..be76a634f 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -289,10 +289,8 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if i.Wait == kube.HookOnlyStrategy { - if i.Atomic { - i.Wait = kube.StatusWatcherStrategy - } + if i.Wait == kube.HookOnlyStrategy && i.Atomic { + i.Wait = kube.StatusWatcherStrategy } if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { return nil, fmt.Errorf("failed to set kube client waiter: %w", err) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 851ac512a..ba5dfb5d1 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -155,10 +155,8 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if u.Wait == kube.HookOnlyStrategy { - if u.Atomic { - u.Wait = kube.StatusWatcherStrategy - } + if u.Wait == kube.HookOnlyStrategy && u.Atomic { + u.Wait = kube.StatusWatcherStrategy } if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { return nil, fmt.Errorf("failed to set kube client waiter: %w", err) From e773a810eea2649d3cb52e2b140cc4492a94be26 Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Fri, 7 Mar 2025 09:30:28 -0500 Subject: [PATCH 86/91] Update pkg/cmd/flags.go Co-authored-by: George Jenkins Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- pkg/cmd/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 044b19e04..0fcad59fa 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -56,7 +56,7 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are true, false, watcher, and legacy", + "if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'", ) // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) From 0dffe83ef3299aaa3e2e17a76743ad791c40f559 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:35:44 +0000 Subject: [PATCH 87/91] warnings Signed-off-by: Austin Abro --- pkg/cmd/flags.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 0fcad59fa..ed3b83a55 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -82,13 +82,15 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(s) return nil case "true": + Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": + Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") *ws = waitValue(kube.HookOnlyStrategy) return nil default: - return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) + return fmt.Errorf("invalid wait input %q. Valid inputs are %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) } } From 10f78c814cd1c7d0b784a9371bcf56c1609ceece Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:37:04 +0000 Subject: [PATCH 88/91] legacy waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- pkg/kube/wait.go | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 1244882aa..61e681ad3 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -132,7 +132,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { if err != nil { return nil, err } - return &HelmWaiter{kubeClient: kc, log: c.Log}, nil + return &legacyWaiter{kubeClient: kc, log: c.Log}, nil case StatusWatcherStrategy: return c.newStatusWatcher() case HookOnlyStrategy: diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index a7e3a1c7e..9aeb93451 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -45,27 +45,27 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -// HelmWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 +// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 // Helm 4 now uses the StatusWaiter implementation instead -type HelmWaiter struct { +type legacyWaiter struct { c ReadyChecker log func(string, ...interface{}) kubeClient *kubernetes.Clientset } -func (hw *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error { hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) return hw.waitForResources(resources, timeout) } -func (hw *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) return hw.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error { hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -99,7 +99,7 @@ func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Durati }) } -func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { +func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } @@ -114,12 +114,12 @@ func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool return true } -func (hw *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (hw *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { hw.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -184,7 +184,7 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er return selector, errors.Wrap(err, "invalid label selector") } -func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { +func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { return func(info *resource.Info) error { return hw.watchUntilReady(t, info) } @@ -204,7 +204,7 @@ func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { // ascertained by watching the status.phase field in a pod's output. // // Handling for other kinds will be added as necessary. -func (hw *HelmWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 return perform(resources, hw.watchTimeout(timeout)) @@ -230,7 +230,7 @@ func perform(infos ResourceList, fn func(*resource.Info) error) error { return result } -func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { +func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { kind := info.Mapping.GroupVersionKind.Kind switch kind { case "Job", "Pod": @@ -291,7 +291,7 @@ func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info // waitForJob is a helper that waits for a job to complete. // // This operates on an event returned from a watcher. -func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { +func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*batchv1.Job) if !ok { return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) @@ -312,7 +312,7 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // waitForPodSuccess is a helper that waits for a pod to complete. // // This operates on an event returned from a watcher. -func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { +func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*corev1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) From 386523bdbc6f5e5f289ade7d9d4cf4c935354450 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 13:55:39 +0000 Subject: [PATCH 89/91] update to get waiter instead of set Signed-off-by: Austin Abro --- pkg/action/action.go | 5 +-- pkg/action/hooks.go | 22 ++++++++---- pkg/action/install.go | 28 ++++++++++------ pkg/action/install_test.go | 6 ++-- pkg/action/release_testing.go | 3 +- pkg/action/rollback.go | 19 +++++------ pkg/action/uninstall.go | 14 ++++---- pkg/action/uninstall_test.go | 4 +-- pkg/action/upgrade.go | 35 +++++++++---------- pkg/action/upgrade_test.go | 10 +++--- pkg/cmd/install.go | 2 +- pkg/cmd/rollback.go | 2 +- pkg/cmd/uninstall.go | 2 +- pkg/cmd/upgrade.go | 4 +-- pkg/kube/client.go | 13 +++----- pkg/kube/client_test.go | 16 +++------ pkg/kube/fake/fake.go | 63 ++++++++++++++++++++++------------- pkg/kube/fake/printer.go | 28 ++++++++++------ pkg/kube/interface.go | 6 ++-- 19 files changed, 151 insertions(+), 131 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 1ca6a4dfa..ea2dc0dd7 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -375,10 +375,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter) - if err != nil { - return err - } + kc := kube.New(getter) kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 6637891c5..9d0bb390b 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -35,7 +35,7 @@ import ( ) // execHook executes all of the hooks for the given hook event. -func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error { +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration) error { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -59,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} } - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { return err } @@ -87,8 +87,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path) } + waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + if err != nil { + return errors.Wrapf(err, "unable to get waiter") + } // Watch hook resources until they have completed - err = cfg.KubeClient.WatchUntilReady(resources, timeout) + err = waiter.WatchUntilReady(resources, timeout) // Note the time of success/failure h.LastRun.CompletedAt = helmtime.Now() // Mark hook as succeeded or failed @@ -101,7 +105,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { // We log the error here as we want to propagate the hook failure upwards to the release object. log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) } @@ -118,7 +122,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // We log here as we still want to attempt hook resource deletion even if output logging fails. log.Printf("error outputting logs for hook failure: %v", err) } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { return err } } @@ -139,7 +143,7 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. if h.Kind == "CustomResourceDefinition" { @@ -155,7 +159,11 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return errors.New(joinErrors(errs)) } - if err := cfg.KubeClient.WaitForDelete(resources, timeout); err != nil { + waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + if err != nil { + return err + } + if err := waiter.WaitForDelete(resources, timeout); err != nil { return err } } diff --git a/pkg/action/install.go b/pkg/action/install.go index be76a634f..735b8ac17 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -79,7 +79,7 @@ type Install struct { HideSecret bool DisableHooks bool Replace bool - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy WaitForJobs bool Devel bool DependencyUpdate bool @@ -180,8 +180,12 @@ func (i *Install) installCRDs(crds []chart.CRD) error { totalItems = append(totalItems, res...) } if len(totalItems) > 0 { + waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + if err != nil { + return errors.Wrapf(err, "unable to get waiter") + } // Give time for the CRD to be recognized. - if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { + if err := waiter.Wait(totalItems, 60*time.Second); err != nil { return err } @@ -289,11 +293,8 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if i.Wait == kube.HookOnlyStrategy && i.Atomic { - i.Wait = kube.StatusWatcherStrategy - } - if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + if i.WaitStrategy == kube.HookOnlyStrategy && i.Atomic { + i.WaitStrategy = kube.StatusWatcherStrategy } caps, err := i.cfg.getCapabilities() @@ -453,7 +454,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout); err != nil { return rel, fmt.Errorf("failed pre-install: %s", err) } } @@ -470,17 +471,22 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } + waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + if err != nil { + return rel, fmt.Errorf("failed to get waiter: %w", err) + } + if i.WaitForJobs { - err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) + err = waiter.WaitWithJobs(resources, i.Timeout) } else { - err = i.cfg.KubeClient.Wait(resources, i.Timeout) + err = waiter.Wait(resources, i.Timeout) } if err != nil { return rel, err } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout); err != nil { return rel, fmt.Errorf("failed post-install: %s", err) } } diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 331a2f71b..aafda86c2 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -412,7 +412,7 @@ func TestInstallRelease_Wait(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} goroutines := runtime.NumGoroutine() @@ -431,7 +431,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, cancel := context.WithCancel(context.Background()) @@ -454,7 +454,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy instAction.WaitForJobs = true vals := map[string]interface{}{} diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index c6374523e..7edc3ed34 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -28,6 +28,7 @@ import ( v1 "k8s.io/api/core/v1" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/kube" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -96,7 +97,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) { rel.Hooks = executingHooks } - if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil { + if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout); err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) r.cfg.Releases.Update(rel) return rel, err diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index a96a706e3..870f1e635 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -38,7 +38,7 @@ type Rollback struct { Version int Timeout time.Duration - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy WaitForJobs bool DisableHooks bool DryRun bool @@ -61,10 +61,6 @@ func (r *Rollback) Run(name string) error { return err } - if err := r.cfg.KubeClient.SetWaiter(r.Wait); err != nil { - return fmt.Errorf("failed to set kube client waiter: %w", err) - } - r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Log("preparing rollback of %s", name) @@ -181,7 +177,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // pre-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout); err != nil { return targetRelease, err } } else { @@ -227,16 +223,19 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas r.cfg.Log(err.Error()) } } - + waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + if err != nil { + return nil, errors.Wrap(err, "unable to set metadata visitor from target release") + } if r.WaitForJobs { - if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + if err := waiter.WaitWithJobs(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) } } else { - if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + if err := waiter.Wait(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) @@ -246,7 +245,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout); err != nil { return targetRelease, err } } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 503be0da5..eeff997d3 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,7 +17,6 @@ limitations under the License. package action import ( - "fmt" "strings" "time" @@ -42,7 +41,7 @@ type Uninstall struct { DryRun bool IgnoreNotFound bool KeepHistory bool - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy DeletionPropagation string Timeout time.Duration Description string @@ -61,8 +60,9 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, err } - if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + if err != nil { + return nil, err } if u.DryRun { @@ -111,7 +111,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) res := &release.UninstallReleaseResponse{Release: rel} if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout); err != nil { return res, err } } else { @@ -135,12 +135,12 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } res.Info = kept - if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil { errs = append(errs, err) } if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout); err != nil { errs = append(errs, err) } } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 5597abcdf..a83e4bc75 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -83,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = kube.StatusWatcherStrategy + unAction.WaitStrategy = kube.StatusWatcherStrategy rel := releaseStub() rel.Name = "come-fail-away" @@ -114,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = kube.HookOnlyStrategy + unAction.WaitStrategy = kube.HookOnlyStrategy unAction.DeletionPropagation = "foreground" rel := releaseStub() diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index ba5dfb5d1..e3b775a25 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -64,8 +64,8 @@ type Upgrade struct { SkipCRDs bool // Timeout is the timeout for this operation Timeout time.Duration - // Wait determines whether the wait operation should be performed and what type of wait. - Wait kube.WaitStrategy + // WaitStrategy determines what type of waiting should be done + WaitStrategy kube.WaitStrategy // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. @@ -155,11 +155,8 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if u.Wait == kube.HookOnlyStrategy && u.Atomic { - u.Wait = kube.StatusWatcherStrategy - } - if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + if u.WaitStrategy == kube.HookOnlyStrategy && u.Atomic { + u.WaitStrategy = kube.StatusWatcherStrategy } if err := chartutil.ValidateReleaseName(name); err != nil { @@ -423,7 +420,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // pre-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) return } @@ -447,15 +444,20 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele u.cfg.Log(err.Error()) } } - + waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + if err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } if u.WaitForJobs { - if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + if err := waiter.WaitWithJobs(target, u.Timeout); err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) return } } else { - if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + if err := waiter.Wait(target, u.Timeout); err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) return @@ -464,7 +466,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // post-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) return } @@ -526,13 +528,8 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - if u.Wait == kube.HookOnlyStrategy { - rollin.Wait = kube.StatusWatcherStrategy - } - // TODO pretty sure this is unnecessary as the waiter is already set if atomic at the start of upgrade - werr := u.cfg.KubeClient.SetWaiter(u.Wait) - if werr != nil { - return rel, errors.Wrapf(herr, "an error occurred while creating the waiter. original upgrade error: %s", err) + if u.WaitStrategy == kube.HookOnlyStrategy { + rollin.WaitStrategy = kube.StatusWatcherStrategy } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index a36b7a3de..19869f6d6 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -53,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) { rel.Info.Status = release.StatusDeployed req.NoError(upAction.cfg.Releases.Create(rel)) - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, done := context.WithCancel(context.Background()) @@ -83,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} res, err := upAction.Run(rel.Name, buildChart(), vals) @@ -105,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.WaitForJobs = true vals := map[string]interface{}{} @@ -129,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { failer.WaitError = fmt.Errorf("I timed out") failer.DeleteError = fmt.Errorf("I tried to delete nil") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.CleanupOnFail = true vals := map[string]interface{}{} @@ -396,7 +396,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx := context.Background() diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 04055fde9..051612bb8 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -211,7 +211,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { requiredArgs := 2 diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go index 01a32b184..1823432dc 100644 --- a/pkg/cmd/rollback.go +++ b/pkg/cmd/rollback.go @@ -84,7 +84,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) return cmd } diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go index 3a86cc598..4680c324a 100644 --- a/pkg/cmd/uninstall.go +++ b/pkg/cmd/uninstall.go @@ -79,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) return cmd } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 74d12ac40..afbbde435 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -136,7 +136,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { instClient.DisableHooks = client.DisableHooks instClient.SkipCRDs = client.SkipCRDs instClient.Timeout = client.Timeout - instClient.Wait = client.Wait + instClient.WaitStrategy = client.WaitStrategy instClient.WaitForJobs = client.WaitForJobs instClient.Devel = client.Devel instClient.Namespace = client.Namespace @@ -294,7 +294,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { addValueOptionsFlags(f, valueOpts) bindOutputFlag(cmd, &outfmt) bindPostRenderFlag(cmd, &client.PostRenderer) - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 2 { diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 61e681ad3..032f79850 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -125,7 +125,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { }, nil } -func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { +func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() @@ -148,7 +148,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { func (c *Client) SetWaiter(ws WaitStrategy) error { var err error - c.Waiter, err = c.newWaiter(ws) + c.Waiter, err = c.GetWaiter(ws) if err != nil { return err } @@ -156,7 +156,7 @@ func (c *Client) SetWaiter(ws WaitStrategy) error { } // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter) (*Client, error) { +func New(getter genericclioptions.RESTClientGetter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -165,12 +165,7 @@ func New(getter genericclioptions.RESTClientGetter) (*Client, error) { Factory: factory, Log: nopLogger, } - var err error - c.Waiter, err = c.newWaiter(HookOnlyStrategy) - if err != nil { - return nil, err - } - return c, nil + return c } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 527f28a72..8ae1df238 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -516,7 +516,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -573,7 +573,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -632,7 +632,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -662,10 +662,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil) - if err != nil { - t.Fatal(err) - } + c := New(nil) resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -675,10 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil) - if err != nil { - t.Fatal(err) - } + c = New(nil) resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index c4322733a..f868afa1a 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -35,19 +35,29 @@ type FailingKubeClient struct { PrintingKubeClient CreateError error GetError error - WaitError error - WaitForDeleteError error DeleteError error DeleteWithPropagationError error - WatchUntilReadyError error UpdateError error BuildError error BuildTableError error BuildDummy bool BuildUnstructuredError error + WaitError error + WaitForDeleteError error + WatchUntilReadyError error WaitDuration time.Duration } +// FailingKubeWaiter implements kube.Waiter for testing purposes. +// It also has additional errors you can set to fail different functions, otherwise it delegates all its calls to `PrintingKubeWaiter` +type FailingKubeWaiter struct { + *PrintingKubeWaiter + waitError error + waitForDeleteError error + watchUntilReadyError error + waitDuration time.Duration +} + // Create returns the configured error if set or prints func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { if f.CreateError != nil { @@ -65,28 +75,28 @@ func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[ } // Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints. -func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error { - time.Sleep(f.WaitDuration) - if f.WaitError != nil { - return f.WaitError +func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error { + time.Sleep(f.waitDuration) + if f.waitError != nil { + return f.waitError } - return f.PrintingKubeClient.Wait(resources, d) + return f.PrintingKubeWaiter.Wait(resources, d) } // WaitWithJobs returns the configured error if set or prints -func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { - if f.WaitError != nil { - return f.WaitError +func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { + if f.waitError != nil { + return f.waitError } - return f.PrintingKubeClient.WaitWithJobs(resources, d) + return f.PrintingKubeWaiter.WaitWithJobs(resources, d) } // WaitForDelete returns the configured error if set or prints -func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { - if f.WaitForDeleteError != nil { - return f.WaitForDeleteError +func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error { + if f.waitForDeleteError != nil { + return f.waitForDeleteError } - return f.PrintingKubeClient.WaitForDelete(resources, d) + return f.PrintingKubeWaiter.WaitForDelete(resources, d) } // Delete returns the configured error if set or prints @@ -98,11 +108,11 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [ } // WatchUntilReady returns the configured error if set or prints -func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { - if f.WatchUntilReadyError != nil { - return f.WatchUntilReadyError +func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { + if f.watchUntilReadyError != nil { + return f.watchUntilReadyError } - return f.PrintingKubeClient.WatchUntilReady(resources, d) + return f.PrintingKubeWaiter.WatchUntilReady(resources, d) } // Update returns the configured error if set or prints @@ -140,8 +150,16 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } -func (f *FailingKubeClient) SetWaiter(_ kube.WaitStrategy) error { - return nil +func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { + waiter, _ := f.PrintingKubeClient.GetWaiter(ws) + printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) + return &FailingKubeWaiter{ + PrintingKubeWaiter: printingKubeWaiter, + waitError: f.WaitError, + waitForDeleteError: f.WaitForDeleteError, + watchUntilReadyError: f.WatchUntilReadyError, + waitDuration: f.WaitDuration, + }, nil } func createDummyResourceList() kube.ResourceList { @@ -151,5 +169,4 @@ func createDummyResourceList() kube.ResourceList { var resourceList kube.ResourceList resourceList.Append(&resInfo) return resourceList - } diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index fa25a04b3..f6659a904 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -37,6 +37,12 @@ type PrintingKubeClient struct { LogOutput io.Writer } +// PrintingKubeWaiter implements kube.Waiter, but simply prints the reader to the given output +type PrintingKubeWaiter struct { + Out io.Writer + LogOutput io.Writer +} + // IsReachable checks if the cluster is reachable func (p *PrintingKubeClient) IsReachable() error { return nil @@ -59,17 +65,23 @@ func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[strin return make(map[string][]runtime.Object), nil } -func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } -func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } -func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// WatchUntilReady implements KubeClient WatchUntilReady. +func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } @@ -85,12 +97,6 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, return &kube.Result{Deleted: resources}, nil } -// WatchUntilReady implements KubeClient WatchUntilReady. -func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { - _, err := io.Copy(p.Out, bufferize(resources)) - return err -} - // Update implements KubeClient Update. func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(modified)) @@ -140,8 +146,8 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (p *PrintingKubeClient) SetWaiter(_ kube.WaitStrategy) error { - return nil +func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { + return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } func bufferize(resources kube.ResourceList) io.Reader { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index d6ac823f1..fb42fed06 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -48,9 +48,9 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - // Set Waiter sets the Kube.Waiter - SetWaiter(ws WaitStrategy) error - Waiter + + // Get Waiter gets the Kube.Waiter + GetWaiter(ws WaitStrategy) (Waiter, error) } // Waiter defines methods related to waiting for resource states. From 8efd428e5da26d035eb7a095e348c9cbbfae9f26 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 14:10:31 +0000 Subject: [PATCH 90/91] switch back to k8s rest mapper Signed-off-by: Austin Abro --- go.mod | 2 +- go.sum | 4 +- internal/restmapper/restmapper.go | 372 ------------------------------ pkg/kube/client.go | 5 +- 4 files changed, 5 insertions(+), 378 deletions(-) delete mode 100644 internal/restmapper/restmapper.go diff --git a/go.mod b/go.mod index c0f172c1e..bfc55057a 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.3 oras.land/oras-go/v2 v2.5.0 + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) @@ -177,7 +178,6 @@ require ( k8s.io/component-base v0.32.3 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect - sigs.k8s.io/controller-runtime v0.20.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect diff --git a/go.sum b/go.sum index 620678cbf..1153931d8 100644 --- a/go.sum +++ b/go.sum @@ -535,8 +535,8 @@ k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJ k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= -sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= diff --git a/internal/restmapper/restmapper.go b/internal/restmapper/restmapper.go deleted file mode 100644 index 85b7c2a69..000000000 --- a/internal/restmapper/restmapper.go +++ /dev/null @@ -1,372 +0,0 @@ -/* -Copyright The Helm Authors. -This file was initially copied and modified from - https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restmapper - -import ( - "fmt" - "net/http" - "sort" - "strings" - "sync" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -/* -Adapted from controller-runtime v0.19 before The Kubernetes Aggregated Discovery was enabled -in controller-runtime v0.20 which broke the preferred version discovery in the RESTMapper. -https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go -*/ - -// NewLazyRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. -func NewLazyRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { - if httpClient == nil { - return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") - } - - client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) - if err != nil { - return nil, err - } - return &mapper{ - mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: client, - knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: map[string]*metav1.APIGroup{}, - }, nil -} - -// mapper is a RESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -type mapper struct { - mapper meta.RESTMapper - client discovery.DiscoveryInterface - knownGroups map[string]*restmapper.APIGroupResources - apiGroups map[string]*metav1.APIGroup - - // mutex to provide thread-safe mapper reloading. - mu sync.RWMutex -} - -// KindFor implements Mapper.KindFor. -func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.getMapper().KindFor(resource) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return schema.GroupVersionKind{}, err - } - res, err = m.getMapper().KindFor(resource) - } - - return res, err -} - -// KindsFor implements Mapper.KindsFor. -func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.getMapper().KindsFor(resource) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return nil, err - } - res, err = m.getMapper().KindsFor(resource) - } - - return res, err -} - -// ResourceFor implements Mapper.ResourceFor. -func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.getMapper().ResourceFor(input) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return schema.GroupVersionResource{}, err - } - res, err = m.getMapper().ResourceFor(input) - } - - return res, err -} - -// ResourcesFor implements Mapper.ResourcesFor. -func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.getMapper().ResourcesFor(input) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return nil, err - } - res, err = m.getMapper().ResourcesFor(input) - } - - return res, err -} - -// RESTMapping implements Mapper.RESTMapping. -func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.getMapper().RESTMapping(gk, versions...) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return nil, err - } - res, err = m.getMapper().RESTMapping(gk, versions...) - } - - return res, err -} - -// RESTMappings implements Mapper.RESTMappings. -func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.getMapper().RESTMappings(gk, versions...) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return nil, err - } - res, err = m.getMapper().RESTMappings(gk, versions...) - } - - return res, err -} - -// ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *mapper) ResourceSingularizer(resource string) (string, error) { - return m.getMapper().ResourceSingularizer(resource) -} - -func (m *mapper) getMapper() meta.RESTMapper { - m.mu.RLock() - defer m.mu.RUnlock() - return m.mapper -} - -// addKnownGroupAndReload reloads the mapper with updated information about missing API group. -// versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { - // versions will here be [""] if the forwarded Version value of - // GroupVersionResource (in calling method) was not specified. - if len(versions) == 1 && versions[0] == "" { - versions = nil - } - - // If no specific versions are set by user, we will scan all available ones for the API group. - // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls - // this data will be taken from cache. - if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByName(groupName) - if err != nil { - return err - } - if apiGroup != nil { - for _, version := range apiGroup.Versions { - versions = append(versions, version.Version) - } - } - } - - m.mu.Lock() - defer m.mu.Unlock() - - // Create or fetch group resources from cache. - groupResources := &restmapper.APIGroupResources{ - Group: metav1.APIGroup{Name: groupName}, - VersionedResources: make(map[string][]metav1.APIResource), - } - - // Update information for group resources about versioned resources. - // The number of API calls is equal to the number of versions: /apis//. - // If we encounter a missing API version (NotFound error), we will remove the group from - // the m.apiGroups and m.knownGroups caches. - // If this happens, in the next call the group will be added back to apiGroups - // and only the existing versions will be loaded in knownGroups. - groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) - if err != nil { - return fmt.Errorf("failed to get API group resources: %w", err) - } - - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } - - // Update information for group resources about the API group by adding new versions. - // Ignore the versions that are already registered. - for groupVersion, resources := range groupVersionResources { - version := groupVersion.Version - - groupResources.VersionedResources[version] = resources.APIResources - found := false - for _, v := range groupResources.Group.Versions { - if v.Version == version { - found = true - break - } - } - - if !found { - groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{ - GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(), - Version: version, - }) - } - } - - // Update data in the cache. - m.knownGroups[groupName] = groupResources - - // Finally, update the group with received information and regenerate the mapper. - updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) - for _, agr := range m.knownGroups { - updatedGroupResources = append(updatedGroupResources, agr) - } - - m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil -} - -// findAPIGroupByNameLocked returns API group by its name. -func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { - // Looking in the cache first. - { - m.mu.RLock() - group, ok := m.apiGroups[groupName] - m.mu.RUnlock() - if ok { - return group, nil - } - } - - // Update the cache if nothing was found. - apiGroups, err := m.client.ServerGroups() - if err != nil { - return nil, fmt.Errorf("failed to get server groups: %w", err) - } - if len(apiGroups.Groups) == 0 { - return nil, fmt.Errorf("received an empty API groups list") - } - - m.mu.Lock() - for i := range apiGroups.Groups { - group := &apiGroups.Groups[i] - m.apiGroups[group.Name] = group - } - m.mu.Unlock() - - // Looking in the cache again. - m.mu.RLock() - defer m.mu.RUnlock() - - // Don't return an error here if the API group is not present. - // The reloaded RESTMapper will take care of returning a NoMatchError. - return m.apiGroups[groupName], nil -} - -// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. -// This method might modify the cache so it needs to be called under the lock. -func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { - groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) - failedGroups := make(map[schema.GroupVersion]error) - - for _, version := range versions { - groupVersion := schema.GroupVersion{Group: groupName, Version: version} - - apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if apierrors.IsNotFound(err) { - // If the version is not found, we remove the group from the cache - // so it gets refreshed on the next call. - if m.isAPIGroupCached(groupVersion) { - delete(m.apiGroups, groupName) - } - if m.isGroupVersionCached(groupVersion) { - delete(m.knownGroups, groupName) - } - continue - } else if err != nil { - failedGroups[groupVersion] = err - } - - if apiResourceList != nil { - // even in case of error, some fallback might have been returned. - groupVersionResources[groupVersion] = apiResourceList - } - } - - if len(failedGroups) > 0 { - err := ErrResourceDiscoveryFailed(failedGroups) - return nil, &err - } - - return groupVersionResources, nil -} - -// isGroupVersionCached checks if a version for a group is cached in the known groups cache. -func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { - if cachedGroup, ok := m.knownGroups[gv.Group]; ok { - _, cached := cachedGroup.VersionedResources[gv.Version] - return cached - } - - return false -} - -// isAPIGroupCached checks if a version for a group is cached in the api groups cache. -func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { - cachedGroup, ok := m.apiGroups[gv.Group] - if !ok { - return false - } - - for _, version := range cachedGroup.Versions { - if version.Version == gv.Version { - return true - } - } - - return false -} - -// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. -// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for -// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. -type ErrResourceDiscoveryFailed map[schema.GroupVersion]error - -// Error implements the error interface. -func (e *ErrResourceDiscoveryFailed) Error() string { - subErrors := []string{} - for k, v := range *e { - subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) - } - sort.Strings(subErrors) - return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) -} - -func (e *ErrResourceDiscoveryFailed) Unwrap() []error { - subErrors := []error{} - for gv, err := range *e { - if apierrors.IsNotFound(err) { - err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} - } - subErrors = append(subErrors, err) - } - return subErrors -} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 032f79850..a62b83b3e 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -35,6 +35,7 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,8 +51,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - - helmRestmapper "helm.sh/helm/v4/internal/restmapper" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -114,7 +113,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := helmRestmapper.NewLazyRESTMapper(cfg, httpClient) + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) if err != nil { return nil, err } From 21ee7212429ed8354f3093af40272c3c730520b7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 14:15:27 +0000 Subject: [PATCH 91/91] go fmt Signed-off-by: Austin Abro --- pkg/kube/interface.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index fb42fed06..f68367dcd 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -48,7 +48,7 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - + // Get Waiter gets the Kube.Waiter GetWaiter(ws WaitStrategy) (Waiter, error) }