make a working test

Signed-off-by: Austin Abro <AustinAbro321@gmail.com>
pull/13604/head
Austin Abro 9 months ago
parent 4c1758143f
commit 4564b8f712
No known key found for this signature in database
GPG Key ID: 92EB5159E403F9D6

@@ -47,6 +47,7 @@ require (
 	k8s.io/kubectl v0.31.3
 	oras.land/oras-go v1.2.5
 	sigs.k8s.io/cli-utils v0.37.2
+	sigs.k8s.io/controller-runtime v0.18.4
 	sigs.k8s.io/yaml v1.4.0
 )
@@ -128,6 +129,7 @@ require (
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/onsi/gomega v1.33.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -185,7 +187,6 @@ require (
 	k8s.io/component-base v0.31.3 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
-	sigs.k8s.io/controller-runtime v0.18.4 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kustomize/api v0.17.2 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect

@@ -37,12 +37,7 @@ import (
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
-	"sigs.k8s.io/cli-utils/pkg/object"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"

 	multierror "github.com/hashicorp/go-multierror"
@@ -92,10 +87,12 @@ type Client struct {
 	Namespace string

 	kubeClient *kubernetes.Clientset

-	// Another potential option rather than having the waiter as a field
-	// would be to have a field that decides what type of waiter to use
-	// then instantiate it during the method
-	// of course the fields could take a waiter as well
+	// I see a couple of different options for how the waiter could be handled here:
+	// - The waiter could be instantiated in New or at the start of each wait function.
+	// - The waiter could be completely separate from the client interface, though
+	//   I don't like that this forces consumers to take another interface on top of kube.
+	// - The waiter could be bundled with the resource manager into a client object; the waiter doesn't need the factory.
+	// Another option still would be to
 	waiter Waiter
 }
@@ -142,7 +139,7 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) {
 func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client {
 	if getter == nil {
 		getter = genericclioptions.NewConfigFlags(true)
 	}
 	factory := cmdutil.NewFactory(getter)
 	if waiter == nil {
 		sw, err := getStatusWatcher(factory)
@@ -156,7 +153,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client {
 	return &Client{
 		Factory: factory,
 		Log:     nopLogger,
 		waiter:  waiter,
 	}
 }
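The two hunks above leave the choice of waiter to the caller: the struct holds a waiter field, and New falls back to a default status-watcher-based waiter when nil is passed. A minimal sketch of that injection pattern follows, assuming the Waiter interface requires only the two delegating methods visible in this diff (Wait and WaitWithJobs); the noopWaiter here is hypothetical and not part of the commit.

package main

import (
	"time"

	"helm.sh/helm/v3/pkg/kube"
)

// noopWaiter is a hypothetical Waiter that skips readiness checks entirely.
type noopWaiter struct{}

func (noopWaiter) Wait(resources kube.ResourceList, timeout time.Duration) error {
	return nil // report everything ready immediately
}

func (noopWaiter) WaitWithJobs(resources kube.ResourceList, timeout time.Duration) error {
	return nil
}

func main() {
	// A nil getter and nil waiter select the defaults; passing a custom
	// waiter swaps the wait strategy without touching the rest of the client.
	_ = kube.New(nil, noopWaiter{})
}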
@@ -338,62 +335,6 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
 	return c.waiter.Wait(resources, timeout)
 }

-// WaitForReady waits for all of the objects to reach a ready state.
-func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error {
-	cancelCtx, cancel := context.WithCancel(ctx)
-	defer cancel()
-	// TODO maybe a simpler way to transfer the objects
-	runtimeObjs := []runtime.Object{}
-	for _, resource := range resourceList {
-		runtimeObjs = append(runtimeObjs, resource.Object)
-	}
-	resources := []object.ObjMetadata{}
-	for _, runtimeObj := range runtimeObjs {
-		obj, err := object.RuntimeToObjMeta(runtimeObj)
-		if err != nil {
-			return err
-		}
-		resources = append(resources, obj)
-	}
-	eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
-	statusCollector := collector.NewResourceStatusCollector(resources)
-	done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc(
-		func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
-			rss := []*event.ResourceStatus{}
-			for _, rs := range statusCollector.ResourceStatuses {
-				if rs == nil {
-					continue
-				}
-				rss = append(rss, rs)
-			}
-			desired := status.CurrentStatus
-			if aggregator.AggregateStatus(rss, desired) == desired {
-				cancel()
-				return
-			}
-		}),
-	)
-	<-done
-	if statusCollector.Error != nil {
-		return statusCollector.Error
-	}
-	// Only check parent context error, otherwise we would error when desired status is achieved.
-	if ctx.Err() != nil {
-		var err error
-		for _, id := range resources {
-			rs := statusCollector.ResourceStatuses[id]
-			if rs.Status == status.CurrentStatus {
-				continue
-			}
-			err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)
-		}
-		return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err)
-	}
-	return nil
-}
-
 // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
 func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
 	return c.waiter.WaitWithJobs(resources, timeout)

@@ -24,7 +24,6 @@ import (
 	"testing"
 	"time"

-	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -519,14 +518,16 @@ func TestWait(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(&podList), false)
 	if err != nil {
 		t.Fatal(err)
@@ -580,14 +581,16 @@ func TestWaitJob(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(job), false)
 	if err != nil {
 		t.Fatal(err)
@@ -643,14 +646,16 @@ func TestWaitDelete(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(&pod), false)
 	if err != nil {
 		t.Fatal(err)

@@ -37,7 +37,8 @@ type kstatusWaiter struct {
 }

 func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
-	ctx := context.TODO()
+	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	defer cancel()
 	cancelCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	// TODO maybe a simpler way to transfer the objects
@@ -62,6 +63,7 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
 				if rs == nil {
 					continue
 				}
+				fmt.Println("this is the status of object", rs.Status)
 				rss = append(rss, rs)
 			}
 			desired := status.CurrentStatus
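The change above bounds the watch with the caller's timeout instead of an unbounded context.TODO(). A standalone sketch of the two-layer pattern, with illustrative names only: the outer context enforces the deadline, the inner cancel stops the watch early once the desired status is reached, and only the outer context's error signals failure.

package main

import (
	"context"
	"fmt"
	"time"
)

func waitForReady(timeout time.Duration, ready <-chan struct{}) error {
	// Outer context enforces the caller's timeout.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	// Inner context can be cancelled as soon as everything is ready,
	// without waiting for the deadline to expire.
	cancelCtx, cancelEarly := context.WithCancel(ctx)
	defer cancelEarly()

	go func() {
		<-ready
		cancelEarly() // desired status reached; stop watching
	}()

	<-cancelCtx.Done()
	// Early cancellation of the inner context means success; only the
	// outer context's error indicates a real failure.
	if ctx.Err() != nil {
		return fmt.Errorf("not all resources ready: %w", ctx.Err())
	}
	return nil
}

func main() {
	ready := make(chan struct{})
	go func() { time.Sleep(100 * time.Millisecond); close(ready) }()
	fmt.Println(waitForReady(time.Second, ready)) // prints <nil>
}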

@@ -0,0 +1,213 @@
/*
Copyright The Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kube // import "helm.sh/helm/v3/pkg/kube"

import (
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/yaml"
	dynamicfake "k8s.io/client-go/dynamic/fake"
	"k8s.io/kubectl/pkg/scheme"
	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
	"sigs.k8s.io/cli-utils/pkg/testutil"
)
var podCurrentYaml = `
apiVersion: v1
kind: Pod
metadata:
  name: good-pod
  namespace: ns
status:
  conditions:
  - type: Ready
    status: "True"
  phase: Running
`

var podYaml = `
apiVersion: v1
kind: Pod
metadata:
  name: in-progress-pod
  namespace: ns
`
func TestRunHealthChecks(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name       string
		podYamls   []string
		expectErrs []error
	}{
		{
			name:       "Pod is ready",
			podYamls:   []string{podCurrentYaml},
			expectErrs: nil,
		},
		{
			name:     "one of the pods never becomes ready",
			podYamls: []string{podYaml, podCurrentYaml},
			// TODO, make this better
			expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			c := newTestClient(t)
			fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
			fakeMapper := testutil.NewFakeRESTMapper(
				v1.SchemeGroupVersion.WithKind("Pod"),
			)
			// ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
			// defer cancel()
			pods := []runtime.Object{}
			statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper)
			for _, podYaml := range tt.podYamls {
				m := make(map[string]interface{})
				err := yaml.Unmarshal([]byte(podYaml), &m)
				require.NoError(t, err)
				pod := &unstructured.Unstructured{Object: m}
				pods = append(pods, pod)
				fmt.Println(pod.GetName())
				podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
				err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace())
				require.NoError(t, err)
			}
			c.waiter = &kstatusWaiter{
				sw:  statusWatcher,
				log: c.Log,
			}
			resourceList := ResourceList{}
			for _, pod := range pods {
				list, err := c.Build(objBody(pod), false)
				if err != nil {
					t.Fatal(err)
				}
				resourceList = append(resourceList, list...)
			}
			err := c.Wait(resourceList, time.Second*5)
			if tt.expectErrs != nil {
				require.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
				return
			}
			require.NoError(t, err)
		})
	}
}
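Both fixtures above are static: each pod is seeded into the tracker once and never changes. A hypothetical companion test could flip an in-progress pod to ready after the watch starts. The sketch below reuses helpers from this file and client_test.go (newTestClient, objBody, kstatusWaiter); the test name and the 100ms timing are illustrative, and whether the fake dynamic client's watch propagates the tracker Update to the status watcher is an assumption, not verified here.

// TestRunHealthChecksTransition is a hypothetical sketch, not part of this commit.
func TestRunHealthChecksTransition(t *testing.T) {
	t.Parallel()
	c := newTestClient(t)
	fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
	fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod"))
	c.waiter = &kstatusWaiter{
		sw:  watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper),
		log: c.Log,
	}

	// Seed the tracker with a pod that is not yet ready.
	podGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	m := make(map[string]interface{})
	require.NoError(t, yaml.Unmarshal([]byte(podYaml), &m))
	pod := &unstructured.Unstructured{Object: m}
	require.NoError(t, fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()))

	resourceList, err := c.Build(objBody(pod), false)
	require.NoError(t, err)

	// Flip the pod to ready shortly after the watch begins.
	go func() {
		time.Sleep(100 * time.Millisecond)
		ready := make(map[string]interface{})
		if err := yaml.Unmarshal([]byte(podCurrentYaml), &ready); err != nil {
			return
		}
		readyPod := &unstructured.Unstructured{Object: ready}
		readyPod.SetName(pod.GetName()) // keep the same identity so Update replaces the object
		if err := fakeClient.Tracker().Update(podGVR, readyPod, readyPod.GetNamespace()); err != nil {
			t.Log(err)
		}
	}()

	require.NoError(t, c.Wait(resourceList, time.Second*5))
}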
// func TestWait1(t *testing.T) {
// 	podList := newPodList("starfish", "otter", "squid")
// 	var created *time.Time
// 	c := newTestClient(t)
// 	c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig()
// 	c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
// 		NegotiatedSerializer: unstructuredSerializer,
// 		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
// 			p, m := req.URL.Path, req.Method
// 			t.Logf("got request %s %s", p, m)
// 			switch {
// 			case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET":
// 				pod := &podList.Items[0]
// 				if created != nil && time.Since(*created) >= time.Second*5 {
// 					pod.Status.Conditions = []v1.PodCondition{
// 						{
// 							Type:   v1.PodReady,
// 							Status: v1.ConditionTrue,
// 						},
// 					}
// 				}
// 				return newResponse(200, pod)
// 			case p == "/api/v1/namespaces/default/pods/otter" && m == "GET":
// 				pod := &podList.Items[1]
// 				if created != nil && time.Since(*created) >= time.Second*5 {
// 					pod.Status.Conditions = []v1.PodCondition{
// 						{
// 							Type:   v1.PodReady,
// 							Status: v1.ConditionTrue,
// 						},
// 					}
// 				}
// 				return newResponse(200, pod)
// 			case p == "/api/v1/namespaces/default/pods/squid" && m == "GET":
// 				pod := &podList.Items[2]
// 				if created != nil && time.Since(*created) >= time.Second*5 {
// 					pod.Status.Conditions = []v1.PodCondition{
// 						{
// 							Type:   v1.PodReady,
// 							Status: v1.ConditionTrue,
// 						},
// 					}
// 				}
// 				return newResponse(200, pod)
// 			case p == "/namespaces/default/pods" && m == "POST":
// 				resources, err := c.Build(req.Body, false)
// 				if err != nil {
// 					t.Fatal(err)
// 				}
// 				now := time.Now()
// 				created = &now
// 				return newResponse(200, resources[0].Object)
// 			default:
// 				t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
// 				return nil, nil
// 			}
// 		}),
// 	}
// 	cs, err := c.getKubeClient()
// 	require.NoError(t, err)
// 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
// 	w := &waiter{
// 		c:       checker,
// 		log:     c.Log,
// 		timeout: time.Second * 30,
// 	}
// 	c.waiter = w
// 	resources, err := c.Build(objBody(&podList), false)
// 	if err != nil {
// 		t.Fatal(err)
// 	}
// 	result, err := c.Create(resources)
// 	if err != nil {
// 		t.Fatal(err)
// 	}
// 	if len(result.Created) != 3 {
// 		t.Errorf("expected 3 resource created, got %d", len(result.Created))
// 	}
// 	if err := c.Wait(resources, time.Second*30); err != nil {
// 		t.Errorf("expected wait without error, got %s", err)
// 	}
// 	if time.Since(*created) < time.Second*5 {
// 		t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created))
// 	}
// }