diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e809e7ca2..0b704aa9a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,13 @@ vulnerability_, please email a report to [cncf-helm-security@lists.cncf.io](mailto:cncf-helm-security@lists.cncf.io). This will give us a chance to try to fix the issue before it is exploited in the wild. -## Helm v3 and v4 +## Helm v3 -Helm v4 is currently under development on the `main` branch. During the development of Helm v4 and for some time after its released, Helm v3 will continue to be supported and developed on the `dev-v3` branch. Helm v3 will continue to get bug fixes and updates for new Kubernetes releases. Helm v4 is where new features and major changes will happen. For features to be backported to Helm v3, an exception will be needed. Bugs should first be fixed on Helm v4 and then backported to Helm v3. +Helm v4 development takes place on the `main` branch while Helm v3 is on the `dev-v3` branch. + +Helm v3 will continue to receive bug fixes and updates for new Kubernetes releases until July 8th, 2026. Security enhancements will still be applied until November 11th, 2026. See the Helm blog for more details. + +Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting new features. ## Sign Your Work diff --git a/go.mod b/go.mod index 6e2d9c15d..4b0bb8977 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.35.0 oras.land/oras-go/v2 v2.6.0 - sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/controller-runtime v0.23.0 sigs.k8s.io/kustomize/kyaml v0.21.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index b1e843f1b..6e010e4b2 100644 --- a/go.sum +++ b/go.sum @@ -502,8 +502,8 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.23.0 h1:Ubi7klJWiwEWqDY+odSVZiFA0aDSevOCXpa38yCSYu8= +sigs.k8s.io/controller-runtime v0.23.0/go.mod h1:DBOIr9NsprUqCZ1ZhsuJ0wAnQSIxY/C6VjZbmLgw0j0= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go index 4cc14ae1e..49f7bd19d 100644 --- a/pkg/cmd/uninstall.go +++ b/pkg/cmd/uninstall.go @@ -35,6 +35,9 @@ as well as the release history, freeing it up for future use. Use the '--dry-run' flag to see which releases will be uninstalled without actually uninstalling them. + +Use '--cascade foreground' with '--wait' to ensure resources with finalizers +are fully deleted before the command returns.
` func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { @@ -76,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") - f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") + f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background. Use \"foreground\" with --wait to ensure resources with finalizers are fully deleted before returning.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") AddWaitFlag(cmd, &client.WaitStrategy) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 9af4bbcb3..fc706496d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -166,12 +166,18 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { if waitContext == nil { waitContext = c.WaitContext } - return &statusWaiter{ - restMapper: restMapper, - client: dynamicClient, - ctx: waitContext, - readers: o.statusReaders, - }, nil + sw := &statusWaiter{ + restMapper: restMapper, + client: dynamicClient, + ctx: waitContext, + watchUntilReadyCtx: o.watchUntilReadyCtx, + waitCtx: o.waitCtx, + waitWithJobsCtx: o.waitWithJobsCtx, + waitForDeleteCtx: o.waitForDeleteCtx, + readers: o.statusReaders, + } + sw.SetLogger(c.Logger().Handler()) + return sw, nil } func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) { diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index e257c66d2..7e4cf046c 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -32,9 +32,11 @@ import ( "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -44,8 +46,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" @@ -1241,7 +1245,7 @@ func (c createPatchTestCase) run(t *testing.T) { } } -func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured { +func newTestCustomResourceData(metadata map[string]string, spec map[string]any) *unstructured.Unstructured { if metadata == nil { metadata = make(map[string]string) } @@ -1251,7 +1255,7 @@ 
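The new `--cascade foreground` plus `--wait` guidance also applies when driving uninstalls through the SDK rather than the CLI. A minimal sketch, assuming the v4 action package keeps the `NewUninstall` constructor and the field names that `pkg/cmd/uninstall.go` binds its flags to (`DeletionPropagation`, `WaitStrategy`, `Timeout`); the import paths and the `res.Info` field are likewise assumptions carried over from Helm v3:

```go
package examples

import (
	"fmt"
	"time"

	"helm.sh/helm/v4/pkg/action" // assumed v4 path, mirroring helm.sh/helm/v3/pkg/action
	"helm.sh/helm/v4/pkg/kube"
)

// uninstallForeground mirrors `helm uninstall RELEASE --cascade foreground --wait`:
// foreground propagation makes the API server block deletion on dependents and
// finalizers, and the status waiter polls until the objects are actually gone.
func uninstallForeground(cfg *action.Configuration, release string) error {
	client := action.NewUninstall(cfg) // constructor assumed unchanged from Helm v3
	client.DeletionPropagation = "foreground"
	client.WaitStrategy = kube.StatusWatcherStrategy
	client.Timeout = 5 * time.Minute

	res, err := client.Run(release)
	if err != nil {
		return fmt.Errorf("uninstalling %q: %w", release, err)
	}
	if res != nil && res.Info != "" {
		fmt.Println(res.Info)
	}
	return nil
}
```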
func newTestCustomResourceData(metadata map[string]string, spec map[string]inter if _, ok := metadata["namespace"]; !ok { metadata["namespace"] = "default" } - o := map[string]interface{}{ + o := map[string]any{ "apiVersion": "crd.com/v1", "kind": "Data", "metadata": metadata, @@ -1274,7 +1278,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { name: "take ownership of resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", }), threeWayMergeForUnstructured: true, @@ -1290,7 +1294,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { } func TestCreatePatchCustomResourceSpec(t *testing.T) { - target := newTestCustomResourceData(nil, map[string]interface{}{ + target := newTestCustomResourceData(nil, map[string]any{ "color": "red", "size": "large", }) @@ -1298,7 +1302,7 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { name: "merge with spec of existing custom resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", "weight": "heavy", }), @@ -2275,9 +2279,19 @@ metadata: }, } - var err error - c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) - require.NoError(t, err) + // Create a fake dynamic client with the pod resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod")) + + // Create the pod in the fake client + createManifest(t, podManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } resources, err := c.Build(strings.NewReader(podManifest), false) require.NoError(t, err) @@ -2307,9 +2321,19 @@ metadata: }, } - var err error - c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) - require.NoError(t, err) + // Create a fake dynamic client with the job resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job")) + + // Create the job in the fake client + createManifest(t, jobManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } resources, err := c.Build(strings.NewReader(jobManifest), false) require.NoError(t, err) @@ -2319,3 +2343,18 @@ metadata: err = c.WaitWithJobs(resources, time.Second*3) require.NoError(t, err) } + +func createManifest(t *testing.T, manifest string, + fakeMapper meta.RESTMapper, fakeClient *dynamicfake.FakeDynamicClient) { + t.Helper() + + m := make(map[string]any) + err := yaml.Unmarshal([]byte(manifest), &m) + require.NoError(t, err) + obj := &unstructured.Unstructured{Object: m} + gvk := obj.GroupVersionKind() + mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + require.NoError(t, err) + err = fakeClient.Tracker().Create(mapping.Resource, obj, obj.GetNamespace()) + require.NoError(t, err) +} diff --git a/pkg/kube/options.go b/pkg/kube/options.go index 49c6229ba..3326c284b 100644 --- a/pkg/kube/options.go +++ b/pkg/kube/options.go @@ -26,12 +26,45 @@ import ( type 
WaitOption func(*waitOptions) // WithWaitContext sets the context for waiting on resources. +// If unset, context.Background() will be used. func WithWaitContext(ctx context.Context) WaitOption { return func(wo *waitOptions) { wo.ctx = ctx } } +// WithWatchUntilReadyMethodContext sets the context specifically for the WatchUntilReady method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWatchUntilReadyMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.watchUntilReadyCtx = ctx + } +} + +// WithWaitMethodContext sets the context specifically for the Wait method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitCtx = ctx + } +} + +// WithWaitWithJobsMethodContext sets the context specifically for the WaitWithJobs method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitWithJobsMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitWithJobsCtx = ctx + } +} + +// WithWaitForDeleteMethodContext sets the context specifically for the WaitForDelete method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitForDeleteMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitForDeleteCtx = ctx + } +} + // WithKStatusReaders sets the status readers to be used while waiting on resources. func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { return func(wo *waitOptions) { @@ -40,6 +73,10 @@ func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { } type waitOptions struct { - ctx context.Context - statusReaders []engine.StatusReader + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + statusReaders []engine.StatusReader } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 75c18de81..01024afa6 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -38,14 +38,20 @@ import ( "k8s.io/client-go/dynamic" watchtools "k8s.io/client-go/tools/watch" + "helm.sh/helm/v4/internal/logging" helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) type statusWaiter struct { - client dynamic.Interface - restMapper meta.RESTMapper - ctx context.Context - readers []engine.StatusReader + client dynamic.Interface + restMapper meta.RESTMapper + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + readers []engine.StatusReader + logging.LogHolder } // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a @@ -66,9 +72,9 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) jobSR := 
helmStatusReaders.NewCustomJobStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) @@ -88,9 +94,9 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...) return w.wait(ctx, resourceList, sw) @@ -100,9 +106,9 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) readers := append([]engine.StatusReader(nil), w.readers...) @@ -116,9 +122,9 @@ func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Dur if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout) defer cancel() - slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) return w.waitForDelete(ctx, resourceList, sw) } @@ -138,24 +144,26 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.Logger())) <-done if statusCollector.Error != nil { return statusCollector.Error } - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.NotFoundStatus { - continue - } - errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) 
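The per-method context options compose with the existing `GetWaiterWithOptions` entry point (the same one the removed test setup used with `WithKStatusReaders`). A small sketch of the intended precedence — method-specific context first, then the `WithWaitContext` context, then `context.Background()` — assuming the `helm.sh/helm/v4/pkg/kube` import path and that `GetWaiterWithOptions` returns a `kube.Waiter`:

```go
package examples

import (
	"context"
	"time"

	"helm.sh/helm/v4/pkg/kube" // assumed import path for the package changed in this diff
)

// newWaiter gives deletions a tighter deadline than everything else:
// WaitForDelete uses deleteCtx, the remaining methods fall back to baseCtx,
// and any context left unset falls back to context.Background().
func newWaiter(c *kube.Client, baseCtx context.Context) (kube.Waiter, context.CancelFunc, error) {
	deleteCtx, cancel := context.WithTimeout(baseCtx, 2*time.Minute)
	w, err := c.GetWaiterWithOptions(
		kube.StatusWatcherStrategy,
		kube.WithWaitContext(baseCtx),
		kube.WithWaitForDeleteMethodContext(deleteCtx),
	)
	if err != nil {
		cancel()
		return nil, nil, err
	}
	return w, cancel, nil
}
```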
} return nil @@ -183,31 +191,36 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.Logger())) <-done if statusCollector.Error != nil { return statusCollector.Error } - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { - continue - } - errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) } return nil } -func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { - return contextWithTimeout(w.ctx, timeout) +func (w *statusWaiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if methodCtx == nil { + methodCtx = w.ctx + } + return contextWithTimeout(methodCtx, timeout) } func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { @@ -217,7 +230,7 @@ func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Con return watchtools.ContextWithOptionalTimeout(ctx, timeout) } -func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { +func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus @@ -225,11 +238,16 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. if rs == nil { continue } - // If a resource is already deleted before waiting has started, it will show as unknown - // this check ensures we don't wait forever for a resource that is already deleted + // If a resource is already deleted before waiting has started, it will show as unknown. + // This check ensures we don't wait forever for a resource that is already deleted. if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { continue } + // Failed is a terminal state. This check ensures we don't wait forever for a resource + // that has already failed, as intervention is required to resolve the failure. + if rs.Status == status.FailedStatus && desired == status.CurrentStatus { + continue + } rss = append(rss, rs) if rs.Status != desired { nonDesiredResources = append(nonDesiredResources, rs) @@ -237,7 +255,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. 
} if aggregator.AggregateStatus(rss, desired) == desired { - slog.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) + logger.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) cancel() return } @@ -248,7 +266,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - slog.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) + logger.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index a4e7ff62c..d2dd57872 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "sync/atomic" "testing" @@ -101,10 +102,27 @@ status: succeeded: 1 active: 0 conditions: - - type: Complete + - type: Complete status: "True" ` +var jobFailedManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: failed-job + namespace: default + generation: 1 +status: + failed: 1 + active: 0 + conditions: + - type: Failed + status: "True" + reason: BackoffLimitExceeded + message: "Job has reached the specified backoff limit" +` + var podCompleteManifest = ` apiVersion: v1 kind: Pod @@ -279,7 +297,7 @@ func TestStatusWaitForDelete(t *testing.T) { name string manifestsToCreate []string manifestsToDelete []string - expectErrs []error + expectErrs []string }{ { name: "wait for pod to be deleted", @@ -291,7 +309,7 @@ func TestStatusWaitForDelete(t *testing.T) { name: "error when not all objects are deleted", manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, manifestsToDelete: []string{jobCompleteManifest}, - expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + expectErrs: []string{"resource Pod/ns/current-pod still exists. 
status: Current", "context deadline exceeded"}, }, } for _, tt := range tests { @@ -309,6 +327,7 @@ func TestStatusWaitForDelete(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { u := objToCreate.(*unstructured.Unstructured) @@ -329,7 +348,10 @@ func TestStatusWaitForDelete(t *testing.T) { resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -349,6 +371,7 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) // Don't create the object to test that the wait for delete works when the object doesn't exist objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) resourceList := getResourceListFromRuntimeObjs(t, c, objManifest) @@ -359,37 +382,35 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - waitForJobs bool + name string + objManifests []string + expectErrStrs []string + waitForJobs bool }{ { - name: "Job is not complete", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, - waitForJobs: true, + name: "Job is not complete", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, + waitForJobs: true, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: nil, - waitForJobs: false, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: nil, + waitForJobs: false, }, { name: "Pod is ready", objManifests: []string{podCurrentManifest}, - expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objManifests: []string{podNoStatusManifest, podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "one of the pods never becomes ready", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: InProgress", "context deadline exceeded"}, }, { name: "paused deployment passes", objManifests: []string{pausedDeploymentManifest}, - expectErrs: nil, }, } @@ -407,6 +428,7 @@ func TestStatusWait(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -416,8 +438,11 @@ func TestStatusWait(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -428,23 +453,23 @@ func TestStatusWait(t *testing.T) { func TestWaitForJobComplete(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests []string + expectErrStrs []string }{ { name: "Job is complete", objManifests: []string{jobCompleteManifest}, }, { - name: "Job is not ready", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is not ready", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. 
status: InProgress", "context deadline exceeded"}, }, } @@ -460,6 +485,7 @@ func TestWaitForJobComplete(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -469,8 +495,11 @@ func TestWaitForJobComplete(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -481,9 +510,9 @@ func TestWaitForJobComplete(t *testing.T) { func TestWatchForReady(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests []string + expectErrStrs []string }{ { name: "succeeds if pod and job are complete", @@ -494,14 +523,14 @@ func TestWatchForReady(t *testing.T) { objManifests: []string{notReadyDeploymentManifest}, }, { - name: "Fails if job is not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if job is not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Fails if pod is not complete", - objManifests: []string{podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if pod is not complete", + objManifests: []string{podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. 
status: InProgress", "context deadline exceeded"}, }, } @@ -519,6 +548,7 @@ func TestWatchForReady(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -528,8 +558,11 @@ func TestWatchForReady(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -540,65 +573,65 @@ func TestWatchForReady(t *testing.T) { func TestStatusWaitMultipleNamespaces(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + name string + objManifests []string + expectErrStrs []string + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "hooks in multiple namespaces", objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, { - name: "error when resource not ready in one namespace", - objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, - expectErrs: []error{errors.New("resource not ready, name: pod-ns1, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + name: "error when resource not ready in one namespace", + objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, + expectErrStrs: []string{"resource Pod/namespace-1/pod-ns1 not ready. 
status: InProgress", "context deadline exceeded"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete resources in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "cluster-scoped resources work correctly with unrestricted permissions", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "namespace-scoped and cluster-scoped resources work together", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete cluster-scoped resources works correctly", objManifests: []string{podNamespace1Manifest, namespaceManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "watch cluster-scoped resources works correctly", objManifests: []string{clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -619,6 +652,7 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -641,9 +675,12 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + err := tt.testFunc(&sw, resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -726,13 +763,13 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -740,7 +777,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "delete pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, 
allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -748,7 +785,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "hooks in multiple namespaces with namespace permissions", objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -757,7 +794,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -766,7 +803,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -775,7 +812,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -797,6 +834,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -819,7 +857,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, expectedErr := range tt.expectErrs { @@ -840,13 +878,13 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "wait succeeds with namespace-scoped resources only", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -855,7 +893,7 @@ func 
TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -864,7 +902,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -873,7 +911,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -882,7 +920,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -904,6 +942,7 @@ func TestStatusWaitMixedResources(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -926,7 +965,7 @@ func TestStatusWaitMixedResources(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, expectedErr := range tt.expectErrs { @@ -978,10 +1017,10 @@ func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.Clust func TestStatusWaitWithCustomReaders(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - customReader *mockStatusReader - expectErrs []error + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string }{ { name: "custom reader makes pod immediately current", @@ -990,7 +1029,6 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.CurrentStatus, }, - expectErrs: nil, }, { name: "custom reader returns in-progress status", @@ -999,7 +1037,7 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.InProgressStatus, }, - expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context 
deadline exceeded")}, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. status: InProgress", "context deadline exceeded"}, }, { name: "custom reader for different resource type is not used", @@ -1008,7 +1046,6 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), status: status.InProgressStatus, }, - expectErrs: nil, }, } @@ -1035,8 +1072,11 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -1113,13 +1153,584 @@ func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) { } } +func TestStatusWaitWithFailedResources(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string + testFunc func(*statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "Wait returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WaitWithJobs returns error when job has failed", + objManifests: []string{jobFailedManifest}, + customReader: nil, // Use the built-in job status reader + expectErrStrs: []string{ + "resource Job/default/failed-job not ready. status: Failed", + }, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitWithJobs(rl, timeout) + }, + }, + { + name: "Wait returns errors when multiple resources fail", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // The mock reader will make both pods return FailedStatus + expectErrStrs: []string{ + "resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader", + "resource Pod/ns/current-pod not ready. status: Failed, message: mock status reader", + }, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WatchUntilReady returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // WatchUntilReady also waits for CurrentStatus, so failed resources should return error + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + var readers []engine.StatusReader + if tt.customReader != nil { + readers = []engine.StatusReader{tt.customReader} + } + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: readers, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(&sw, resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } + return + } + assert.NoError(t, err) + }) + } +} + +func TestWaitOptionFunctions(t *testing.T) { + t.Parallel() + + t.Run("WithWatchUntilReadyMethodContext sets watchUntilReadyCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWatchUntilReadyMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.watchUntilReadyCtx) + }) + + t.Run("WithWaitMethodContext sets waitCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitCtx) + }) + + t.Run("WithWaitWithJobsMethodContext sets waitWithJobsCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitWithJobsMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitWithJobsCtx) + }) + + t.Run("WithWaitForDeleteMethodContext sets waitForDeleteCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitForDeleteMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitForDeleteCtx) + }) +} + +func TestMethodSpecificContextCancellation(t *testing.T) { + t.Parallel() + + t.Run("WatchUntilReady uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + watchUntilReadyCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) 
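Because `ctx.Err()` is now joined into the aggregated error alongside one error per non-ready resource, callers can separate "the wait timed out or was cancelled" from "a resource failed or never became ready" without string matching (the tests above match substrings only for readability). A sketch under the same import-path assumption as the earlier examples:

```go
package examples

import (
	"context"
	"errors"
	"time"

	"helm.sh/helm/v4/pkg/kube" // assumed import path, as above
)

// classifyWaitError distinguishes a cancelled or expired wait context from
// resources that are genuinely stuck. errors.Is unwraps errors.Join-ed errors,
// so the context part needs no string matching; the per-resource errors still
// carry the kind/namespace/name/status/message detail.
func classifyWaitError(w kube.Waiter, resources kube.ResourceList) string {
	err := w.Wait(resources, 2*time.Minute)
	switch {
	case err == nil:
		return "all resources reached the desired status"
	case errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled):
		return "wait interrupted: " + err.Error()
	default:
		// No context error: something failed or never became ready, e.g. a Job
		// that hit FailedStatus and made the observer stop waiting early.
		return "resources not ready: " + err.Error()
	}
}
```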
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WatchUntilReady(resourceList, time.Second*3)
+		// Should fail due to cancelled method context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("Wait uses method-specific context", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// Create a cancelled method-specific context
+		methodCtx, methodCancel := context.WithCancel(context.Background())
+		methodCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:     fakeClient,
+			restMapper: fakeMapper,
+			ctx:        context.Background(), // General context is not cancelled
+			waitCtx:    methodCtx, // Method context is cancelled
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.Wait(resourceList, time.Second*3)
+		// Should fail due to cancelled method context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("WaitWithJobs uses method-specific context", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			batchv1.SchemeGroupVersion.WithKind("Job"),
+		)
+
+		// Create a cancelled method-specific context
+		methodCtx, methodCancel := context.WithCancel(context.Background())
+		methodCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:          fakeClient,
+			restMapper:      fakeMapper,
+			ctx:             context.Background(), // General context is not cancelled
+			waitWithJobsCtx: methodCtx, // Method context is cancelled
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WaitWithJobs(resourceList, time.Second*3)
+		// Should fail due to cancelled method context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("WaitForDelete uses method-specific context", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// Create a cancelled method-specific context
+		methodCtx, methodCancel := context.WithCancel(context.Background())
+		methodCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:           fakeClient,
+			restMapper:       fakeMapper,
+			ctx:              context.Background(), // General context is not cancelled
+			waitForDeleteCtx: methodCtx, // Method context is cancelled
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WaitForDelete(resourceList, time.Second*3)
+		// Should fail due to cancelled method context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+}
+
+func TestMethodContextFallbackToGeneralContext(t *testing.T) {
+	t.Parallel()
+
+	t.Run("WatchUntilReady falls back to general context when method context is nil", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// Create a cancelled general context
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:             fakeClient,
+			restMapper:         fakeMapper,
+			ctx:                generalCtx, // General context is cancelled
+			watchUntilReadyCtx: nil, // Method context is nil, should fall back
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WatchUntilReady(resourceList, time.Second*3)
+		// Should fail due to cancelled general context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("Wait falls back to general context when method context is nil", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// Create a cancelled general context
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:     fakeClient,
+			restMapper: fakeMapper,
+			ctx:        generalCtx, // General context is cancelled
+			waitCtx:    nil, // Method context is nil, should fall back
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.Wait(resourceList, time.Second*3)
+		// Should fail due to cancelled general context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("WaitWithJobs falls back to general context when method context is nil", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			batchv1.SchemeGroupVersion.WithKind("Job"),
+		)
+
+		// Create a cancelled general context
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:          fakeClient,
+			restMapper:      fakeMapper,
+			ctx:             generalCtx, // General context is cancelled
+			waitWithJobsCtx: nil, // Method context is nil, should fall back
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WaitWithJobs(resourceList, time.Second*3)
+		// Should fail due to cancelled general context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+
+	t.Run("WaitForDelete falls back to general context when method context is nil", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// Create a cancelled general context
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel() // Cancel immediately
+
+		sw := statusWaiter{
+			client:           fakeClient,
+			restMapper:       fakeMapper,
+			ctx:              generalCtx, // General context is cancelled
+			waitForDeleteCtx: nil, // Method context is nil, should fall back
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WaitForDelete(resourceList, time.Second*3)
+		// Should fail due to cancelled general context
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "context canceled")
+	})
+}
+
+func TestMethodContextOverridesGeneralContext(t *testing.T) {
+	t.Parallel()
+
+	t.Run("method-specific context overrides general context for WatchUntilReady", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// General context is cancelled, but method context is not
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel()
+
+		sw := statusWaiter{
+			client:             fakeClient,
+			restMapper:         fakeMapper,
+			ctx:                generalCtx, // Cancelled
+			watchUntilReadyCtx: context.Background(), // Not cancelled - should be used
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WatchUntilReady(resourceList, time.Second*3)
+		// Should succeed because method context is used and it's not cancelled
+		assert.NoError(t, err)
+	})
+
+	t.Run("method-specific context overrides general context for Wait", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// General context is cancelled, but method context is not
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel()
+
+		sw := statusWaiter{
+			client:     fakeClient,
+			restMapper: fakeMapper,
+			ctx:        generalCtx, // Cancelled
+			waitCtx:    context.Background(), // Not cancelled - should be used
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.Wait(resourceList, time.Second*3)
+		// Should succeed because method context is used and it's not cancelled
+		assert.NoError(t, err)
+	})
+
+	t.Run("method-specific context overrides general context for WaitWithJobs", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			batchv1.SchemeGroupVersion.WithKind("Job"),
+		)
+
+		// General context is cancelled, but method context is not
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel()
+
+		sw := statusWaiter{
+			client:          fakeClient,
+			restMapper:      fakeMapper,
+			ctx:             generalCtx, // Cancelled
+			waitWithJobsCtx: context.Background(), // Not cancelled - should be used
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+		err := sw.WaitWithJobs(resourceList, time.Second*3)
+		// Should succeed because method context is used and it's not cancelled
+		assert.NoError(t, err)
+	})
+
+	t.Run("method-specific context overrides general context for WaitForDelete", func(t *testing.T) {
+		t.Parallel()
+		c := newTestClient(t)
+		timeout := time.Second
+		timeUntilPodDelete := time.Millisecond * 500
+		fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+		fakeMapper := testutil.NewFakeRESTMapper(
+			v1.SchemeGroupVersion.WithKind("Pod"),
+		)
+
+		// General context is cancelled, but method context is not
+		generalCtx, generalCancel := context.WithCancel(context.Background())
+		generalCancel()
+
+		sw := statusWaiter{
+			client:           fakeClient,
+			restMapper:       fakeMapper,
+			ctx:              generalCtx, // Cancelled
+			waitForDeleteCtx: context.Background(), // Not cancelled - should be used
+		}
+
+		objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+			require.NoError(t, err)
+		}
+
+		// Schedule deletion
+		for _, obj := range objs {
+			u := obj.(*unstructured.Unstructured)
+			gvr := getGVR(t, fakeMapper, u)
+			go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+				time.Sleep(timeUntilPodDelete)
+				err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+				assert.NoError(t, err)
+			}(gvr, u)
+		}
+
+		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+		err := sw.WaitForDelete(resourceList, timeout)
+		// Should succeed because method context is used and it's not cancelled
+		assert.NoError(t, err)
+	})
+}
+
 func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 	t.Parallel()
 	tests := []struct {
-		name         string
-		objManifests []string
-		customReader *mockStatusReader
-		expectErrs   []error
+		name          string
+		objManifests  []string
+		customReader  *mockStatusReader
+		expectErrStrs []string
 	}{
 		{
 			name: "custom reader makes job immediately current for hooks",
@@ -1128,7 +1739,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
 				status:      status.CurrentStatus,
 			},
-			expectErrs: nil,
 		},
 		{
 			name: "custom reader makes pod immediately current for hooks",
@@ -1137,7 +1747,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
 				status:      status.CurrentStatus,
 			},
-			expectErrs: nil,
 		},
 		{
 			name: "custom reader takes precedence over built-in pod reader",
@@ -1146,7 +1755,7 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
 				status:      status.InProgressStatus,
 			},
-			expectErrs: []error{errors.New("resource not ready, name: good-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
+			expectErrStrs: []string{"resource Pod/ns/good-pod not ready. status: InProgress", "context deadline exceeded"},
 		},
 		{
 			name: "custom reader takes precedence over built-in job reader",
@@ -1155,7 +1764,7 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
 				status:      status.InProgressStatus,
 			},
-			expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+			expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"},
 		},
 		{
 			name: "custom reader for different resource type does not affect pods",
@@ -1164,7 +1773,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
 				status:      status.InProgressStatus,
 			},
-			expectErrs: nil,
 		},
 		{
 			name: "built-in readers still work when custom reader does not match",
@@ -1173,7 +1781,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 				supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
 				status:      status.InProgressStatus,
 			},
-			expectErrs: nil,
 		},
 	}
 
@@ -1200,8 +1807,11 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
 		}
 		resourceList := getResourceListFromRuntimeObjs(t, c, objs)
 		err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
-		if tt.expectErrs != nil {
-			assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+		if tt.expectErrStrs != nil {
+			require.Error(t, err)
+			for _, expectedErrStr := range tt.expectErrStrs {
+				assert.Contains(t, err.Error(), expectedErrStr)
+			}
 			return
 		}
 		assert.NoError(t, err)
diff --git a/scripts/sync-repo.sh b/scripts/sync-repo.sh
deleted file mode 100755
index 0651d3634..000000000
--- a/scripts/sync-repo.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The Helm Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bash 'Strict Mode'
-# http://redsymbol.net/articles/unofficial-bash-strict-mode
-set -euo pipefail
-IFS=$'\n\t'
-
-# Helper Functions -------------------------------------------------------------
-
-# Display error message and exit
-error_exit() {
-  echo "error: ${1:-"unknown error"}" 1>&2
-  exit 1
-}
-
-# Checks if a command exists. Returns 1 or 0
-command_exists() {
-  hash "${1}" 2>/dev/null
-}
-
-# Program Functions ------------------------------------------------------------
-
-verify_prereqs() {
-  echo "Verifying Prerequisites...."
-  if command_exists gsutil; then
-    echo "Thumbs up! Looks like you have gsutil. Let's continue."
-  else
-    error_exit "Couldn't find gsutil. Bailing out."
-  fi
-}
-
-confirm() {
-  # shellcheck disable=SC2154
-  case $response in
-    [yY][eE][sS]|[yY])
-      true
-      ;;
-    *)
-      false
-      ;;
-  esac
-}
-
-# Main -------------------------------------------------------------------------
-
-main() {
-  if [ "$#" -ne 2 ]; then
-    error_exit "Illegal number of parameters. You must pass in local directory path and a GCS bucket name"
-  fi
-
-  echo "Getting ready to sync your local directory ($1) to a remote repository at gs://$2"
-
-  verify_prereqs
-
-  # dry run of the command
-  gsutil rsync -d -n "$1" gs://"$2"
-
-  read -r -p "Are you sure you would like to continue with these changes? [y/N] " confirm
-  if [[ $confirm =~ [yY](es)* ]]; then
-    gsutil rsync -d "$1" gs://"$2"
-  else
-    error_exit "Discontinuing sync process."
-  fi
-
-  echo "Your remote chart repository now matches the contents of the $1 directory!"
-
-}
-
-main "${@:-}"