Merge branch 'helm:main' into feature/rollback-description-flag

pull/31580/head
MrJack 1 week ago committed by GitHub
commit bd30d13891

@ -11,9 +11,13 @@ vulnerability_, please email a report to
[cncf-helm-security@lists.cncf.io](mailto:cncf-helm-security@lists.cncf.io). This will give us a
chance to try to fix the issue before it is exploited in the wild.
## Helm v3 and v4
## Helm v3
Helm v4 is currently under development on the `main` branch. During the development of Helm v4 and for some time after it is released, Helm v3 will continue to be supported and developed on the `dev-v3` branch. Helm v3 will continue to get bug fixes and updates for new Kubernetes releases. Helm v4 is where new features and major changes will happen. For features to be backported to Helm v3, an exception will be needed. Bugs should first be fixed on Helm v4 and then backported to Helm v3.
Helm v4 development takes place on the `main` branch while Helm v3 is on the `dev-v3` branch.
Helm v3 will continue to receive bug fixes and updates for new Kubernetes releases until July 8th, 2026. Security enhancements will still be applied until November 11th, 2026. See the blog post at <https://helm.sh/blog/helm-4-released#helm-v3-support> for more details.
Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting new features.
## Sign Your Work

@ -48,7 +48,7 @@ require (
k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.35.0
oras.land/oras-go/v2 v2.6.0
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/controller-runtime v0.23.0
sigs.k8s.io/kustomize/kyaml v0.21.0
sigs.k8s.io/yaml v1.6.0
)

@ -502,8 +502,8 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime v0.23.0 h1:Ubi7klJWiwEWqDY+odSVZiFA0aDSevOCXpa38yCSYu8=
sigs.k8s.io/controller-runtime v0.23.0/go.mod h1:DBOIr9NsprUqCZ1ZhsuJ0wAnQSIxY/C6VjZbmLgw0j0=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=

@ -35,6 +35,9 @@ as well as the release history, freeing it up for future use.
Use the '--dry-run' flag to see which releases will be uninstalled without actually
uninstalling them.
Use '--cascade foreground' with '--wait' to ensure resources with finalizers
are fully deleted before the command returns.
`
func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
@ -76,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background. Use \"foreground\" with --wait to ensure resources with finalizers are fully deleted before returning.")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.StringVar(&client.Description, "description", "", "add a custom description")
AddWaitFlag(cmd, &client.WaitStrategy)
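
For context on the '--cascade foreground' / '--wait' guidance added above, here is a minimal, hedged Go sketch of the programmatic equivalent; action.NewUninstall, the Run return shape, and the kube.StatusWatcherStrategy assignment are assumptions inferred from the flag wiring in this hunk, not part of the diff.

package sketch

import (
	"time"

	"helm.sh/helm/v4/pkg/action"
	"helm.sh/helm/v4/pkg/kube"
)

// uninstallAndWait mirrors `helm uninstall <release> --cascade foreground --wait`.
// It asks Kubernetes to delete dependents before owners and blocks until
// resources with finalizers are actually gone.
func uninstallAndWait(cfg *action.Configuration, releaseName string) error {
	client := action.NewUninstall(cfg) // assumed constructor, following Helm v3's action API
	client.DeletionPropagation = "foreground"
	client.WaitStrategy = kube.StatusWatcherStrategy // wait for deletion instead of returning immediately
	client.Timeout = 300 * time.Second
	_, err := client.Run(releaseName) // Run's (response, error) shape is assumed from Helm v3
	return err
}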

@ -166,12 +166,18 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) {
if waitContext == nil {
waitContext = c.WaitContext
}
return &statusWaiter{
restMapper: restMapper,
client: dynamicClient,
ctx: waitContext,
readers: o.statusReaders,
}, nil
sw := &statusWaiter{
restMapper: restMapper,
client: dynamicClient,
ctx: waitContext,
watchUntilReadyCtx: o.watchUntilReadyCtx,
waitCtx: o.waitCtx,
waitWithJobsCtx: o.waitWithJobsCtx,
waitForDeleteCtx: o.waitForDeleteCtx,
readers: o.statusReaders,
}
sw.SetLogger(c.Logger().Handler())
return sw, nil
}
func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) {

@ -32,9 +32,11 @@ import (
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/object"
"github.com/fluxcd/cli-utils/pkg/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -44,8 +46,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
@ -1241,7 +1245,7 @@ func (c createPatchTestCase) run(t *testing.T) {
}
}
func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured {
func newTestCustomResourceData(metadata map[string]string, spec map[string]any) *unstructured.Unstructured {
if metadata == nil {
metadata = make(map[string]string)
}
@ -1251,7 +1255,7 @@ func newTestCustomResourceData(metadata map[string]string, spec map[string]inter
if _, ok := metadata["namespace"]; !ok {
metadata["namespace"] = "default"
}
o := map[string]interface{}{
o := map[string]any{
"apiVersion": "crd.com/v1",
"kind": "Data",
"metadata": metadata,
@ -1274,7 +1278,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) {
name: "take ownership of resource",
target: target,
original: target,
actual: newTestCustomResourceData(nil, map[string]interface{}{
actual: newTestCustomResourceData(nil, map[string]any{
"color": "red",
}),
threeWayMergeForUnstructured: true,
@ -1290,7 +1294,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) {
}
func TestCreatePatchCustomResourceSpec(t *testing.T) {
target := newTestCustomResourceData(nil, map[string]interface{}{
target := newTestCustomResourceData(nil, map[string]any{
"color": "red",
"size": "large",
})
@ -1298,7 +1302,7 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) {
name: "merge with spec of existing custom resource",
target: target,
original: target,
actual: newTestCustomResourceData(nil, map[string]interface{}{
actual: newTestCustomResourceData(nil, map[string]any{
"color": "red",
"weight": "heavy",
}),
@ -2275,9 +2279,19 @@ metadata:
},
}
var err error
c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
require.NoError(t, err)
// Create a fake dynamic client with the pod resource
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod"))
// Create the pod in the fake client
createManifest(t, podManifest, fakeMapper, fakeClient)
// Set up the waiter with the fake client and custom status readers
c.Waiter = &statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
readers: statusReaders,
}
resources, err := c.Build(strings.NewReader(podManifest), false)
require.NoError(t, err)
@ -2307,9 +2321,19 @@ metadata:
},
}
var err error
c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
require.NoError(t, err)
// Create a fake dynamic client with the job resource
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job"))
// Create the job in the fake client
createManifest(t, jobManifest, fakeMapper, fakeClient)
// Set up the waiter with the fake client and custom status readers
c.Waiter = &statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
readers: statusReaders,
}
resources, err := c.Build(strings.NewReader(jobManifest), false)
require.NoError(t, err)
@ -2319,3 +2343,18 @@ metadata:
err = c.WaitWithJobs(resources, time.Second*3)
require.NoError(t, err)
}
func createManifest(t *testing.T, manifest string,
fakeMapper meta.RESTMapper, fakeClient *dynamicfake.FakeDynamicClient) {
t.Helper()
m := make(map[string]any)
err := yaml.Unmarshal([]byte(manifest), &m)
require.NoError(t, err)
obj := &unstructured.Unstructured{Object: m}
gvk := obj.GroupVersionKind()
mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
require.NoError(t, err)
err = fakeClient.Tracker().Create(mapping.Resource, obj, obj.GetNamespace())
require.NoError(t, err)
}

@ -26,12 +26,45 @@ import (
type WaitOption func(*waitOptions)
// WithWaitContext sets the context for waiting on resources.
// If unset, context.Background() will be used.
func WithWaitContext(ctx context.Context) WaitOption {
return func(wo *waitOptions) {
wo.ctx = ctx
}
}
// WithWatchUntilReadyMethodContext sets the context specifically for the WatchUntilReady method.
// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
func WithWatchUntilReadyMethodContext(ctx context.Context) WaitOption {
return func(wo *waitOptions) {
wo.watchUntilReadyCtx = ctx
}
}
// WithWaitMethodContext sets the context specifically for the Wait method.
// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
func WithWaitMethodContext(ctx context.Context) WaitOption {
return func(wo *waitOptions) {
wo.waitCtx = ctx
}
}
// WithWaitWithJobsMethodContext sets the context specifically for the WaitWithJobs method.
// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
func WithWaitWithJobsMethodContext(ctx context.Context) WaitOption {
return func(wo *waitOptions) {
wo.waitWithJobsCtx = ctx
}
}
// WithWaitForDeleteMethodContext sets the context specifically for the WaitForDelete method.
// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
func WithWaitForDeleteMethodContext(ctx context.Context) WaitOption {
return func(wo *waitOptions) {
wo.waitForDeleteCtx = ctx
}
}
// WithKStatusReaders sets the status readers to be used while waiting on resources.
func WithKStatusReaders(readers ...engine.StatusReader) WaitOption {
return func(wo *waitOptions) {
@ -40,6 +73,10 @@ func WithKStatusReaders(readers ...engine.StatusReader) WaitOption {
}
type waitOptions struct {
ctx context.Context
statusReaders []engine.StatusReader
ctx context.Context
watchUntilReadyCtx context.Context
waitCtx context.Context
waitWithJobsCtx context.Context
waitForDeleteCtx context.Context
statusReaders []engine.StatusReader
}
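
A hedged caller-side sketch of the per-method context options above; it assumes GetWaiterWithOptions remains exported on *kube.Client (as used by the tests earlier in this diff) and that the package lives at helm.sh/helm/v4/pkg/kube — the helper name is illustrative only.

package sketch

import (
	"context"

	"helm.sh/helm/v4/pkg/kube"
)

// newWaiterWithContexts wires one shared base context plus a dedicated context
// that only WaitForDelete will use; every other wait method falls back to base,
// and to context.Background() if base is nil.
func newWaiterWithContexts(c *kube.Client, base, deleteCtx context.Context) (kube.Waiter, error) {
	return c.GetWaiterWithOptions( // assumed to be exported, as used in the tests above
		kube.StatusWatcherStrategy,
		kube.WithWaitContext(base),                     // fallback for all wait methods
		kube.WithWaitForDeleteMethodContext(deleteCtx), // overrides only WaitForDelete
	)
}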

@ -38,14 +38,20 @@ import (
"k8s.io/client-go/dynamic"
watchtools "k8s.io/client-go/tools/watch"
"helm.sh/helm/v4/internal/logging"
helmStatusReaders "helm.sh/helm/v4/internal/statusreaders"
)
type statusWaiter struct {
client dynamic.Interface
restMapper meta.RESTMapper
ctx context.Context
readers []engine.StatusReader
client dynamic.Interface
restMapper meta.RESTMapper
ctx context.Context
watchUntilReadyCtx context.Context
waitCtx context.Context
waitWithJobsCtx context.Context
waitForDeleteCtx context.Context
readers []engine.StatusReader
logging.LogHolder
}
// DefaultStatusWatcherTimeout is the timeout used by the status waiter when a
@ -66,9 +72,9 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
if timeout == 0 {
timeout = DefaultStatusWatcherTimeout
}
ctx, cancel := w.contextWithTimeout(timeout)
ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
@ -88,9 +94,9 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
if timeout == 0 {
timeout = DefaultStatusWatcherTimeout
}
ctx, cancel := w.contextWithTimeout(timeout)
ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...)
return w.wait(ctx, resourceList, sw)
@ -100,9 +106,9 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
if timeout == 0 {
timeout = DefaultStatusWatcherTimeout
}
ctx, cancel := w.contextWithTimeout(timeout)
ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
readers := append([]engine.StatusReader(nil), w.readers...)
@ -116,9 +122,9 @@ func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Dur
if timeout == 0 {
timeout = DefaultStatusWatcherTimeout
}
ctx, cancel := w.contextWithTimeout(timeout)
ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout)
defer cancel()
slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
w.Logger().Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw)
}
@ -138,24 +144,26 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL
RESTScopeStrategy: watcher.RESTScopeNamespace,
})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.Logger()))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.NotFoundStatus {
continue
}
errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus {
continue
}
errs = append(errs, ctx.Err())
errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. status: %s, message: %s",
rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
}
if err := ctx.Err(); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
@ -183,31 +191,36 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
RESTScopeStrategy: watcher.RESTScopeNamespace,
})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.Logger()))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.CurrentStatus {
continue
}
errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.CurrentStatus {
continue
}
errs = append(errs, ctx.Err())
errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s",
rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
}
if err := ctx.Err(); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
return contextWithTimeout(w.ctx, timeout)
func (w *statusWaiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if methodCtx == nil {
methodCtx = w.ctx
}
return contextWithTimeout(methodCtx, timeout)
}
func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
@ -217,7 +230,7 @@ func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Con
return watchtools.ContextWithOptionalTimeout(ctx, timeout)
}
func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus
@ -225,11 +238,16 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector.
if rs == nil {
continue
}
// If a resource is already deleted before waiting has started, it will show as unknown
// this check ensures we don't wait forever for a resource that is already deleted
// If a resource is already deleted before waiting has started, it will show as unknown.
// This check ensures we don't wait forever for a resource that is already deleted.
if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
continue
}
// Failed is a terminal state. This check ensures we don't wait forever for a resource
// that has already failed, as intervention is required to resolve the failure.
if rs.Status == status.FailedStatus && desired == status.CurrentStatus {
continue
}
rss = append(rss, rs)
if rs.Status != desired {
nonDesiredResources = append(nonDesiredResources, rs)
@ -237,7 +255,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector.
}
if aggregator.AggregateStatus(rss, desired) == desired {
slog.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss))
logger.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss))
cancel()
return
}
@ -248,7 +266,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector.
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
})
first := nonDesiredResources[0]
slog.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
logger.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
}
}
}
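
Because the rewritten wait paths now combine per-resource errors and the context error with errors.Join, callers can still detect a plain timeout via errors.Is; the sketch below is illustrative only, and the kube import path and 30-second timeout are assumptions.

package sketch

import (
	"context"
	"errors"
	"fmt"
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// waitOrExplain shows that errors.Is still finds the context error inside the
// joined error, while the per-resource detail stays in the error message.
func waitOrExplain(w kube.Waiter, resources kube.ResourceList) error {
	if err := w.Wait(resources, 30*time.Second); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			// Timed out: err also lists every resource that was not ready.
			return fmt.Errorf("timed out waiting for resources: %w", err)
		}
		return err
	}
	return nil
}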

File diff suppressed because it is too large.

@ -1,83 +0,0 @@
#!/usr/bin/env bash
# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bash 'Strict Mode'
# http://redsymbol.net/articles/unofficial-bash-strict-mode
set -euo pipefail
IFS=$'\n\t'
# Helper Functions -------------------------------------------------------------
# Display error message and exit
error_exit() {
echo "error: ${1:-"unknown error"}" 1>&2
exit 1
}
# Checks if a command exists. Returns 1 or 0
command_exists() {
hash "${1}" 2>/dev/null
}
# Program Functions ------------------------------------------------------------
verify_prereqs() {
echo "Verifying Prerequisites...."
if command_exists gsutil; then
echo "Thumbs up! Looks like you have gsutil. Let's continue."
else
error_exit "Couldn't find gsutil. Bailing out."
fi
}
confirm() {
# shellcheck disable=SC2154
case $response in
[yY][eE][sS]|[yY])
true
;;
*)
false
;;
esac
}
# Main -------------------------------------------------------------------------
main() {
if [ "$#" -ne 2 ]; then
error_exit "Illegal number of parameters. You must pass in local directory path and a GCS bucket name"
fi
echo "Getting ready to sync your local directory ($1) to a remote repository at gs://$2"
verify_prereqs
# dry run of the command
gsutil rsync -d -n "$1" gs://"$2"
read -r -p "Are you sure you would like to continue with these changes? [y/N] " confirm
if [[ $confirm =~ [yY](es)* ]]; then
gsutil rsync -d "$1" gs://"$2"
else
error_exit "Discontinuing sync process."
fi
echo "Your remote chart repository now matches the contents of the $1 directory!"
}
main "${@:-}"