diff --git a/cmd/helm/rollback.go b/cmd/helm/rollback.go
index 2cd6fa2cb..3ed52d512 100644
--- a/cmd/helm/rollback.go
+++ b/cmd/helm/rollback.go
@@ -78,7 +78,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 	f := cmd.Flags()
 	f.BoolVar(&client.DryRun, "dry-run", false, "simulate a rollback")
 	f.BoolVar(&client.Recreate, "recreate-pods", false, "performs pods restart for the resource if applicable")
-	f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
+	f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed (deprecated, no longer has any effect)")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
 	f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
 	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go
index 8773b6271..86fa9fb73 100644
--- a/pkg/action/rollback.go
+++ b/pkg/action/rollback.go
@@ -162,7 +162,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
 		r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
 	}
 
-	results, err := r.cfg.KubeClient.Update(current, target, r.Force)
+	results, err := r.cfg.KubeClient.Update(current, target, true)
 	if err != nil {
 		msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)