diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go
index ac8a28fe0..1dc0c7f84 100644
--- a/pkg/action/rollback.go
+++ b/pkg/action/rollback.go
@@ -41,7 +41,6 @@ type Rollback struct {
 	WaitForJobs   bool
 	DisableHooks  bool
 	DryRun        bool
-	Recreate      bool // will (if true) recreate pods after a rollback.
 	Force         bool // will (if true) force resource upgrade through uninstall/recreate if needed
 	CleanupOnFail bool
 	MaxHistory    int // MaxHistory limits the maximum number of revisions saved per release
@@ -211,15 +210,6 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
 		return targetRelease, err
 	}
 
-	if r.Recreate {
-		// NOTE: Because this is not critical for a release to succeed, we just
-		// log if an error occurs and continue onward. If we ever introduce log
-		// levels, we should make these error level logs so users are notified
-		// that they'll need to go do the cleanup on their own
-		if err := recreate(r.cfg, results.Updated); err != nil {
-			slog.Error(err.Error())
-		}
-	}
 	waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
 	if err != nil {
 		return nil, fmt.Errorf("unable to set metadata visitor from target release: %w", err)
diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go
index e2d2ead69..271bc8aa9 100644
--- a/pkg/action/upgrade.go
+++ b/pkg/action/upgrade.go
@@ -26,7 +26,6 @@ import (
 	"sync"
 	"time"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/cli-runtime/pkg/resource"
 
 	chart "helm.sh/helm/v4/pkg/chart/v2"
@@ -88,8 +87,6 @@ type Upgrade struct {
 	ReuseValues bool
 	// ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values.
 	ResetThenReuseValues bool
-	// Recreate will (if true) recreate pods after a rollback.
-	Recreate bool
 	// MaxHistory limits the maximum number of revisions saved per release
 	MaxHistory int
 	// Atomic, if true, will roll back on failure.
@@ -436,15 +433,6 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 		return
 	}
 
-	if u.Recreate {
-		// NOTE: Because this is not critical for a release to succeed, we just
-		// log if an error occurs and continue onward. If we ever introduce log
-		// levels, we should make these error level logs so users are notified
-		// that they'll need to go do the cleanup on their own
-		if err := recreate(u.cfg, results.Updated); err != nil {
-			slog.Error(err.Error())
-		}
-	}
 	waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
 	if err != nil {
 		u.cfg.recordRelease(originalRelease)
@@ -537,7 +525,6 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
 	}
 	rollin.WaitForJobs = u.WaitForJobs
 	rollin.DisableHooks = u.DisableHooks
-	rollin.Recreate = u.Recreate
 	rollin.Force = u.Force
 	rollin.Timeout = u.Timeout
 	if rollErr := rollin.Run(rel.Name); rollErr != nil {
@@ -602,42 +589,6 @@ func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool)
 	return err
 }
 
-// recreate captures all the logic for recreating pods for both upgrade and
-// rollback. If we end up refactoring rollback to use upgrade, this can just be
-// made an unexported method on the upgrade action.
-func recreate(cfg *Configuration, resources kube.ResourceList) error {
-	for _, res := range resources {
-		versioned := kube.AsVersioned(res)
-		selector, err := kube.SelectorsForObject(versioned)
-		if err != nil {
-			// If no selector is returned, it means this object is
-			// definitely not a pod, so continue onward
-			continue
-		}
-
-		client, err := cfg.KubernetesClientSet()
-		if err != nil {
-			return fmt.Errorf("unable to recreate pods for object %s/%s because an error occurred: %w", res.Namespace, res.Name, err)
-		}
-
-		pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
-			LabelSelector: selector.String(),
-		})
-		if err != nil {
-			return fmt.Errorf("unable to recreate pods for object %s/%s because an error occurred: %w", res.Namespace, res.Name, err)
-		}
-
-		// Restart pods
-		for _, pod := range pods.Items {
-			// Delete each pod for get them restarted with changed spec.
-			if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
-				return fmt.Errorf("unable to recreate pods for object %s/%s because an error occurred: %w", res.Namespace, res.Name, err)
-			}
-		}
-	}
-	return nil
-}
-
 func objectKey(r *resource.Info) string {
 	gvk := r.Object.GetObjectKind().GroupVersionKind()
 	return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go
index 1823432dc..6658d3fd6 100644
--- a/pkg/cmd/rollback.go
+++ b/pkg/cmd/rollback.go
@@ -77,7 +77,6 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 
 	f := cmd.Flags()
 	f.BoolVar(&client.DryRun, "dry-run", false, "simulate a rollback")
-	f.BoolVar(&client.Recreate, "recreate-pods", false, "performs pods restart for the resource if applicable")
 	f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
 	f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go
index b93fa6e64..d4e7b4852 100644
--- a/pkg/cmd/upgrade.go
+++ b/pkg/cmd/upgrade.go
@@ -268,8 +268,6 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 	f.StringVar(&client.DryRunOption, "dry-run", "", "simulate an install. If --dry-run is set with no option being specified or as '--dry-run=client', it will not attempt cluster connections. Setting '--dry-run=server' allows attempting cluster connections.")
 	f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag")
 	f.Lookup("dry-run").NoOptDefVal = "client"
-	f.BoolVar(&client.Recreate, "recreate-pods", false, "performs pods restart for the resource if applicable")
-	f.MarkDeprecated("recreate-pods", "functionality will no longer be updated. Consult the documentation for other methods to recreate pods")
 	f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "disable pre/post upgrade hooks")
 	f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the upgrade process will not validate rendered templates against the Kubernetes OpenAPI Schema")
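
For anyone who depended on `--recreate-pods`, here is a minimal standalone sketch of the behavior the deleted `recreate` helper implemented, written against client-go directly. It is an illustration under stated assumptions, not part of Helm's API: `restartPods`, the `default` namespace, and the `app=my-app` selector are hypothetical placeholders. The pattern matches the removed code: list pods by label selector, then delete each one with a UID precondition so the owning controller recreates it with the updated spec.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// restartPods deletes every pod matching selector in namespace so that the
// owning controller (Deployment, StatefulSet, ...) recreates it with the
// current pod template. The UID precondition mirrors the removed helper:
// it refuses to delete a pod already replaced since the List call.
func restartPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) error {
	pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: selector,
	})
	if err != nil {
		return fmt.Errorf("unable to list pods in %s: %w", namespace, err)
	}
	for _, pod := range pods.Items {
		opts := metav1.NewPreconditionDeleteOptions(string(pod.UID))
		if err := client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, *opts); err != nil {
			return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
		}
	}
	return nil
}

func main() {
	// Assumes a reachable cluster via the default kubeconfig; the namespace
	// and selector below are hypothetical values for illustration.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	if err := restartPods(context.Background(), client, "default", "app=my-app"); err != nil {
		panic(err)
	}
}
```

In everyday use, `kubectl rollout restart deployment/<name>` (and the statefulset/daemonset equivalents) achieves the same outcome through the workload's rollout machinery, which is the usual replacement for the removed flag.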