Call slog directly instead of using a wrapper

Signed-off-by: Benoit Tigeot <benoit.tigeot@lifen.fr>
pull/30708/head
Benoit Tigeot 5 months ago
parent 6b5fa33633
commit cbaac7652d
No known key found for this signature in database
GPG Key ID: 8E6D4FC8AEBDA62C

@@ -41,12 +41,12 @@ func main() {
cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:])
if err != nil { if err != nil {
helmcmd.Logger.Warn("command failed", slog.Any("error", err)) slog.Warn("command failed", slog.Any("error", err))
os.Exit(1) os.Exit(1)
} }
if err := cmd.Execute(); err != nil { if err := cmd.Execute(); err != nil {
helmcmd.Logger.Debug("error", slog.Any("error", err)) slog.Debug("error", slog.Any("error", err))
switch e := err.(type) { switch e := err.(type) {
case helmcmd.PluginError: case helmcmd.PluginError:
os.Exit(e.Code) os.Exit(e.Code)

@@ -18,8 +18,6 @@ package monocular
import ( import (
"errors" "errors"
"io"
"log/slog"
"net/url" "net/url"
) )
@@ -31,8 +29,6 @@ type Client struct {
// The base URL for requests // The base URL for requests
BaseURL string BaseURL string
Log *slog.Logger
} }
// New creates a new client // New creates a new client
@@ -45,7 +41,6 @@ func New(u string) (*Client, error) {
return &Client{ return &Client{
BaseURL: u, BaseURL: u,
Log: slog.New(slog.NewTextHandler(io.Discard, nil)),
}, nil }, nil
} }

@@ -96,8 +96,6 @@ type Configuration struct {
// Capabilities describes the capabilities of the Kubernetes cluster. // Capabilities describes the capabilities of the Kubernetes cluster.
Capabilities *chartutil.Capabilities Capabilities *chartutil.Capabilities
Log *slog.Logger
// HookOutputFunc called with container name and returns and expects writer that will receive the log output. // HookOutputFunc called with container name and returns and expects writer that will receive the log output.
HookOutputFunc func(namespace, pod, container string) io.Writer HookOutputFunc func(namespace, pod, container string) io.Writer
} }
@@ -267,8 +265,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
apiVersions, err := GetVersionSet(dc) apiVersions, err := GetVersionSet(dc)
if err != nil { if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) { if discovery.IsGroupDiscoveryFailedError(err) {
cfg.Log.Warn("the kubernetes server has an orphaned API service", "errors", err) slog.Warn("the kubernetes server has an orphaned API service", "errors", err)
cfg.Log.Warn("to fix this, kubectl delete apiservice <service-name>") slog.Warn("to fix this, kubectl delete apiservice <service-name>")
} else { } else {
return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
} }
@@ -367,29 +365,28 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
// recordRelease with an update operation in case reuse has been set. // recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) { func (cfg *Configuration) recordRelease(r *release.Release) {
if err := cfg.Releases.Update(r); err != nil { if err := cfg.Releases.Update(r); err != nil {
cfg.Log.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err)) slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err))
} }
} }
// Init initializes the action configuration // Init initializes the action configuration
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log *slog.Logger) error { func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
kc := kube.New(getter) kc := kube.New(getter)
kc.Log = log
lazyClient := &lazyClient{ lazyClient := &lazyClient{
namespace: namespace, namespace: namespace,
clientFn: kc.Factory.KubernetesClientSet, clientFn: kc.Factory.KubernetesClientSet,
} }
// slog.SetDefault()
var store *storage.Storage var store *storage.Storage
switch helmDriver { switch helmDriver {
case "secret", "secrets", "": case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient)) d := driver.NewSecrets(newSecretClient(lazyClient))
d.Log = log
store = storage.Init(d) store = storage.Init(d)
case "configmap", "configmaps": case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient)) d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
d.Log = log
store = storage.Init(d) store = storage.Init(d)
case "memory": case "memory":
var d *driver.Memory var d *driver.Memory
@@ -409,7 +406,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
case "sql": case "sql":
d, err := driver.NewSQL( d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"), os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
log,
namespace, namespace,
) )
if err != nil { if err != nil {
@@ -423,7 +419,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
cfg.RESTClientGetter = getter cfg.RESTClientGetter = getter
cfg.KubeClient = kc cfg.KubeClient = kc
cfg.Releases = store cfg.Releases = store
cfg.Log = log
cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard } cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
return nil return nil

@@ -56,6 +56,7 @@ func actionConfigFixture(t *testing.T) *Configuration {
}) })
logger = slog.New(handler) logger = slog.New(handler)
} }
slog.SetDefault(logger)
registryClient, err := registry.NewClient() registryClient, err := registry.NewClient()
if err != nil { if err != nil {
@@ -67,7 +68,6 @@ func actionConfigFixture(t *testing.T) *Configuration {
KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
Capabilities: chartutil.DefaultCapabilities, Capabilities: chartutil.DefaultCapabilities,
RegistryClient: registryClient, RegistryClient: registryClient,
Log: logger,
} }
} }
@@ -347,7 +347,7 @@ func TestConfiguration_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
cfg := &Configuration{} cfg := &Configuration{}
actualErr := cfg.Init(nil, "default", tt.helmDriver, nil) actualErr := cfg.Init(nil, "default", tt.helmDriver)
if tt.expectErr { if tt.expectErr {
assert.Error(t, actualErr) assert.Error(t, actualErr)
assert.Contains(t, actualErr.Error(), tt.errMsg) assert.Contains(t, actualErr.Error(), tt.errMsg)

@@ -17,6 +17,8 @@ limitations under the License.
package action package action
import ( import (
"log/slog"
"github.com/pkg/errors" "github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@@ -53,6 +55,6 @@ func (h *History) Run(name string) ([]*release.Release, error) {
return nil, errors.Errorf("release name is invalid: %s", name) return nil, errors.Errorf("release name is invalid: %s", name)
} }
h.cfg.Log.Debug("getting history for release", "release", name) slog.Debug("getting history for release", "release", name)
return h.cfg.Releases.History(name) return h.cfg.Releases.History(name)
} }

@@ -173,7 +173,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
// If the error is CRD already exists, continue. // If the error is CRD already exists, continue.
if apierrors.IsAlreadyExists(err) { if apierrors.IsAlreadyExists(err) {
crdName := res[0].Name crdName := res[0].Name
i.cfg.Log.Debug("CRD is already present. Skipping", "crd", crdName) slog.Debug("CRD is already present. Skipping", "crd", crdName)
continue continue
} }
return errors.Wrapf(err, "failed to install CRD %s", obj.Name) return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
@@ -201,7 +201,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err return err
} }
i.cfg.Log.Debug("clearing discovery cache") slog.Debug("clearing discovery cache")
discoveryClient.Invalidate() discoveryClient.Invalidate()
_, _ = discoveryClient.ServerGroups() _, _ = discoveryClient.ServerGroups()
@@ -214,7 +214,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err return err
} }
if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
i.cfg.Log.Debug("clearing REST mapper cache") slog.Debug("clearing REST mapper cache")
resettable.Reset() resettable.Reset()
} }
} }
@@ -238,24 +238,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`) // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
if !i.ClientOnly { if !i.ClientOnly {
if err := i.cfg.KubeClient.IsReachable(); err != nil { if err := i.cfg.KubeClient.IsReachable(); err != nil {
i.cfg.Log.Error(fmt.Sprintf("cluster reachability check failed: %v", err)) slog.Error(fmt.Sprintf("cluster reachability check failed: %v", err))
return nil, errors.Wrap(err, "cluster reachability check failed") return nil, errors.Wrap(err, "cluster reachability check failed")
} }
} }
// HideSecret must be used with dry run. Otherwise, return an error. // HideSecret must be used with dry run. Otherwise, return an error.
if !i.isDryRun() && i.HideSecret { if !i.isDryRun() && i.HideSecret {
i.cfg.Log.Error("hiding Kubernetes secrets requires a dry-run mode") slog.Error("hiding Kubernetes secrets requires a dry-run mode")
return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode") return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
} }
if err := i.availableName(); err != nil { if err := i.availableName(); err != nil {
i.cfg.Log.Error("release name check failed", slog.Any("error", err)) slog.Error("release name check failed", slog.Any("error", err))
return nil, errors.Wrap(err, "release name check failed") return nil, errors.Wrap(err, "release name check failed")
} }
if err := chartutil.ProcessDependencies(chrt, vals); err != nil { if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
i.cfg.Log.Error("chart dependencies processing failed", slog.Any("error", err)) slog.Error("chart dependencies processing failed", slog.Any("error", err))
return nil, errors.Wrap(err, "chart dependencies processing failed") return nil, errors.Wrap(err, "chart dependencies processing failed")
} }
@@ -269,7 +269,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 { if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
// On dry run, bail here // On dry run, bail here
if i.isDryRun() { if i.isDryRun() {
i.cfg.Log.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
} else if err := i.installCRDs(crds); err != nil { } else if err := i.installCRDs(crds); err != nil {
return nil, err return nil, err
} }
@@ -289,7 +289,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
mem.SetNamespace(i.Namespace) mem.SetNamespace(i.Namespace)
i.cfg.Releases = storage.Init(mem) i.cfg.Releases = storage.Init(mem)
} else if !i.ClientOnly && len(i.APIVersions) > 0 { } else if !i.ClientOnly && len(i.APIVersions) > 0 {
i.cfg.Log.Debug("API Version list given outside of client only mode, this list will be ignored") slog.Debug("API Version list given outside of client only mode, this list will be ignored")
} }
// Make sure if Atomic is set, that wait is set as well. This makes it so // Make sure if Atomic is set, that wait is set as well. This makes it so
@@ -506,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// One possible strategy would be to do a timed retry to see if we can get // One possible strategy would be to do a timed retry to see if we can get
// this stored in the future. // this stored in the future.
if err := i.recordRelease(rel); err != nil { if err := i.recordRelease(rel); err != nil {
i.cfg.Log.Error("failed to record the release", slog.Any("error", err)) slog.Error("failed to record the release", slog.Any("error", err))
} }
return rel, nil return rel, nil
@@ -515,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
if i.Atomic { if i.Atomic {
i.cfg.Log.Debug("install failed, uninstalling release", "release", i.ReleaseName) slog.Debug("install failed, uninstalling release", "release", i.ReleaseName)
uninstall := NewUninstall(i.cfg) uninstall := NewUninstall(i.cfg)
uninstall.DisableHooks = i.DisableHooks uninstall.DisableHooks = i.DisableHooks
uninstall.KeepHistory = false uninstall.KeepHistory = false

@@ -19,6 +19,7 @@ package action
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"log/slog"
"strings" "strings"
"time" "time"
@@ -63,26 +64,26 @@ func (r *Rollback) Run(name string) error {
r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Releases.MaxHistory = r.MaxHistory
r.cfg.Log.Debug("preparing rollback", "name", name) slog.Debug("preparing rollback", "name", name)
currentRelease, targetRelease, err := r.prepareRollback(name) currentRelease, targetRelease, err := r.prepareRollback(name)
if err != nil { if err != nil {
return err return err
} }
if !r.DryRun { if !r.DryRun {
r.cfg.Log.Debug("creating rolled back release", "name", name) slog.Debug("creating rolled back release", "name", name)
if err := r.cfg.Releases.Create(targetRelease); err != nil { if err := r.cfg.Releases.Create(targetRelease); err != nil {
return err return err
} }
} }
r.cfg.Log.Debug("performing rollback", "name", name) slog.Debug("performing rollback", "name", name)
if _, err := r.performRollback(currentRelease, targetRelease); err != nil { if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
return err return err
} }
if !r.DryRun { if !r.DryRun {
r.cfg.Log.Debug("updating status for rolled back release", "name", name) slog.Debug("updating status for rolled back release", "name", name)
if err := r.cfg.Releases.Update(targetRelease); err != nil { if err := r.cfg.Releases.Update(targetRelease); err != nil {
return err return err
} }
@@ -129,7 +130,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
return nil, nil, errors.Errorf("release has no %d version", previousVersion) return nil, nil, errors.Errorf("release has no %d version", previousVersion)
} }
r.cfg.Log.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion) slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
previousRelease, err := r.cfg.Releases.Get(name, previousVersion) previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
if err != nil { if err != nil {
@@ -162,7 +163,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
if r.DryRun { if r.DryRun {
r.cfg.Log.Debug("dry run", "name", targetRelease.Name) slog.Debug("dry run", "name", targetRelease.Name)
return targetRelease, nil return targetRelease, nil
} }
@@ -181,7 +182,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
return targetRelease, err return targetRelease, err
} }
} else { } else {
r.cfg.Log.Debug("rollback hooks disabled", "name", targetRelease.Name) slog.Debug("rollback hooks disabled", "name", targetRelease.Name)
} }
// It is safe to use "force" here because these are resources currently rendered by the chart. // It is safe to use "force" here because these are resources currently rendered by the chart.
@@ -193,14 +194,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
if err != nil { if err != nil {
msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
r.cfg.Log.Warn(msg) slog.Warn(msg)
currentRelease.Info.Status = release.StatusSuperseded currentRelease.Info.Status = release.StatusSuperseded
targetRelease.Info.Status = release.StatusFailed targetRelease.Info.Status = release.StatusFailed
targetRelease.Info.Description = msg targetRelease.Info.Description = msg
r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease) r.cfg.recordRelease(targetRelease)
if r.CleanupOnFail { if r.CleanupOnFail {
r.cfg.Log.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created)) slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
_, errs := r.cfg.KubeClient.Delete(results.Created) _, errs := r.cfg.KubeClient.Delete(results.Created)
if errs != nil { if errs != nil {
var errorList []string var errorList []string
@@ -209,7 +210,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
} }
return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
} }
r.cfg.Log.Debug("resource cleanup complete") slog.Debug("resource cleanup complete")
} }
return targetRelease, err return targetRelease, err
} }
@@ -220,7 +221,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// levels, we should make these error level logs so users are notified // levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own // that they'll need to go do the cleanup on their own
if err := recreate(r.cfg, results.Updated); err != nil { if err := recreate(r.cfg, results.Updated); err != nil {
r.cfg.Log.Error(err.Error()) slog.Error(err.Error())
} }
} }
waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
@@ -256,7 +257,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
} }
// Supersede all previous deployments, see issue #2941. // Supersede all previous deployments, see issue #2941.
for _, rel := range deployed { for _, rel := range deployed {
r.cfg.Log.Debug("superseding previous deployment", "version", rel.Version) slog.Debug("superseding previous deployment", "version", rel.Version)
rel.Info.Status = release.StatusSuperseded rel.Info.Status = release.StatusSuperseded
r.cfg.recordRelease(rel) r.cfg.recordRelease(rel)
} }

@@ -105,7 +105,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, errors.Errorf("the release named %q is already deleted", name) return nil, errors.Errorf("the release named %q is already deleted", name)
} }
u.cfg.Log.Debug("uninstall: deleting release", "name", name) slog.Debug("uninstall: deleting release", "name", name)
rel.Info.Status = release.StatusUninstalling rel.Info.Status = release.StatusUninstalling
rel.Info.Deleted = helmtime.Now() rel.Info.Deleted = helmtime.Now()
rel.Info.Description = "Deletion in progress (or silently failed)" rel.Info.Description = "Deletion in progress (or silently failed)"
@@ -116,18 +116,18 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return res, err return res, err
} }
} else { } else {
u.cfg.Log.Debug("delete hooks disabled", "release", name) slog.Debug("delete hooks disabled", "release", name)
} }
// From here on out, the release is currently considered to be in StatusUninstalling // From here on out, the release is currently considered to be in StatusUninstalling
// state. // state.
if err := u.cfg.Releases.Update(rel); err != nil { if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
} }
deletedResources, kept, errs := u.deleteRelease(rel) deletedResources, kept, errs := u.deleteRelease(rel)
if errs != nil { if errs != nil {
u.cfg.Log.Debug("uninstall: Failed to delete release", "errors", errs) slog.Debug("uninstall: Failed to delete release", "errors", errs)
return nil, errors.Errorf("failed to delete release: %s", name) return nil, errors.Errorf("failed to delete release: %s", name)
} }
@@ -154,7 +154,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
} }
if !u.KeepHistory { if !u.KeepHistory {
u.cfg.Log.Debug("purge requested", "release", name) slog.Debug("purge requested", "release", name)
err := u.purgeReleases(rels...) err := u.purgeReleases(rels...)
if err != nil { if err != nil {
errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release")) errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
@@ -169,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
} }
if err := u.cfg.Releases.Update(rel); err != nil { if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
} }
if len(errs) > 0 { if len(errs) > 0 {
@@ -226,7 +226,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
} }
if len(resources) > 0 { if len(resources) > 0 {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok {
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation)) _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation))
return resources, kept, errs return resources, kept, errs
} }
_, errs = u.cfg.KubeClient.Delete(resources) _, errs = u.cfg.KubeClient.Delete(resources)
@@ -234,7 +234,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
return resources, kept, errs return resources, kept, errs
} }
func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation { func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
switch cascadingFlag { switch cascadingFlag {
case "orphan": case "orphan":
return v1.DeletePropagationOrphan return v1.DeletePropagationOrphan
@@ -243,7 +243,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro
case "background": case "background":
return v1.DeletePropagationBackground return v1.DeletePropagationBackground
default: default:
cfg.Log.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag) slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
return v1.DeletePropagationBackground return v1.DeletePropagationBackground
} }
} }

@@ -164,7 +164,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
return nil, errors.Errorf("release name is invalid: %s", name) return nil, errors.Errorf("release name is invalid: %s", name)
} }
u.cfg.Log.Debug("preparing upgrade", "name", name) slog.Debug("preparing upgrade", "name", name)
currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -172,7 +172,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
u.cfg.Releases.MaxHistory = u.MaxHistory u.cfg.Releases.MaxHistory = u.MaxHistory
u.cfg.Log.Debug("performing update", "name", name) slog.Debug("performing update", "name", name)
res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
if err != nil { if err != nil {
return res, err return res, err
@@ -180,7 +180,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// Do not update for dry runs // Do not update for dry runs
if !u.isDryRun() { if !u.isDryRun() {
u.cfg.Log.Debug("updating status for upgraded release", "name", name) slog.Debug("updating status for upgraded release", "name", name)
if err := u.cfg.Releases.Update(upgradedRelease); err != nil { if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
return res, err return res, err
} }
@@ -366,7 +366,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
// Run if it is a dry run // Run if it is a dry run
if u.isDryRun() { if u.isDryRun() {
u.cfg.Log.Debug("dry run for release", "name", upgradedRelease.Name) slog.Debug("dry run for release", "name", upgradedRelease.Name)
if len(u.Description) > 0 { if len(u.Description) > 0 {
upgradedRelease.Info.Description = u.Description upgradedRelease.Info.Description = u.Description
} else { } else {
@@ -375,7 +375,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
return upgradedRelease, nil return upgradedRelease, nil
} }
u.cfg.Log.Debug("creating upgraded release", "name", upgradedRelease.Name) slog.Debug("creating upgraded release", "name", upgradedRelease.Name)
if err := u.cfg.Releases.Create(upgradedRelease); err != nil { if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
return nil, err return nil, err
} }
@@ -426,7 +426,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
return return
} }
} else { } else {
u.cfg.Log.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
} }
results, err := u.cfg.KubeClient.Update(current, target, u.Force) results, err := u.cfg.KubeClient.Update(current, target, u.Force)
@@ -442,7 +442,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// levels, we should make these error level logs so users are notified // levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own // that they'll need to go do the cleanup on their own
if err := recreate(u.cfg, results.Updated); err != nil { if err := recreate(u.cfg, results.Updated); err != nil {
u.cfg.Log.Error(err.Error()) slog.Error(err.Error())
} }
} }
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
@@ -487,13 +487,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
u.cfg.Log.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err)) slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err))
rel.Info.Status = release.StatusFailed rel.Info.Status = release.StatusFailed
rel.Info.Description = msg rel.Info.Description = msg
u.cfg.recordRelease(rel) u.cfg.recordRelease(rel)
if u.CleanupOnFail && len(created) > 0 { if u.CleanupOnFail && len(created) > 0 {
u.cfg.Log.Debug("cleanup on fail set", "cleaning_resources", len(created)) slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
_, errs := u.cfg.KubeClient.Delete(created) _, errs := u.cfg.KubeClient.Delete(created)
if errs != nil { if errs != nil {
var errorList []string var errorList []string
@@ -502,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
} }
return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
} }
u.cfg.Log.Debug("resource cleanup complete") slog.Debug("resource cleanup complete")
} }
if u.Atomic { if u.Atomic {
u.cfg.Log.Debug("upgrade failed and atomic is set, rolling back to last successful release") slog.Debug("upgrade failed and atomic is set, rolling back to last successful release")
// As a protection, get the last successful release before rollback. // As a protection, get the last successful release before rollback.
// If there are no successful releases, bail out // If there are no successful releases, bail out
@@ -557,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
if u.ResetValues { if u.ResetValues {
// If ResetValues is set, we completely ignore current.Config. // If ResetValues is set, we completely ignore current.Config.
u.cfg.Log.Debug("resetting values to the chart's original version") slog.Debug("resetting values to the chart's original version")
return newVals, nil return newVals, nil
} }
// If the ReuseValues flag is set, we always copy the old values over the new config's values. // If the ReuseValues flag is set, we always copy the old values over the new config's values.
if u.ReuseValues { if u.ReuseValues {
u.cfg.Log.Debug("reusing the old release's values") slog.Debug("reusing the old release's values")
// We have to regenerate the old coalesced values: // We have to regenerate the old coalesced values:
oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
@@ -580,7 +580,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
// If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
if u.ResetThenReuseValues { if u.ResetThenReuseValues {
u.cfg.Log.Debug("merging values from old release to new values") slog.Debug("merging values from old release to new values")
newVals = chartutil.CoalesceTables(newVals, current.Config) newVals = chartutil.CoalesceTables(newVals, current.Config)
@ -588,7 +588,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
} }
if len(newVals) == 0 && len(current.Config) > 0 { if len(newVals) == 0 && len(current.Config) > 0 {
u.cfg.Log.Debug("copying values from old release", "name", current.Name, "version", current.Version) slog.Debug("copying values from old release", "name", current.Name, "version", current.Version)
newVals = current.Config newVals = current.Config
} }
return newVals, nil return newVals, nil

@ -20,6 +20,7 @@ import (
"flag" "flag"
"fmt" "fmt"
"log" "log"
"log/slog"
"path/filepath" "path/filepath"
"sort" "sort"
"strings" "strings"
@ -82,11 +83,11 @@ func (ws *waitValue) Set(s string) error {
*ws = waitValue(s) *ws = waitValue(s)
return nil return nil
case "true": case "true":
Logger.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
*ws = waitValue(kube.StatusWatcherStrategy) *ws = waitValue(kube.StatusWatcherStrategy)
return nil return nil
case "false": case "false":
Logger.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
*ws = waitValue(kube.HookOnlyStrategy) *ws = waitValue(kube.HookOnlyStrategy)
return nil return nil
default: default:

@ -19,7 +19,6 @@ package cmd
import ( import (
"bytes" "bytes"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
"testing" "testing"
@ -93,7 +92,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
Releases: store, Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities, Capabilities: chartutil.DefaultCapabilities,
Log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
root, err := newRootCmdWithConfig(actionConfig, buf, args) root, err := newRootCmdWithConfig(actionConfig, buf, args)

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@ -229,9 +230,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
} }
func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) { func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
Logger.Debug("Original chart version", "version", client.Version) slog.Debug("Original chart version", "version", client.Version)
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Logger.Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }
@ -246,7 +247,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
return nil, err return nil, err
} }
Logger.Debug("Chart path", "path", cp) slog.Debug("Chart path", "path", cp)
p := getter.All(settings) p := getter.All(settings)
vals, err := valueOpts.MergeValues(p) vals, err := valueOpts.MergeValues(p)
@ -265,7 +266,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
} }
if chartRequested.Metadata.Deprecated { if chartRequested.Metadata.Deprecated {
Logger.Warn("this chart is deprecated") slog.Warn("this chart is deprecated")
} }
if req := chartRequested.Metadata.Dependencies; req != nil { if req := chartRequested.Metadata.Dependencies; req != nil {

@ -71,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc, ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(cmd *cobra.Command, _ []string) error { RunE: func(cmd *cobra.Command, _ []string) error {
if client.AllNamespaces { if client.AllNamespaces {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Logger); err != nil { if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
return err return err
} }
} }

@ -17,6 +17,7 @@ package cmd
import ( import (
"io" "io"
"log/slog"
"os" "os"
"os/exec" "os/exec"
@ -66,7 +67,7 @@ func runHook(p *plugin.Plugin, event string) error {
prog := exec.Command(main, argv...) prog := exec.Command(main, argv...)
Logger.Debug("running hook", "event", event, "program", prog) slog.Debug("running hook", "event", event, "program", prog)
prog.Stdout, prog.Stderr = os.Stdout, os.Stderr prog.Stdout, prog.Stderr = os.Stdout, os.Stderr
if err := prog.Run(); err != nil { if err := prog.Run(); err != nil {

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -79,7 +80,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error {
return err return err
} }
Logger.Debug("loading plugin", "path", i.Path()) slog.Debug("loading plugin", "path", i.Path())
p, err := plugin.LoadDir(i.Path()) p, err := plugin.LoadDir(i.Path())
if err != nil { if err != nil {
return errors.Wrap(err, "plugin is installed but unusable") return errors.Wrap(err, "plugin is installed but unusable")

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"github.com/gosuri/uitable" "github.com/gosuri/uitable"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -32,7 +33,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command {
Short: "list installed Helm plugins", Short: "list installed Helm plugins",
ValidArgsFunction: noMoreArgsCompFunc, ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error { RunE: func(_ *cobra.Command, _ []string) error {
Logger.Debug("pluginDirs", "directory", settings.PluginsDirectory) slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
@ -60,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error {
} }
func (o *pluginUninstallOptions) run(out io.Writer) error { func (o *pluginUninstallOptions) run(out io.Writer) error {
Logger.Debug("loading installer plugins", "dir", settings.PluginsDirectory) slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"path/filepath" "path/filepath"
"strings" "strings"
@ -62,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error {
func (o *pluginUpdateOptions) run(out io.Writer) error { func (o *pluginUpdateOptions) run(out io.Writer) error {
installer.Debug = settings.Debug installer.Debug = settings.Debug
Logger.Debug("loading installed plugins", "path", settings.PluginsDirectory) slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err
@ -104,7 +105,7 @@ func updatePlugin(p *plugin.Plugin) error {
return err return err
} }
Logger.Debug("loading plugin", "path", i.Path()) slog.Debug("loading plugin", "path", i.Path())
updatedPlugin, err := plugin.LoadDir(i.Path()) updatedPlugin, err := plugin.LoadDir(i.Path())
if err != nil { if err != nil {
return err return err

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
RunE: func(_ *cobra.Command, args []string) error { RunE: func(_ *cobra.Command, args []string) error {
client.Settings = settings client.Settings = settings
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Logger.Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }

@ -21,6 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
@ -122,7 +123,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
} }
} }
} else { } else {
Logger.Warn("using --password via the CLI is insecure. Use --password-stdin") slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
} }
return username, password, nil return username, password, nil

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"net/http" "net/http"
"os" "os"
"strings" "strings"
@ -95,7 +96,6 @@ By default, the default directories depend on the Operating System. The defaults
` `
var settings = cli.New() var settings = cli.New()
var Logger = cli.NewLogger(settings)
func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
actionConfig := new(action.Configuration) actionConfig := new(action.Configuration)
@ -105,7 +105,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
} }
cobra.OnInitialize(func() { cobra.OnInitialize(func() {
helmDriver := os.Getenv("HELM_DRIVER") helmDriver := os.Getenv("HELM_DRIVER")
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Logger); err != nil { if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
log.Fatal(err) log.Fatal(err)
} }
if helmDriver == "memory" { if helmDriver == "memory" {
@ -139,6 +139,9 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
settings.AddFlags(flags) settings.AddFlags(flags)
addKlogFlags(flags) addKlogFlags(flags)
logger := cli.NewLogger(settings)
slog.SetDefault(logger)
// Setup shell completion for the namespace flag // Setup shell completion for the namespace flag
err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
if client, err := actionConfig.KubernetesClientSet(); err == nil { if client, err := actionConfig.KubernetesClientSet(); err == nil {

@ -90,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error {
q := strings.Join(args, " ") q := strings.Join(args, " ")
results, err := c.Search(q) results, err := c.Search(q)
if err != nil { if err != nil {
Logger.Debug("search failed", slog.Any("error", err)) slog.Debug("search failed", slog.Any("error", err))
return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
} }

@ -131,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error {
} }
func (o *searchRepoOptions) setupSearchedVersion() { func (o *searchRepoOptions) setupSearchedVersion() {
Logger.Debug("original chart version", "version", o.version) slog.Debug("original chart version", "version", o.version)
if o.version != "" { if o.version != "" {
return return
} }
if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases). if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
Logger.Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
o.version = ">0.0.0-0" o.version = ">0.0.0-0"
} else { // search only for stable releases, prerelease versions will be skipped } else { // search only for stable releases, prerelease versions will be skipped
Logger.Debug("setting version to >0.0.0") slog.Debug("setting version to >0.0.0")
o.version = ">0.0.0" o.version = ">0.0.0"
} }
} }
@ -190,7 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
ind, err := repo.LoadIndexFile(f) ind, err := repo.LoadIndexFile(f)
if err != nil { if err != nil {
Logger.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err)) slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err))
continue continue
} }

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -211,9 +212,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) {
} }
func runShow(args []string, client *action.Show) (string, error) { func runShow(args []string, client *action.Show) (string, error) {
Logger.Debug("original chart version", "version", client.Version) slog.Debug("original chart version", "version", client.Version)
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Logger.Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@ -173,7 +174,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
} }
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Logger.Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }
@ -225,7 +226,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
} }
if ch.Metadata.Deprecated { if ch.Metadata.Deprecated {
Logger.Warn("this chart is deprecated") slog.Warn("this chart is deprecated")
} }
// Create context and prepare the handle of SIGTERM // Create context and prepare the handle of SIGTERM

@ -74,7 +74,6 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower // needs. The smaller surface area of the interface means there is a lower
// chance of it changing. // chance of it changing.
Factory Factory Factory Factory
Log *slog.Logger
// Namespace allows to bypass the kubeconfig file for the choice of the namespace // Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string Namespace string
@ -121,7 +120,6 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) {
return &statusWaiter{ return &statusWaiter{
restMapper: restMapper, restMapper: restMapper,
client: dynamicClient, client: dynamicClient,
log: c.Log,
}, nil }, nil
} }
@ -132,7 +130,7 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &legacyWaiter{kubeClient: kc, log: c.Log}, nil return &legacyWaiter{kubeClient: kc}, nil
case StatusWatcherStrategy: case StatusWatcherStrategy:
return c.newStatusWatcher() return c.newStatusWatcher()
case HookOnlyStrategy: case HookOnlyStrategy:
@ -163,7 +161,6 @@ func New(getter genericclioptions.RESTClientGetter) *Client {
factory := cmdutil.NewFactory(getter) factory := cmdutil.NewFactory(getter)
c := &Client{ c := &Client{
Factory: factory, Factory: factory,
Log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
return c return c
} }
@ -197,7 +194,7 @@ func (c *Client) IsReachable() error {
// Create creates Kubernetes resources specified in the resource list. // Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) { func (c *Client) Create(resources ResourceList) (*Result, error) {
c.Log.Debug("creating resource(s)", "resources", len(resources)) slog.Debug("creating resource(s)", "resources", len(resources))
if err := perform(resources, createResource); err != nil { if err := perform(resources, createResource); err != nil {
return nil, err return nil, err
} }
@ -249,7 +246,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil { if err != nil {
c.Log.Warn("get the relation pod is failed", slog.Any("error", err)) slog.Warn("get the relation pod is failed", slog.Any("error", err))
} }
} }
} }
@ -267,7 +264,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil { if info == nil {
return objs, nil return objs, nil
} }
c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
selector, ok, _ := getSelectorFromObject(info.Object) selector, ok, _ := getSelectorFromObject(info.Object)
if !ok { if !ok {
return objs, nil return objs, nil
@ -409,7 +406,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
updateErrors := []string{} updateErrors := []string{}
res := &Result{} res := &Result{}
c.Log.Debug("checking resources for changes", "resources", len(target)) slog.Debug("checking resources for changes", "resources", len(target))
err := target.Visit(func(info *resource.Info, err error) error { err := target.Visit(func(info *resource.Info, err error) error {
if err != nil { if err != nil {
return err return err
@ -430,7 +427,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
kind := info.Mapping.GroupVersionKind.Kind kind := info.Mapping.GroupVersionKind.Kind
c.Log.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind) slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind)
return nil return nil
} }
@ -441,7 +438,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
if err := updateResource(c, info, originalInfo.Object, force); err != nil { if err := updateResource(c, info, originalInfo.Object, force); err != nil {
c.Log.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
updateErrors = append(updateErrors, err.Error()) updateErrors = append(updateErrors, err.Error())
} }
// Because we check for errors later, append the info regardless // Because we check for errors later, append the info regardless
@ -458,22 +455,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
for _, info := range original.Difference(target) { for _, info := range original.Difference(target) {
c.Log.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
if err := info.Get(); err != nil { if err := info.Get(); err != nil {
c.Log.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue continue
} }
annotations, err := metadataAccessor.Annotations(info.Object) annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil { if err != nil {
c.Log.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
} }
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
c.Log.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue continue
} }
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
c.Log.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue continue
} }
res.Deleted = append(res.Deleted, info) res.Deleted = append(res.Deleted, info)
@ -497,16 +494,16 @@ func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy meta
return rdelete(c, resources, policy) return rdelete(c, resources, policy)
} }
func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error var errs []error
res := &Result{} res := &Result{}
mtx := sync.Mutex{} mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error { err := perform(resources, func(info *resource.Info) error {
c.Log.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info, propagation) err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) { if err == nil || apierrors.IsNotFound(err) {
if err != nil { if err != nil {
c.Log.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
} }
mtx.Lock() mtx.Lock()
defer mtx.Unlock() defer mtx.Unlock()
@ -640,7 +637,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
return patch, types.StrategicMergePatchType, err return patch, types.StrategicMergePatchType, err
} }
func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error { func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
var ( var (
obj runtime.Object obj runtime.Object
helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
@ -654,7 +651,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
if err != nil { if err != nil {
return errors.Wrap(err, "failed to replace object") return errors.Wrap(err, "failed to replace object")
} }
c.Log.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind) slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
} else { } else {
patch, patchType, err := createPatch(target, currentObj) patch, patchType, err := createPatch(target, currentObj)
if err != nil { if err != nil {
@ -662,7 +659,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
} }
if patch == nil || string(patch) == "{}" { if patch == nil || string(patch) == "{}" {
c.Log.Debug("no changes detected", "kind", kind, "name", target.Name) slog.Debug("no changes detected", "kind", kind, "name", target.Name)
// This needs to happen to make sure that Helm has the latest info from the API // This needs to happen to make sure that Helm has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic // Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil { if err := target.Get(); err != nil {
@ -671,7 +668,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil return nil
} }
// send patch to server // send patch to server
c.Log.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil { if err != nil {
return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind) return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)

@ -19,7 +19,6 @@ package kube
import ( import (
"bytes" "bytes"
"io" "io"
"log/slog"
"net/http" "net/http"
"strings" "strings"
"testing" "testing"
@ -108,7 +107,6 @@ func newTestClient(t *testing.T) *Client {
return &Client{ return &Client{
Factory: testFactory.WithNamespace("default"), Factory: testFactory.WithNamespace("default"),
Log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
} }

@ -19,7 +19,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"log/slog" "log/slog"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
@ -59,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults. // be used to override defaults.
func NewReadyChecker(cl kubernetes.Interface, logger *slog.Logger, opts ...ReadyCheckerOption) ReadyChecker { func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{ c := ReadyChecker{
client: cl, client: cl,
log: logger,
}
if c.log == nil {
c.log = slog.New(slog.NewTextHandler(io.Discard, nil))
} }
for _, opt := range opts { for _, opt := range opts {
opt(&c) opt(&c)
@ -76,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, logger *slog.Logger, opts ...Ready
// ReadyChecker is a type that can check core Kubernetes types for readiness. // ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct { type ReadyChecker struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -232,18 +226,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true return true
} }
} }
c.log.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName()) slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false return false
} }
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit { if job.Status.Failed > *job.Spec.BackoffLimit {
c.log.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName()) slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error // If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
} }
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
c.log.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName()) slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil return false, nil
} }
return true, nil return true, nil
@ -257,7 +251,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty // Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" { if s.Spec.ClusterIP == "" {
c.log.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName()) slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false return false
} }
@ -265,12 +259,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer { if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set // do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 { if len(s.Spec.ExternalIPs) > 0 {
c.log.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs) slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true return true
} }
if s.Status.LoadBalancer.Ingress == nil { if s.Status.LoadBalancer.Ingress == nil {
c.log.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName()) slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false return false
} }
} }
@ -280,7 +274,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound { if v.Status.Phase != corev1.ClaimBound {
c.log.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName()) slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false return false
} }
return true return true
@ -293,13 +287,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
} }
// Verify the generation observed by the deployment controller matches the spec generation // Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation)
return false return false
} }
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) { if !(rs.Status.ReadyReplicas >= expectedReady) {
c.log.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false return false
} }
return true return true
@ -308,7 +302,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation // Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
c.log.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation) slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation)
return false return false
} }
@ -319,7 +313,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled // Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
c.log.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false return false
} }
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@ -332,7 +326,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
if !(int(ds.Status.NumberReady) >= expectedReady) { if !(int(ds.Status.NumberReady) >= expectedReady) {
c.log.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false return false
} }
return true return true
@ -384,13 +378,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation // Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
c.log.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation)
return false return false
} }
// If the update strategy is not a rolling update, there will be nothing to wait for // If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
c.log.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type) slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true return true
} }
@ -416,30 +410,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled // Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas { if int(sts.Status.UpdatedReplicas) < expectedReplicas {
c.log.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false return false
} }
if int(sts.Status.ReadyReplicas) != replicas { if int(sts.Status.ReadyReplicas) != replicas {
c.log.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false return false
} }
// This check only makes sense when all partitions are being upgraded otherwise during a // This check only makes sense when all partitions are being upgraded otherwise during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to // partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error. // error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision) slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false return false
} }
c.log.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true return true
} }
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation // Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
c.log.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation)
return false return false
} }
return true return true
@ -448,7 +442,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation // Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
c.log.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation)
return false return false
} }
return true return true

@ -17,8 +17,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import ( import (
"context" "context"
"io"
"log/slog"
"testing" "testing"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
@ -39,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -59,7 +56,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod", name: "IsReady Pod",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -75,7 +71,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod returns error", name: "IsReady Pod returns error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -92,7 +87,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -115,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -135,7 +128,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job error while getting job", name: "IsReady Job error while getting job",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -151,7 +143,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job", name: "IsReady Job",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -168,7 +159,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -190,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -211,7 +200,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments error while getting current Deployment", name: "IsReady Deployments error while getting current Deployment",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -228,7 +216,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments", //TODO fix this one name: "IsReady Deployments", //TODO fix this one
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -246,7 +233,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -272,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -292,7 +277,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim", name: "IsReady PersistentVolumeClaim",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -308,7 +292,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim with error", name: "IsReady PersistentVolumeClaim with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -325,7 +308,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -347,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -367,7 +348,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service", name: "IsReady Service",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -383,7 +363,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service with error", name: "IsReady Service with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -400,7 +379,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -422,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -442,7 +419,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet", name: "IsReady DaemonSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -458,7 +434,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet with error", name: "IsReady DaemonSet with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -475,7 +450,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -497,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -517,7 +490,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet", name: "IsReady StatefulSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -533,7 +505,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet with error", name: "IsReady StatefulSet with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -550,7 +521,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -572,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -592,7 +561,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController", name: "IsReady ReplicationController",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -608,7 +576,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController with error", name: "IsReady ReplicationController with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -624,7 +591,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController and pods not ready for object", name: "IsReady ReplicationController and pods not ready for object",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -641,7 +607,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -663,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log *slog.Logger
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -683,7 +647,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet", name: "IsReady ReplicaSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -699,7 +662,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet not ready", name: "IsReady ReplicaSet not ready",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -716,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }

@ -43,7 +43,6 @@ import (
type statusWaiter struct { type statusWaiter struct {
client dynamic.Interface client dynamic.Interface
restMapper meta.RESTMapper restMapper meta.RESTMapper
log *slog.Logger
} }
func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
@ -56,7 +55,7 @@ func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
@ -77,7 +76,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.wait(ctx, resourceList, sw) return w.wait(ctx, resourceList, sw)
} }
@ -85,7 +84,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
@ -96,7 +95,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw) return w.waitForDelete(ctx, resourceList, sw)
} }
@ -114,7 +113,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL
} }
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources) statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log)) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
<-done <-done
if statusCollector.Error != nil { if statusCollector.Error != nil {
@ -157,7 +156,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources) statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log)) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
<-done <-done
if statusCollector.Error != nil { if statusCollector.Error != nil {
@ -180,7 +179,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
return nil return nil
} }
func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc { func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus
@ -210,7 +209,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logger *sl
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
}) })
first := nonDesiredResources[0] first := nonDesiredResources[0]
logger.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
} }
} }
} }

@ -18,8 +18,6 @@ package kube // import "helm.sh/helm/v3/pkg/kube"
import ( import (
"errors" "errors"
"io"
"log/slog"
"testing" "testing"
"time" "time"
@ -219,7 +217,6 @@ func TestStatusWaitForDelete(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
restMapper: fakeMapper, restMapper: fakeMapper,
client: fakeClient, client: fakeClient,
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
for _, objToCreate := range objsToCreate { for _, objToCreate := range objsToCreate {
@ -260,7 +257,6 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
restMapper: fakeMapper, restMapper: fakeMapper,
client: fakeClient, client: fakeClient,
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
// Don't create the object to test that the wait for delete works when the object doesn't exist // Don't create the object to test that the wait for delete works when the object doesn't exist
objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
@ -319,7 +315,6 @@ func TestStatusWait(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {
@ -373,7 +368,6 @@ func TestWaitForJobComplete(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {
@ -433,7 +427,6 @@ func TestWatchForReady(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: slog.New(slog.NewTextHandler(io.Discard, nil)),
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {

@ -50,24 +50,23 @@ import (
// Helm 4 now uses the StatusWaiter implementation instead // Helm 4 now uses the StatusWaiter implementation instead
type legacyWaiter struct { type legacyWaiter struct {
c ReadyChecker c ReadyChecker
log *slog.Logger
kubeClient *kubernetes.Clientset kubeClient *kubernetes.Clientset
} }
func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
return hw.waitForResources(resources, timeout) return hw.waitForResources(resources, timeout)
} }
func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
return hw.waitForResources(resources, timeout) return hw.waitForResources(resources, timeout)
} }
// waitForResources polls to get the current status of all pods, PVCs, Services and // waitForResources polls to get the current status of all pods, PVCs, Services and
// Jobs(optional) until all are ready or a timeout is reached // Jobs(optional) until all are ready or a timeout is reached
func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
hw.log.Debug("beginning wait for resources", "count", len(created), "timeout", timeout) slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
@ -85,10 +84,10 @@ func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Dura
if waitRetries > 0 && hw.isRetryableError(err, v) { if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++ numberOfErrors[i]++
if numberOfErrors[i] > waitRetries { if numberOfErrors[i] > waitRetries {
hw.log.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i]) slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
return false, err return false, err
} }
hw.log.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries) slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
return false, nil return false, nil
} }
numberOfErrors[i] = 0 numberOfErrors[i] = 0
@ -104,14 +103,14 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo
if err == nil { if err == nil {
return false return false
} }
hw.log.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err)) slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
if ev, ok := err.(*apierrors.StatusError); ok { if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code statusCode := ev.Status().Code
retryable := hw.isRetryableHTTPStatusCode(statusCode) retryable := hw.isRetryableHTTPStatusCode(statusCode)
hw.log.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable) slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
return retryable return retryable
} }
hw.log.Debug("retryable error assumed", "resource", resource.Name) slog.Debug("retryable error assumed", "resource", resource.Name)
return true return true
} }
@ -121,7 +120,7 @@ func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
hw.log.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout) slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
startTime := time.Now() startTime := time.Now()
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
@ -139,9 +138,9 @@ func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duratio
elapsed := time.Since(startTime).Round(time.Second) elapsed := time.Since(startTime).Round(time.Second)
if err != nil { if err != nil {
hw.log.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err)) slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err))
} else { } else {
hw.log.Debug("wait for resources succeeded", "elapsed", elapsed) slog.Debug("wait for resources succeeded", "elapsed", elapsed)
} }
return err return err
@ -249,7 +248,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
return nil return nil
} }
hw.log.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout) slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
// Use a selector on the name of the resource. This should be unique for the // Use a selector on the name of the resource. This should be unique for the
// given version and kind // given version and kind
@ -277,7 +276,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
// we get. We care mostly about jobs, where what we want to see is // we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet // the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks. // we don't really do anything to support these as hooks.
hw.log.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type) slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
switch kind { switch kind {
case "Job": case "Job":
@ -287,11 +286,11 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
} }
return true, nil return true, nil
case watch.Deleted: case watch.Deleted:
hw.log.Debug("deleted event received", "resource", info.Name) slog.Debug("deleted event received", "resource", info.Name)
return true, nil return true, nil
case watch.Error: case watch.Error:
// Handle error and return with an error. // Handle error and return with an error.
hw.log.Error("error event received", "resource", info.Name) slog.Error("error event received", "resource", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name) return true, errors.Errorf("failed to deploy %s", info.Name)
default: default:
return false, nil return false, nil
@ -313,12 +312,12 @@ func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error
if c.Type == batchv1.JobComplete && c.Status == "True" { if c.Type == batchv1.JobComplete && c.Status == "True" {
return true, nil return true, nil
} else if c.Type == batchv1.JobFailed && c.Status == "True" { } else if c.Type == batchv1.JobFailed && c.Status == "True" {
hw.log.Error("job failed", "job", name, "reason", c.Reason) slog.Error("job failed", "job", name, "reason", c.Reason)
return true, errors.Errorf("job %s failed: %s", name, c.Reason) return true, errors.Errorf("job %s failed: %s", name, c.Reason)
} }
} }
hw.log.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded) slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
return false, nil return false, nil
} }
@ -333,15 +332,15 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool
switch o.Status.Phase { switch o.Status.Phase {
case corev1.PodSucceeded: case corev1.PodSucceeded:
hw.log.Debug("pod succeeded", "pod", o.Name) slog.Debug("pod succeeded", "pod", o.Name)
return true, nil return true, nil
case corev1.PodFailed: case corev1.PodFailed:
hw.log.Error("pod failed", "pod", o.Name) slog.Error("pod failed", "pod", o.Name)
return true, errors.Errorf("pod %s failed", o.Name) return true, errors.Errorf("pod %s failed", o.Name)
case corev1.PodPending: case corev1.PodPending:
hw.log.Debug("pod pending", "pod", o.Name) slog.Debug("pod pending", "pod", o.Name)
case corev1.PodRunning: case corev1.PodRunning:
hw.log.Debug("pod running", "pod", o.Name) slog.Debug("pod running", "pod", o.Name)
} }
return false, nil return false, nil

@ -44,7 +44,6 @@ const ConfigMapsDriverName = "ConfigMap"
// ConfigMapsInterface. // ConfigMapsInterface.
type ConfigMaps struct { type ConfigMaps struct {
impl corev1.ConfigMapInterface impl corev1.ConfigMapInterface
Log *slog.Logger
} }
// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
@ -70,13 +69,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
cfgmaps.Log.Debug("failed to get release", "key", key, slog.Any("error", err)) slog.Debug("failed to get release", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
// found the configmap, decode the base64 data string // found the configmap, decode the base64 data string
r, err := decodeRelease(obj.Data["release"]) r, err := decodeRelease(obj.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to decode data", "key", key, slog.Any("error", err)) slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
@ -93,7 +92,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
list, err := cfgmaps.impl.List(context.Background(), opts) list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to list releases", slog.Any("error", err)) slog.Debug("failed to list releases", slog.Any("error", err))
return nil, err return nil, err
} }
@ -104,7 +103,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"]) rls, err := decodeRelease(item.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to decode release", "item", item, slog.Any("error", err)) slog.Debug("failed to decode release", "item", item, slog.Any("error", err))
continue continue
} }
@ -132,7 +131,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
list, err := cfgmaps.impl.List(context.Background(), opts) list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to query with labels", slog.Any("error", err)) slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err return nil, err
} }
@ -144,7 +143,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"]) rls, err := decodeRelease(item.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to decode release", slog.Any("error", err)) slog.Debug("failed to decode release", slog.Any("error", err))
continue continue
} }
rls.Labels = item.ObjectMeta.Labels rls.Labels = item.ObjectMeta.Labels
@ -166,7 +165,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
// create a new configmap to hold the release // create a new configmap to hold the release
obj, err := newConfigMapsObject(key, rls, lbs) obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err)) slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err return err
} }
// push the configmap object out into the kubiverse // push the configmap object out into the kubiverse
@ -175,7 +174,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
return ErrReleaseExists return ErrReleaseExists
} }
cfgmaps.Log.Debug("failed to create release", slog.Any("error", err)) slog.Debug("failed to create release", slog.Any("error", err))
return err return err
} }
return nil return nil
@ -194,13 +193,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
// create a new configmap object to hold the release // create a new configmap object to hold the release
obj, err := newConfigMapsObject(key, rls, lbs) obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err)) slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err return err
} }
// push the configmap object out into the kubiverse // push the configmap object out into the kubiverse
_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
if err != nil { if err != nil {
cfgmaps.Log.Debug("failed to update release", slog.Any("error", err)) slog.Debug("failed to update release", slog.Any("error", err))
return err return err
} }
return nil return nil

@ -19,8 +19,6 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"log/slog"
"testing" "testing"
sqlmock "github.com/DATA-DOG/go-sqlmock" sqlmock "github.com/DATA-DOG/go-sqlmock"
@ -266,6 +264,5 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock)
db: sqlxDB, db: sqlxDB,
namespace: "default", namespace: "default",
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
Log: slog.New(slog.NewTextHandler(io.Discard, nil)),
}, mock }, mock
} }

@ -44,7 +44,6 @@ const SecretsDriverName = "Secret"
// SecretsInterface. // SecretsInterface.
type Secrets struct { type Secrets struct {
impl corev1.SecretInterface impl corev1.SecretInterface
Log *slog.Logger
} }
// NewSecrets initializes a new Secrets wrapping an implementation of // NewSecrets initializes a new Secrets wrapping an implementation of
@ -96,7 +95,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release,
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"])) rls, err := decodeRelease(string(item.Data["release"]))
if err != nil { if err != nil {
secrets.Log.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err)) slog.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err))
continue continue
} }
@ -135,7 +134,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"])) rls, err := decodeRelease(string(item.Data["release"]))
if err != nil { if err != nil {
secrets.Log.Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err))
continue continue
} }
rls.Labels = item.ObjectMeta.Labels rls.Labels = item.ObjectMeta.Labels

@ -87,8 +87,6 @@ type SQL struct {
db *sqlx.DB db *sqlx.DB
namespace string namespace string
statementBuilder sq.StatementBuilderType statementBuilder sq.StatementBuilderType
Log *slog.Logger
} }
// Name returns the name of the driver. // Name returns the name of the driver.
@ -109,13 +107,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
migrate.SetDisableCreateTable(false) migrate.SetDisableCreateTable(false)
if err != nil { if err != nil {
s.Log.Debug("failed to get migration records", slog.Any("error", err)) slog.Debug("failed to get migration records", slog.Any("error", err))
return false return false
} }
for _, record := range records { for _, record := range records {
if _, ok := migrationsIDs[record.Id]; ok { if _, ok := migrationsIDs[record.Id]; ok {
s.Log.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt) slog.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt)
delete(migrationsIDs, record.Id) delete(migrationsIDs, record.Id)
} }
} }
@ -123,7 +121,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
// check if all migrations applied // check if all migrations applied
if len(migrationsIDs) != 0 { if len(migrationsIDs) != 0 {
for id := range migrationsIDs { for id := range migrationsIDs {
s.Log.Debug("find unapplied migration", "id", id) slog.Debug("find unapplied migration", "id", id)
} }
return false return false
} }
@ -277,7 +275,7 @@ type SQLReleaseCustomLabelWrapper struct {
} }
// NewSQL initializes a new sql driver. // NewSQL initializes a new sql driver.
func NewSQL(connectionString string, logger *slog.Logger, namespace string) (*SQL, error) { func NewSQL(connectionString string, namespace string) (*SQL, error) {
db, err := sqlx.Connect(postgreSQLDialect, connectionString) db, err := sqlx.Connect(postgreSQLDialect, connectionString)
if err != nil { if err != nil {
return nil, err return nil, err
@ -285,7 +283,6 @@ func NewSQL(connectionString string, logger *slog.Logger, namespace string) (*SQ
driver := &SQL{ driver := &SQL{
db: db, db: db,
Log: logger,
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
} }
@ -310,24 +307,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
query, args, err := qb.ToSql() query, args, err := qb.ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build query", slog.Any("error", err)) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
// Get will return an error if the result is empty // Get will return an error if the result is empty
if err := s.db.Get(&record, query, args...); err != nil { if err := s.db.Get(&record, query, args...); err != nil {
s.Log.Debug("got SQL error when getting release", "key", key, slog.Any("error", err)) slog.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log.Debug("failed to decode data", "key", key, slog.Any("error", err)) slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -348,13 +345,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
query, args, err := sb.ToSql() query, args, err := sb.ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build query", slog.Any("error", err)) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
var records = []SQLReleaseWrapper{} var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil { if err := s.db.Select(&records, query, args...); err != nil {
s.Log.Debug("failed to list", slog.Any("error", err)) slog.Debug("failed to list", slog.Any("error", err))
return nil, err return nil, err
} }
@ -362,12 +359,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
for _, record := range records { for _, record := range records {
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err)) slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue continue
} }
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err return nil, err
} }
for k, v := range getReleaseSystemLabels(release) { for k, v := range getReleaseSystemLabels(release) {
@ -397,7 +394,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
if _, ok := labelMap[key]; ok { if _, ok := labelMap[key]; ok {
sb = sb.Where(sq.Eq{key: labels[key]}) sb = sb.Where(sq.Eq{key: labels[key]})
} else { } else {
s.Log.Debug("unknown label", "key", key) slog.Debug("unknown label", "key", key)
return nil, fmt.Errorf("unknown label %s", key) return nil, fmt.Errorf("unknown label %s", key)
} }
} }
@ -410,13 +407,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
// Build our query // Build our query
query, args, err := sb.ToSql() query, args, err := sb.ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build query", slog.Any("error", err)) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
var records = []SQLReleaseWrapper{} var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil { if err := s.db.Select(&records, query, args...); err != nil {
s.Log.Debug("failed to query with labels", slog.Any("error", err)) slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err return nil, err
} }
@ -428,12 +425,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
for _, record := range records { for _, record := range records {
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err)) slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue continue
} }
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -457,13 +454,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls) body, err := encodeRelease(rls)
if err != nil { if err != nil {
s.Log.Debug("failed to encode release", slog.Any("error", err)) slog.Debug("failed to encode release", slog.Any("error", err))
return err return err
} }
transaction, err := s.db.Beginx() transaction, err := s.db.Beginx()
if err != nil { if err != nil {
s.Log.Debug("failed to start SQL transaction", slog.Any("error", err)) slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return fmt.Errorf("error beginning transaction: %v", err) return fmt.Errorf("error beginning transaction: %v", err)
} }
@ -492,7 +489,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
int(time.Now().Unix()), int(time.Now().Unix()),
).ToSql() ).ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build insert query", slog.Any("error", err)) slog.Debug("failed to build insert query", slog.Any("error", err))
return err return err
} }
@ -506,17 +503,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if buildErr != nil { if buildErr != nil {
s.Log.Debug("failed to build select query", "error", buildErr) slog.Debug("failed to build select query", "error", buildErr)
return err return err
} }
var record SQLReleaseWrapper var record SQLReleaseWrapper
if err := transaction.Get(&record, selectQuery, args...); err == nil { if err := transaction.Get(&record, selectQuery, args...); err == nil {
s.Log.Debug("release already exists", "key", key) slog.Debug("release already exists", "key", key)
return ErrReleaseExists return ErrReleaseExists
} }
s.Log.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err)) slog.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
return err return err
} }
@ -539,13 +536,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
if err != nil { if err != nil {
defer transaction.Rollback() defer transaction.Rollback()
s.Log.Debug("failed to build insert query", slog.Any("error", err)) slog.Debug("failed to build insert query", slog.Any("error", err))
return err return err
} }
if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil { if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
defer transaction.Rollback() defer transaction.Rollback()
s.Log.Debug("failed to write Labels", slog.Any("error", err)) slog.Debug("failed to write Labels", slog.Any("error", err))
return err return err
} }
} }
@ -564,7 +561,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls) body, err := encodeRelease(rls)
if err != nil { if err != nil {
s.Log.Debug("failed to encode release", slog.Any("error", err)) slog.Debug("failed to encode release", slog.Any("error", err))
return err return err
} }
@ -581,12 +578,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
ToSql() ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build update query", slog.Any("error", err)) slog.Debug("failed to build update query", slog.Any("error", err))
return err return err
} }
if _, err := s.db.Exec(query, args...); err != nil { if _, err := s.db.Exec(query, args...); err != nil {
s.Log.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err)) slog.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
return err return err
} }
@ -597,7 +594,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
func (s *SQL) Delete(key string) (*rspb.Release, error) { func (s *SQL) Delete(key string) (*rspb.Release, error) {
transaction, err := s.db.Beginx() transaction, err := s.db.Beginx()
if err != nil { if err != nil {
s.Log.Debug("failed to start SQL transaction", slog.Any("error", err)) slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return nil, fmt.Errorf("error beginning transaction: %v", err) return nil, fmt.Errorf("error beginning transaction: %v", err)
} }
@ -608,20 +605,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build select query", slog.Any("error", err)) slog.Debug("failed to build select query", slog.Any("error", err))
return nil, err return nil, err
} }
var record SQLReleaseWrapper var record SQLReleaseWrapper
err = transaction.Get(&record, selectQuery, args...) err = transaction.Get(&record, selectQuery, args...)
if err != nil { if err != nil {
s.Log.Debug("release not found", "key", key, slog.Any("error", err)) slog.Debug("release not found", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log.Debug("failed to decode release", "key", key, slog.Any("error", err)) slog.Debug("failed to decode release", "key", key, slog.Any("error", err))
transaction.Rollback() transaction.Rollback()
return nil, err return nil, err
} }
@ -633,18 +630,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build delete query", slog.Any("error", err)) slog.Debug("failed to build delete query", slog.Any("error", err))
return nil, err return nil, err
} }
_, err = transaction.Exec(deleteQuery, args...) _, err = transaction.Exec(deleteQuery, args...)
if err != nil { if err != nil {
s.Log.Debug("failed perform delete query", slog.Any("error", err)) slog.Debug("failed perform delete query", slog.Any("error", err))
return release, err return release, err
} }
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -655,7 +652,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
ToSql() ToSql()
if err != nil { if err != nil {
s.Log.Debug("failed to build delete Labels query", slog.Any("error", err)) slog.Debug("failed to build delete Labels query", slog.Any("error", err))
return nil, err return nil, err
} }
_, err = transaction.Exec(deleteCustomLabelsQuery, args...) _, err = transaction.Exec(deleteCustomLabelsQuery, args...)

@ -18,6 +18,7 @@ package storage // import "helm.sh/helm/v4/pkg/storage"
import ( import (
"fmt" "fmt"
"log/slog"
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -42,15 +43,13 @@ type Storage struct {
// be retained, including the most recent release. Values of 0 or less are // be retained, including the most recent release. Values of 0 or less are
// ignored (meaning no limits are imposed). // ignored (meaning no limits are imposed).
MaxHistory int MaxHistory int
Log func(string, ...interface{})
} }
// Get retrieves the release from storage. An error is returned // Get retrieves the release from storage. An error is returned
// if the storage driver failed to fetch the release, or the // if the storage driver failed to fetch the release, or the
// release identified by the key, version pair does not exist. // release identified by the key, version pair does not exist.
func (s *Storage) Get(name string, version int) (*rspb.Release, error) { func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
s.Log("getting release %q", makeKey(name, version)) slog.Debug("getting release", "key", makeKey(name, version))
return s.Driver.Get(makeKey(name, version)) return s.Driver.Get(makeKey(name, version))
} }
@ -58,7 +57,7 @@ func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
// error is returned if the storage driver fails to store the // error is returned if the storage driver fails to store the
// release, or a release with an identical key already exists. // release, or a release with an identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error { func (s *Storage) Create(rls *rspb.Release) error {
s.Log("creating release %q", makeKey(rls.Name, rls.Version)) slog.Debug("creating release", "key", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 { if s.MaxHistory > 0 {
// Want to make space for one more release. // Want to make space for one more release.
if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil && if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
@ -73,7 +72,7 @@ func (s *Storage) Create(rls *rspb.Release) error {
// storage backend fails to update the release or if the release // storage backend fails to update the release or if the release
// does not exist. // does not exist.
func (s *Storage) Update(rls *rspb.Release) error { func (s *Storage) Update(rls *rspb.Release) error {
s.Log("updating release %q", makeKey(rls.Name, rls.Version)) slog.Debug("updating release", "key", makeKey(rls.Name, rls.Version))
return s.Driver.Update(makeKey(rls.Name, rls.Version), rls) return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
} }
@ -81,21 +80,21 @@ func (s *Storage) Update(rls *rspb.Release) error {
// the storage backend fails to delete the release or if the release // the storage backend fails to delete the release or if the release
// does not exist. // does not exist.
func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
s.Log("deleting release %q", makeKey(name, version)) slog.Debug("deleting release", "key", makeKey(name, version))
return s.Driver.Delete(makeKey(name, version)) return s.Driver.Delete(makeKey(name, version))
} }
// ListReleases returns all releases from storage. An error is returned if the // ListReleases returns all releases from storage. An error is returned if the
// storage backend fails to retrieve the releases. // storage backend fails to retrieve the releases.
func (s *Storage) ListReleases() ([]*rspb.Release, error) { func (s *Storage) ListReleases() ([]*rspb.Release, error) {
s.Log("listing all releases in storage") slog.Debug("listing all releases in storage")
return s.Driver.List(func(_ *rspb.Release) bool { return true }) return s.Driver.List(func(_ *rspb.Release) bool { return true })
} }
// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned // ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
// if the storage backend fails to retrieve the releases. // if the storage backend fails to retrieve the releases.
func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
s.Log("listing uninstalled releases in storage") slog.Debug("listing uninstalled releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool { return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
}) })
@ -104,7 +103,7 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
// ListDeployed returns all releases with Status == DEPLOYED. An error is returned // ListDeployed returns all releases with Status == DEPLOYED. An error is returned
// if the storage backend fails to retrieve the releases. // if the storage backend fails to retrieve the releases.
func (s *Storage) ListDeployed() ([]*rspb.Release, error) { func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
s.Log("listing all deployed releases in storage") slog.Debug("listing all deployed releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool { return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
}) })
@ -132,7 +131,7 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) {
// DeployedAll returns all deployed releases with the provided name, or // DeployedAll returns all deployed releases with the provided name, or
// returns driver.NewErrNoDeployedReleases if not found. // returns driver.NewErrNoDeployedReleases if not found.
func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) { func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
s.Log("getting deployed releases from %q history", name) slog.Debug("getting deployed releases", "name", name)
ls, err := s.Driver.Query(map[string]string{ ls, err := s.Driver.Query(map[string]string{
"name": name, "name": name,
@ -151,7 +150,7 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
// History returns the revision history for the release with the provided name, or // History returns the revision history for the release with the provided name, or
// returns driver.ErrReleaseNotFound if no such release name exists. // returns driver.ErrReleaseNotFound if no such release name exists.
func (s *Storage) History(name string) ([]*rspb.Release, error) { func (s *Storage) History(name string) ([]*rspb.Release, error) {
s.Log("getting release history for %q", name) slog.Debug("getting release history", "name", name)
return s.Driver.Query(map[string]string{"name": name, "owner": "helm"}) return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
} }
@ -206,7 +205,7 @@ func (s *Storage) removeLeastRecent(name string, maximum int) error {
} }
} }
s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs)) slog.Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
switch c := len(errs); c { switch c := len(errs); c {
case 0: case 0:
return nil return nil
@ -221,7 +220,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
key := makeKey(name, version) key := makeKey(name, version)
_, err := s.Delete(name, version) _, err := s.Delete(name, version)
if err != nil { if err != nil {
s.Log("error pruning %s from release history: %s", key, err) slog.Debug("error pruning release", "key", key, slog.Any("error", err))
return err return err
} }
return nil return nil
@ -229,7 +228,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
// Last fetches the last revision of the named release. // Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) { func (s *Storage) Last(name string) (*rspb.Release, error) {
s.Log("getting last revision of %q", name) slog.Debug("getting last revision", "name", name)
h, err := s.History(name) h, err := s.History(name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -261,6 +260,5 @@ func Init(d driver.Driver) *Storage {
} }
return &Storage{ return &Storage{
Driver: d, Driver: d,
Log: func(_ string, _ ...interface{}) {},
} }
} }

@ -312,7 +312,6 @@ func (d *MaxHistoryMockDriver) Name() string {
func TestMaxHistoryErrorHandling(t *testing.T) { func TestMaxHistoryErrorHandling(t *testing.T) {
//func TestStorageRemoveLeastRecentWithError(t *testing.T) { //func TestStorageRemoveLeastRecentWithError(t *testing.T) {
storage := Init(NewMaxHistoryMockDriver(driver.NewMemory())) storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
storage.Log = t.Logf
storage.MaxHistory = 1 storage.MaxHistory = 1
@ -338,7 +337,6 @@ func TestMaxHistoryErrorHandling(t *testing.T) {
func TestStorageRemoveLeastRecent(t *testing.T) { func TestStorageRemoveLeastRecent(t *testing.T) {
storage := Init(driver.NewMemory()) storage := Init(driver.NewMemory())
storage.Log = t.Logf
// Make sure that specifying this at the outset doesn't cause any bugs. // Make sure that specifying this at the outset doesn't cause any bugs.
storage.MaxHistory = 10 storage.MaxHistory = 10
@ -395,7 +393,6 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
func TestStorageDoNotDeleteDeployed(t *testing.T) { func TestStorageDoNotDeleteDeployed(t *testing.T) {
storage := Init(driver.NewMemory()) storage := Init(driver.NewMemory())
storage.Log = t.Logf
storage.MaxHistory = 3 storage.MaxHistory = 3
const name = "angry-bird" const name = "angry-bird"

Loading…
Cancel
Save