Merge pull request #30708 from benoittgt/migrate-kube-pkg-to-slog

Migrate pkg to slog
Matt Farina 5 months ago committed by GitHub
commit 7a1eb77e77
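
The diff below replaces Helm's printf-style logger callbacks (the cfg.Log field, helmcmd.Debug, helmcmd.Warning) with the standard library's structured log/slog package. The recurring pattern, shown here as a small self-contained sketch for illustration only (the identifiers are stand-ins, not code from the repository):

package main

import (
	"errors"
	"log/slog"
)

func main() {
	name := "my-release"
	err := errors.New("connection refused")

	// Before (removed in this PR): a printf-style callback threaded through structs,
	// e.g. cfg.Log("warning: Failed to update release %s: %s", name, err)

	// After: structured key/value logging through the package-level slog functions.
	slog.Warn("failed to update release", "name", name, slog.Any("error", err))
	slog.Debug("preparing upgrade", "name", name) // emitted only when debug logging is enabled
}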

@ -17,7 +17,7 @@ limitations under the License.
package main // import "helm.sh/helm/v4/cmd/helm" package main // import "helm.sh/helm/v4/cmd/helm"
import ( import (
"log" "log/slog"
"os" "os"
// Import to initialize client auth plugins. // Import to initialize client auth plugins.
@ -27,10 +27,6 @@ import (
"helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/kube"
) )
func init() {
log.SetFlags(log.Lshortfile)
}
func main() { func main() {
// Setting the name of the app for managedFields in the Kubernetes client. // Setting the name of the app for managedFields in the Kubernetes client.
// It is set here to the full name of "helm" so that renaming of helm to // It is set here to the full name of "helm" so that renaming of helm to
@ -40,12 +36,12 @@ func main() {
cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:])
if err != nil { if err != nil {
helmcmd.Warning("%+v", err) slog.Warn("command failed", slog.Any("error", err))
os.Exit(1) os.Exit(1)
} }
if err := cmd.Execute(); err != nil { if err := cmd.Execute(); err != nil {
helmcmd.Debug("%+v", err) slog.Debug("error", slog.Any("error", err))
switch e := err.(type) { switch e := err.(type) {
case helmcmd.PluginError: case helmcmd.PluginError:
os.Exit(e.Code) os.Exit(e.Code)

@ -0,0 +1,87 @@
/*
Copyright The Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logging

import (
	"context"
	"log/slog"
	"os"
)

// DebugEnabledFunc is a function type that determines if debug logging is enabled
// We use a function because we want to check the setting at log time, not when the logger is created
type DebugEnabledFunc func() bool

// DebugCheckHandler checks settings.Debug at log time
type DebugCheckHandler struct {
	handler      slog.Handler
	debugEnabled DebugEnabledFunc
}

// Enabled implements slog.Handler.Enabled
func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
	if level == slog.LevelDebug {
		return h.debugEnabled()
	}
	return true // Always log other levels
}

// Handle implements slog.Handler.Handle
func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
	return h.handler.Handle(ctx, r)
}

// WithAttrs implements slog.Handler.WithAttrs
func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &DebugCheckHandler{
		handler:      h.handler.WithAttrs(attrs),
		debugEnabled: h.debugEnabled,
	}
}

// WithGroup implements slog.Handler.WithGroup
func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
	return &DebugCheckHandler{
		handler:      h.handler.WithGroup(name),
		debugEnabled: h.debugEnabled,
	}
}

// NewLogger creates a new logger with dynamic debug checking
func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
	// Create base handler that removes timestamps
	baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		// Always use LevelDebug here to allow all messages through
		// Our custom handler will do the filtering
		Level: slog.LevelDebug,
		ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
			// Remove the time attribute
			if a.Key == slog.TimeKey {
				return slog.Attr{}
			}
			return a
		},
	})

	// Wrap with our dynamic debug-checking handler
	dynamicHandler := &DebugCheckHandler{
		handler:      baseHandler,
		debugEnabled: debugEnabled,
	}

	return slog.New(dynamicHandler)
}
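
A minimal sketch of how NewLogger is intended to be wired up, mirroring what newRootCmdWithConfig does later in this diff; the settings value here is only a stand-in for Helm's cli.EnvSettings:

package main

import (
	"log/slog"

	"helm.sh/helm/v4/internal/logging"
)

// Stand-in for Helm's settings object; only the Debug flag matters here.
var settings = struct{ Debug bool }{}

func main() {
	// The DebugEnabledFunc is evaluated on every log call, so flipping
	// settings.Debug after flag parsing takes effect immediately.
	logger := logging.NewLogger(func() bool { return settings.Debug })
	slog.SetDefault(logger)

	slog.Info("always printed")
	slog.Debug("printed only when settings.Debug is true")
}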

@ -29,9 +29,6 @@ type Client struct {
// The base URL for requests // The base URL for requests
BaseURL string BaseURL string
// The internal logger to use
Log func(string, ...interface{})
} }
// New creates a new client // New creates a new client
@ -44,12 +41,9 @@ func New(u string) (*Client, error) {
return &Client{ return &Client{
BaseURL: u, BaseURL: u,
Log: nopLogger,
}, nil }, nil
} }
var nopLogger = func(_ string, _ ...interface{}) {}
// Validate if the base URL for monocular is valid. // Validate if the base URL for monocular is valid.
func validate(u string) error { func validate(u string) error {

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@ -79,8 +80,6 @@ type Configuration struct {
// Capabilities describes the capabilities of the Kubernetes cluster. // Capabilities describes the capabilities of the Kubernetes cluster.
Capabilities *chartutil.Capabilities Capabilities *chartutil.Capabilities
Log func(string, ...interface{})
// HookOutputFunc called with container name and returns and expects writer that will receive the log output. // HookOutputFunc called with container name and returns and expects writer that will receive the log output.
HookOutputFunc func(namespace, pod, container string) io.Writer HookOutputFunc func(namespace, pod, container string) io.Writer
} }
@ -227,9 +226,6 @@ type RESTClientGetter interface {
ToRESTMapper() (meta.RESTMapper, error) ToRESTMapper() (meta.RESTMapper, error)
} }
// DebugLog sets the logger that writes debug strings
type DebugLog func(format string, v ...interface{})
// capabilities builds a Capabilities from discovery information. // capabilities builds a Capabilities from discovery information.
func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
if cfg.Capabilities != nil { if cfg.Capabilities != nil {
@ -253,8 +249,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
apiVersions, err := GetVersionSet(dc) apiVersions, err := GetVersionSet(dc)
if err != nil { if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) { if discovery.IsGroupDiscoveryFailedError(err) {
cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err) slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
cfg.Log("WARNING: To fix this, kubectl delete apiservice <service-name>") slog.Warn("to fix this, kubectl delete apiservice <service-name>")
} else { } else {
return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
} }
@ -353,14 +349,13 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
// recordRelease with an update operation in case reuse has been set. // recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) { func (cfg *Configuration) recordRelease(r *release.Release) {
if err := cfg.Releases.Update(r); err != nil { if err := cfg.Releases.Update(r); err != nil {
cfg.Log("warning: Failed to update release %s: %s", r.Name, err) slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err))
} }
} }
// Init initializes the action configuration // Init initializes the action configuration
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
kc := kube.New(getter) kc := kube.New(getter)
kc.Log = log
lazyClient := &lazyClient{ lazyClient := &lazyClient{
namespace: namespace, namespace: namespace,
@ -371,11 +366,9 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
switch helmDriver { switch helmDriver {
case "secret", "secrets", "": case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient)) d := driver.NewSecrets(newSecretClient(lazyClient))
d.Log = log
store = storage.Init(d) store = storage.Init(d)
case "configmap", "configmaps": case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient)) d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
d.Log = log
store = storage.Init(d) store = storage.Init(d)
case "memory": case "memory":
var d *driver.Memory var d *driver.Memory
@ -395,7 +388,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
case "sql": case "sql":
d, err := driver.NewSQL( d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"), os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
log,
namespace, namespace,
) )
if err != nil { if err != nil {
@ -409,7 +401,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
cfg.RESTClientGetter = getter cfg.RESTClientGetter = getter
cfg.KubeClient = kc cfg.KubeClient = kc
cfg.Releases = store cfg.Releases = store
cfg.Log = log
cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard } cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
return nil return nil
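
With the DebugLog parameter removed from Init, callers no longer pass a logger; debug verbosity is controlled by the process-wide slog default instead. A sketch of an updated call site, mirroring cmd/helm/root.go later in this diff (settings and actionConfig as used there):

actionConfig := new(action.Configuration)
helmDriver := os.Getenv("HELM_DRIVER")
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
	log.Fatal(err)
}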

@ -19,11 +19,13 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"log/slog"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
fakeclientset "k8s.io/client-go/kubernetes/fake" fakeclientset "k8s.io/client-go/kubernetes/fake"
"helm.sh/helm/v4/internal/logging"
chart "helm.sh/helm/v4/pkg/chart/v2" chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
kubefake "helm.sh/helm/v4/pkg/kube/fake" kubefake "helm.sh/helm/v4/pkg/kube/fake"
@ -34,11 +36,16 @@ import (
"helm.sh/helm/v4/pkg/time" "helm.sh/helm/v4/pkg/time"
) )
var verbose = flag.Bool("test.log", false, "enable test logging") var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)")
func actionConfigFixture(t *testing.T) *Configuration { func actionConfigFixture(t *testing.T) *Configuration {
t.Helper() t.Helper()
logger := logging.NewLogger(func() bool {
return *verbose
})
slog.SetDefault(logger)
registryClient, err := registry.NewClient() registryClient, err := registry.NewClient()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -49,12 +56,6 @@ func actionConfigFixture(t *testing.T) *Configuration {
KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
Capabilities: chartutil.DefaultCapabilities, Capabilities: chartutil.DefaultCapabilities,
RegistryClient: registryClient, RegistryClient: registryClient,
Log: func(format string, v ...interface{}) {
t.Helper()
if *verbose {
t.Logf(format, v...)
}
},
} }
} }
@ -334,7 +335,7 @@ func TestConfiguration_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
cfg := &Configuration{} cfg := &Configuration{}
actualErr := cfg.Init(nil, "default", tt.helmDriver, nil) actualErr := cfg.Init(nil, "default", tt.helmDriver)
if tt.expectErr { if tt.expectErr {
assert.Error(t, actualErr) assert.Error(t, actualErr)
assert.Contains(t, actualErr.Error(), tt.errMsg) assert.Contains(t, actualErr.Error(), tt.errMsg)

@ -17,6 +17,8 @@ limitations under the License.
package action package action
import ( import (
"log/slog"
"github.com/pkg/errors" "github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@ -53,6 +55,6 @@ func (h *History) Run(name string) ([]*release.Release, error) {
return nil, errors.Errorf("release name is invalid: %s", name) return nil, errors.Errorf("release name is invalid: %s", name)
} }
h.cfg.Log("getting history for release %s", name) slog.Debug("getting history for release", "release", name)
return h.cfg.Releases.History(name) return h.cfg.Releases.History(name)
} }

@ -21,6 +21,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/url" "net/url"
"os" "os"
"path" "path"
@ -172,7 +173,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
// If the error is CRD already exists, continue. // If the error is CRD already exists, continue.
if apierrors.IsAlreadyExists(err) { if apierrors.IsAlreadyExists(err) {
crdName := res[0].Name crdName := res[0].Name
i.cfg.Log("CRD %s is already present. Skipping.", crdName) slog.Debug("CRD is already present. Skipping", "crd", crdName)
continue continue
} }
return errors.Wrapf(err, "failed to install CRD %s", obj.Name) return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
@ -200,7 +201,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err return err
} }
i.cfg.Log("Clearing discovery cache") slog.Debug("clearing discovery cache")
discoveryClient.Invalidate() discoveryClient.Invalidate()
_, _ = discoveryClient.ServerGroups() _, _ = discoveryClient.ServerGroups()
@ -213,7 +214,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err return err
} }
if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
i.cfg.Log("Clearing REST mapper cache") slog.Debug("clearing REST mapper cache")
resettable.Reset() resettable.Reset()
} }
} }
@ -237,24 +238,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`) // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
if !i.ClientOnly { if !i.ClientOnly {
if err := i.cfg.KubeClient.IsReachable(); err != nil { if err := i.cfg.KubeClient.IsReachable(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Cluster reachability check failed: %v", err)) slog.Error(fmt.Sprintf("cluster reachability check failed: %v", err))
return nil, errors.Wrap(err, "cluster reachability check failed") return nil, errors.Wrap(err, "cluster reachability check failed")
} }
} }
// HideSecret must be used with dry run. Otherwise, return an error. // HideSecret must be used with dry run. Otherwise, return an error.
if !i.isDryRun() && i.HideSecret { if !i.isDryRun() && i.HideSecret {
i.cfg.Log("ERROR: Hiding Kubernetes secrets requires a dry-run mode") slog.Error("hiding Kubernetes secrets requires a dry-run mode")
return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode") return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
} }
if err := i.availableName(); err != nil { if err := i.availableName(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Release name check failed: %v", err)) slog.Error("release name check failed", slog.Any("error", err))
return nil, errors.Wrap(err, "release name check failed") return nil, errors.Wrap(err, "release name check failed")
} }
if err := chartutil.ProcessDependencies(chrt, vals); err != nil { if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Processing chart dependencies failed: %v", err)) slog.Error("chart dependencies processing failed", slog.Any("error", err))
return nil, errors.Wrap(err, "chart dependencies processing failed") return nil, errors.Wrap(err, "chart dependencies processing failed")
} }
@ -268,7 +269,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 { if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
// On dry run, bail here // On dry run, bail here
if i.isDryRun() { if i.isDryRun() {
i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
} else if err := i.installCRDs(crds); err != nil { } else if err := i.installCRDs(crds); err != nil {
return nil, err return nil, err
} }
@ -288,7 +289,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
mem.SetNamespace(i.Namespace) mem.SetNamespace(i.Namespace)
i.cfg.Releases = storage.Init(mem) i.cfg.Releases = storage.Init(mem)
} else if !i.ClientOnly && len(i.APIVersions) > 0 { } else if !i.ClientOnly && len(i.APIVersions) > 0 {
i.cfg.Log("API Version list given outside of client only mode, this list will be ignored") slog.Debug("API Version list given outside of client only mode, this list will be ignored")
} }
// Make sure if Atomic is set, that wait is set as well. This makes it so // Make sure if Atomic is set, that wait is set as well. This makes it so
@ -505,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// One possible strategy would be to do a timed retry to see if we can get // One possible strategy would be to do a timed retry to see if we can get
// this stored in the future. // this stored in the future.
if err := i.recordRelease(rel); err != nil { if err := i.recordRelease(rel); err != nil {
i.cfg.Log("failed to record the release: %s", err) slog.Error("failed to record the release", slog.Any("error", err))
} }
return rel, nil return rel, nil
@ -514,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
if i.Atomic { if i.Atomic {
i.cfg.Log("Install failed and atomic is set, uninstalling release") slog.Debug("install failed, uninstalling release", "release", i.ReleaseName)
uninstall := NewUninstall(i.cfg) uninstall := NewUninstall(i.cfg)
uninstall.DisableHooks = i.DisableHooks uninstall.DisableHooks = i.DisableHooks
uninstall.KeepHistory = false uninstall.KeepHistory = false

@ -19,6 +19,7 @@ package action
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"log/slog"
"strings" "strings"
"time" "time"
@ -63,26 +64,26 @@ func (r *Rollback) Run(name string) error {
r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Releases.MaxHistory = r.MaxHistory
r.cfg.Log("preparing rollback of %s", name) slog.Debug("preparing rollback", "name", name)
currentRelease, targetRelease, err := r.prepareRollback(name) currentRelease, targetRelease, err := r.prepareRollback(name)
if err != nil { if err != nil {
return err return err
} }
if !r.DryRun { if !r.DryRun {
r.cfg.Log("creating rolled back release for %s", name) slog.Debug("creating rolled back release", "name", name)
if err := r.cfg.Releases.Create(targetRelease); err != nil { if err := r.cfg.Releases.Create(targetRelease); err != nil {
return err return err
} }
} }
r.cfg.Log("performing rollback of %s", name) slog.Debug("performing rollback", "name", name)
if _, err := r.performRollback(currentRelease, targetRelease); err != nil { if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
return err return err
} }
if !r.DryRun { if !r.DryRun {
r.cfg.Log("updating status for rolled back release for %s", name) slog.Debug("updating status for rolled back release", "name", name)
if err := r.cfg.Releases.Update(targetRelease); err != nil { if err := r.cfg.Releases.Update(targetRelease); err != nil {
return err return err
} }
@ -129,7 +130,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
return nil, nil, errors.Errorf("release has no %d version", previousVersion) return nil, nil, errors.Errorf("release has no %d version", previousVersion)
} }
r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion) slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
previousRelease, err := r.cfg.Releases.Get(name, previousVersion) previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
if err != nil { if err != nil {
@ -162,7 +163,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
if r.DryRun { if r.DryRun {
r.cfg.Log("dry run for %s", targetRelease.Name) slog.Debug("dry run", "name", targetRelease.Name)
return targetRelease, nil return targetRelease, nil
} }
@ -181,7 +182,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
return targetRelease, err return targetRelease, err
} }
} else { } else {
r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name) slog.Debug("rollback hooks disabled", "name", targetRelease.Name)
} }
// It is safe to use "force" here because these are resources currently rendered by the chart. // It is safe to use "force" here because these are resources currently rendered by the chart.
@ -193,14 +194,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
if err != nil { if err != nil {
msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
r.cfg.Log("warning: %s", msg) slog.Warn(msg)
currentRelease.Info.Status = release.StatusSuperseded currentRelease.Info.Status = release.StatusSuperseded
targetRelease.Info.Status = release.StatusFailed targetRelease.Info.Status = release.StatusFailed
targetRelease.Info.Description = msg targetRelease.Info.Description = msg
r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease) r.cfg.recordRelease(targetRelease)
if r.CleanupOnFail { if r.CleanupOnFail {
r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created)) slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
_, errs := r.cfg.KubeClient.Delete(results.Created) _, errs := r.cfg.KubeClient.Delete(results.Created)
if errs != nil { if errs != nil {
var errorList []string var errorList []string
@ -209,7 +210,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
} }
return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
} }
r.cfg.Log("Resource cleanup complete") slog.Debug("resource cleanup complete")
} }
return targetRelease, err return targetRelease, err
} }
@ -220,7 +221,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// levels, we should make these error level logs so users are notified // levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own // that they'll need to go do the cleanup on their own
if err := recreate(r.cfg, results.Updated); err != nil { if err := recreate(r.cfg, results.Updated); err != nil {
r.cfg.Log(err.Error()) slog.Error(err.Error())
} }
} }
waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
@ -256,7 +257,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
} }
// Supersede all previous deployments, see issue #2941. // Supersede all previous deployments, see issue #2941.
for _, rel := range deployed { for _, rel := range deployed {
r.cfg.Log("superseding previous deployment %d", rel.Version) slog.Debug("superseding previous deployment", "version", rel.Version)
rel.Info.Status = release.StatusSuperseded rel.Info.Status = release.StatusSuperseded
r.cfg.recordRelease(rel) r.cfg.recordRelease(rel)
} }

@ -17,6 +17,7 @@ limitations under the License.
package action package action
import ( import (
"log/slog"
"strings" "strings"
"time" "time"
@ -104,7 +105,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, errors.Errorf("the release named %q is already deleted", name) return nil, errors.Errorf("the release named %q is already deleted", name)
} }
u.cfg.Log("uninstall: Deleting %s", name) slog.Debug("uninstall: deleting release", "name", name)
rel.Info.Status = release.StatusUninstalling rel.Info.Status = release.StatusUninstalling
rel.Info.Deleted = helmtime.Now() rel.Info.Deleted = helmtime.Now()
rel.Info.Description = "Deletion in progress (or silently failed)" rel.Info.Description = "Deletion in progress (or silently failed)"
@ -115,18 +116,18 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return res, err return res, err
} }
} else { } else {
u.cfg.Log("delete hooks disabled for %s", name) slog.Debug("delete hooks disabled", "release", name)
} }
// From here on out, the release is currently considered to be in StatusUninstalling // From here on out, the release is currently considered to be in StatusUninstalling
// state. // state.
if err := u.cfg.Releases.Update(rel); err != nil { if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err) slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
} }
deletedResources, kept, errs := u.deleteRelease(rel) deletedResources, kept, errs := u.deleteRelease(rel)
if errs != nil { if errs != nil {
u.cfg.Log("uninstall: Failed to delete release: %s", errs) slog.Debug("uninstall: Failed to delete release", slog.Any("error", errs))
return nil, errors.Errorf("failed to delete release: %s", name) return nil, errors.Errorf("failed to delete release: %s", name)
} }
@ -153,7 +154,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
} }
if !u.KeepHistory { if !u.KeepHistory {
u.cfg.Log("purge requested for %s", name) slog.Debug("purge requested", "release", name)
err := u.purgeReleases(rels...) err := u.purgeReleases(rels...)
if err != nil { if err != nil {
errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release")) errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
@ -168,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
} }
if err := u.cfg.Releases.Update(rel); err != nil { if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err) slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
} }
if len(errs) > 0 { if len(errs) > 0 {
@ -225,7 +226,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
} }
if len(resources) > 0 { if len(resources) > 0 {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok {
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation)) _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation))
return resources, kept, errs return resources, kept, errs
} }
_, errs = u.cfg.KubeClient.Delete(resources) _, errs = u.cfg.KubeClient.Delete(resources)
@ -233,7 +234,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
return resources, kept, errs return resources, kept, errs
} }
func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation { func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
switch cascadingFlag { switch cascadingFlag {
case "orphan": case "orphan":
return v1.DeletePropagationOrphan return v1.DeletePropagationOrphan
@ -242,7 +243,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro
case "background": case "background":
return v1.DeletePropagationBackground return v1.DeletePropagationBackground
default: default:
cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag) slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
return v1.DeletePropagationBackground return v1.DeletePropagationBackground
} }
} }

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"log/slog"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -163,7 +164,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
return nil, errors.Errorf("release name is invalid: %s", name) return nil, errors.Errorf("release name is invalid: %s", name)
} }
u.cfg.Log("preparing upgrade for %s", name) slog.Debug("preparing upgrade", "name", name)
currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
if err != nil { if err != nil {
return nil, err return nil, err
@ -171,7 +172,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
u.cfg.Releases.MaxHistory = u.MaxHistory u.cfg.Releases.MaxHistory = u.MaxHistory
u.cfg.Log("performing update for %s", name) slog.Debug("performing update", "name", name)
res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
if err != nil { if err != nil {
return res, err return res, err
@ -179,7 +180,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// Do not update for dry runs // Do not update for dry runs
if !u.isDryRun() { if !u.isDryRun() {
u.cfg.Log("updating status for upgraded release for %s", name) slog.Debug("updating status for upgraded release", "name", name)
if err := u.cfg.Releases.Update(upgradedRelease); err != nil { if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
return res, err return res, err
} }
@ -365,7 +366,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
// Run if it is a dry run // Run if it is a dry run
if u.isDryRun() { if u.isDryRun() {
u.cfg.Log("dry run for %s", upgradedRelease.Name) slog.Debug("dry run for release", "name", upgradedRelease.Name)
if len(u.Description) > 0 { if len(u.Description) > 0 {
upgradedRelease.Info.Description = u.Description upgradedRelease.Info.Description = u.Description
} else { } else {
@ -374,7 +375,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
return upgradedRelease, nil return upgradedRelease, nil
} }
u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name) slog.Debug("creating upgraded release", "name", upgradedRelease.Name)
if err := u.cfg.Releases.Create(upgradedRelease); err != nil { if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
return nil, err return nil, err
} }
@ -425,7 +426,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
return return
} }
} else { } else {
u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name) slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
} }
results, err := u.cfg.KubeClient.Update(current, target, u.Force) results, err := u.cfg.KubeClient.Update(current, target, u.Force)
@ -441,7 +442,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// levels, we should make these error level logs so users are notified // levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own // that they'll need to go do the cleanup on their own
if err := recreate(u.cfg, results.Updated); err != nil { if err := recreate(u.cfg, results.Updated); err != nil {
u.cfg.Log(err.Error()) slog.Error(err.Error())
} }
} }
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
@ -486,13 +487,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
u.cfg.Log("warning: %s", msg) slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err))
rel.Info.Status = release.StatusFailed rel.Info.Status = release.StatusFailed
rel.Info.Description = msg rel.Info.Description = msg
u.cfg.recordRelease(rel) u.cfg.recordRelease(rel)
if u.CleanupOnFail && len(created) > 0 { if u.CleanupOnFail && len(created) > 0 {
u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created)) slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
_, errs := u.cfg.KubeClient.Delete(created) _, errs := u.cfg.KubeClient.Delete(created)
if errs != nil { if errs != nil {
var errorList []string var errorList []string
@ -501,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
} }
return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
} }
u.cfg.Log("Resource cleanup complete") slog.Debug("resource cleanup complete")
} }
if u.Atomic { if u.Atomic {
u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release") slog.Debug("upgrade failed and atomic is set, rolling back to last successful release")
// As a protection, get the last successful release before rollback. // As a protection, get the last successful release before rollback.
// If there are no successful releases, bail out // If there are no successful releases, bail out
@ -556,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
if u.ResetValues { if u.ResetValues {
// If ResetValues is set, we completely ignore current.Config. // If ResetValues is set, we completely ignore current.Config.
u.cfg.Log("resetting values to the chart's original version") slog.Debug("resetting values to the chart's original version")
return newVals, nil return newVals, nil
} }
// If the ReuseValues flag is set, we always copy the old values over the new config's values. // If the ReuseValues flag is set, we always copy the old values over the new config's values.
if u.ReuseValues { if u.ReuseValues {
u.cfg.Log("reusing the old release's values") slog.Debug("reusing the old release's values")
// We have to regenerate the old coalesced values: // We have to regenerate the old coalesced values:
oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
@ -579,7 +580,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
// If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
if u.ResetThenReuseValues { if u.ResetThenReuseValues {
u.cfg.Log("merging values from old release to new values") slog.Debug("merging values from old release to new values")
newVals = chartutil.CoalesceTables(newVals, current.Config) newVals = chartutil.CoalesceTables(newVals, current.Config)
@ -587,7 +588,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
} }
if len(newVals) == 0 && len(current.Config) > 0 { if len(newVals) == 0 && len(current.Config) > 0 {
u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version) slog.Debug("copying values from old release", "name", current.Name, "version", current.Version)
newVals = current.Config newVals = current.Config
} }
return newVals, nil return newVals, nil

@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
// get child table // get child table
vv, err := cvals.Table(r.Name + "." + child) vv, err := cvals.Table(r.Name + "." + child)
if err != nil { if err != nil {
slog.Warn("ImportValues missing table from chart", "chart", r.Name, "error", err) slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err))
continue continue
} }
// create value map from child to be merged into parent // create value map from child to be merged into parent

@ -20,6 +20,7 @@ import (
"flag" "flag"
"fmt" "fmt"
"log" "log"
"log/slog"
"path/filepath" "path/filepath"
"sort" "sort"
"strings" "strings"
@ -82,11 +83,11 @@ func (ws *waitValue) Set(s string) error {
*ws = waitValue(s) *ws = waitValue(s)
return nil return nil
case "true": case "true":
Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
*ws = waitValue(kube.StatusWatcherStrategy) *ws = waitValue(kube.StatusWatcherStrategy)
return nil return nil
case "false": case "false":
Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
*ws = waitValue(kube.HookOnlyStrategy) *ws = waitValue(kube.HookOnlyStrategy)
return nil return nil
default: default:

@ -92,7 +92,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
Releases: store, Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities, Capabilities: chartutil.DefaultCapabilities,
Log: func(_ string, _ ...interface{}) {},
} }
root, err := newRootCmdWithConfig(actionConfig, buf, args) root, err := newRootCmdWithConfig(actionConfig, buf, args)

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@ -229,9 +230,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
} }
func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) { func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
Debug("Original chart version: %q", client.Version) slog.Debug("Original chart version", "version", client.Version)
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }
@ -246,7 +247,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
return nil, err return nil, err
} }
Debug("CHART PATH: %s\n", cp) slog.Debug("Chart path", "path", cp)
p := getter.All(settings) p := getter.All(settings)
vals, err := valueOpts.MergeValues(p) vals, err := valueOpts.MergeValues(p)
@ -265,7 +266,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
} }
if chartRequested.Metadata.Deprecated { if chartRequested.Metadata.Deprecated {
Warning("This chart is deprecated") slog.Warn("this chart is deprecated")
} }
if req := chartRequested.Metadata.Dependencies; req != nil { if req := chartRequested.Metadata.Dependencies; req != nil {

@ -71,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc, ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(cmd *cobra.Command, _ []string) error { RunE: func(cmd *cobra.Command, _ []string) error {
if client.AllNamespaces { if client.AllNamespaces {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Debug); err != nil { if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
return err return err
} }
} }

@ -17,6 +17,7 @@ package cmd
import ( import (
"io" "io"
"log/slog"
"os" "os"
"os/exec" "os/exec"
@ -66,7 +67,7 @@ func runHook(p *plugin.Plugin, event string) error {
prog := exec.Command(main, argv...) prog := exec.Command(main, argv...)
Debug("running %s hook: %s", event, prog) slog.Debug("running hook", "event", event, "program", prog)
prog.Stdout, prog.Stderr = os.Stdout, os.Stderr prog.Stdout, prog.Stderr = os.Stdout, os.Stderr
if err := prog.Run(); err != nil { if err := prog.Run(); err != nil {

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -79,7 +80,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error {
return err return err
} }
Debug("loading plugin from %s", i.Path()) slog.Debug("loading plugin", "path", i.Path())
p, err := plugin.LoadDir(i.Path()) p, err := plugin.LoadDir(i.Path())
if err != nil { if err != nil {
return errors.Wrap(err, "plugin is installed but unusable") return errors.Wrap(err, "plugin is installed but unusable")

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"github.com/gosuri/uitable" "github.com/gosuri/uitable"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -32,7 +33,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command {
Short: "list installed Helm plugins", Short: "list installed Helm plugins",
ValidArgsFunction: noMoreArgsCompFunc, ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error { RunE: func(_ *cobra.Command, _ []string) error {
Debug("pluginDirs: %s", settings.PluginsDirectory) slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
@ -60,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error {
} }
func (o *pluginUninstallOptions) run(out io.Writer) error { func (o *pluginUninstallOptions) run(out io.Writer) error {
Debug("loading installed plugins from %s", settings.PluginsDirectory) slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err

@ -18,6 +18,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"path/filepath" "path/filepath"
"strings" "strings"
@ -62,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error {
func (o *pluginUpdateOptions) run(out io.Writer) error { func (o *pluginUpdateOptions) run(out io.Writer) error {
installer.Debug = settings.Debug installer.Debug = settings.Debug
Debug("loading installed plugins from %s", settings.PluginsDirectory) slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil { if err != nil {
return err return err
@ -104,7 +105,7 @@ func updatePlugin(p *plugin.Plugin) error {
return err return err
} }
Debug("loading plugin from %s", i.Path()) slog.Debug("loading plugin", "path", i.Path())
updatedPlugin, err := plugin.LoadDir(i.Path()) updatedPlugin, err := plugin.LoadDir(i.Path())
if err != nil { if err != nil {
return err return err

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
RunE: func(_ *cobra.Command, args []string) error { RunE: func(_ *cobra.Command, args []string) error {
client.Settings = settings client.Settings = settings
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }

@ -21,6 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
@ -122,7 +123,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
} }
} }
} else { } else {
Warning("Using --password via the CLI is insecure. Use --password-stdin.") slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
} }
return username, password, nil return username, password, nil

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"net/http" "net/http"
"os" "os"
"strings" "strings"
@ -31,6 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"helm.sh/helm/v4/internal/logging"
"helm.sh/helm/v4/internal/tlsutil" "helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/cli"
@ -96,16 +98,6 @@ By default, the default directories depend on the Operating System. The defaults
var settings = cli.New() var settings = cli.New()
func Debug(format string, v ...interface{}) {
if settings.Debug {
log.Output(2, fmt.Sprintf("[debug] "+format+"\n", v...))
}
}
func Warning(format string, v ...interface{}) {
fmt.Fprintf(os.Stderr, "WARNING: "+format+"\n", v...)
}
func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
actionConfig := new(action.Configuration) actionConfig := new(action.Configuration)
cmd, err := newRootCmdWithConfig(actionConfig, out, args) cmd, err := newRootCmdWithConfig(actionConfig, out, args)
@ -114,7 +106,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
} }
cobra.OnInitialize(func() { cobra.OnInitialize(func() {
helmDriver := os.Getenv("HELM_DRIVER") helmDriver := os.Getenv("HELM_DRIVER")
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Debug); err != nil { if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
log.Fatal(err) log.Fatal(err)
} }
if helmDriver == "memory" { if helmDriver == "memory" {
@ -148,6 +140,9 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
settings.AddFlags(flags) settings.AddFlags(flags)
addKlogFlags(flags) addKlogFlags(flags)
logger := logging.NewLogger(func() bool { return settings.Debug })
slog.SetDefault(logger)
// Setup shell completion for the namespace flag // Setup shell completion for the namespace flag
err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
if client, err := actionConfig.KubernetesClientSet(); err == nil { if client, err := actionConfig.KubernetesClientSet(); err == nil {

@ -19,6 +19,7 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"strings" "strings"
"github.com/gosuri/uitable" "github.com/gosuri/uitable"
@ -89,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error {
q := strings.Join(args, " ") q := strings.Join(args, " ")
results, err := c.Search(q) results, err := c.Search(q)
if err != nil { if err != nil {
Debug("%s", err) slog.Debug("search failed", slog.Any("error", err))
return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
} }

@ -21,6 +21,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -130,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error {
} }
func (o *searchRepoOptions) setupSearchedVersion() { func (o *searchRepoOptions) setupSearchedVersion() {
Debug("Original chart version: %q", o.version) slog.Debug("original chart version", "version", o.version)
if o.version != "" { if o.version != "" {
return return
} }
if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases). if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
o.version = ">0.0.0-0" o.version = ">0.0.0-0"
} else { // search only for stable releases, prerelease versions will be skipped } else { // search only for stable releases, prerelease versions will be skipped
Debug("setting version to >0.0.0") slog.Debug("setting version to >0.0.0")
o.version = ">0.0.0" o.version = ">0.0.0"
} }
} }
@ -189,8 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
ind, err := repo.LoadIndexFile(f) ind, err := repo.LoadIndexFile(f)
if err != nil { if err != nil {
Warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n) slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err))
Warning("%s", err)
continue continue
} }

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -211,9 +212,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) {
} }
func runShow(args []string, client *action.Show) (string, error) { func runShow(args []string, client *action.Show) (string, error) {
Debug("Original chart version: %q", client.Version) slog.Debug("original chart version", "version", client.Version)
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"log/slog"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@ -173,7 +174,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
} }
if client.Version == "" && client.Devel { if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0") slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0" client.Version = ">0.0.0-0"
} }
@ -225,7 +226,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
} }
if ch.Metadata.Deprecated { if ch.Metadata.Deprecated {
Warning("This chart is deprecated") slog.Warn("this chart is deprecated")
} }
// Create context and prepare the handle of SIGTERM // Create context and prepare the handle of SIGTERM

@ -98,7 +98,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
gvk := schema.FromAPIVersionAndKind(apiversion, kind) gvk := schema.FromAPIVersionAndKind(apiversion, kind)
apiRes, err := getAPIResourceForGVK(gvk, config) apiRes, err := getAPIResourceForGVK(gvk, config)
if err != nil { if err != nil {
slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), "error", err) slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err))
return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
} }
gvr := schema.GroupVersionResource{ gvr := schema.GroupVersionResource{
@ -124,7 +124,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met
} }
resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
if err != nil { if err != nil {
slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), "error", err) slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err))
return res, err return res, err
} }
for _, resource := range resList.APIResources { for _, resource := range resList.APIResources {

@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error {
rule = strings.TrimPrefix(rule, "/") rule = strings.TrimPrefix(rule, "/")
ok, err := filepath.Match(rule, n) ok, err := filepath.Match(rule, n)
if err != nil { if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err) slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false return false
} }
return ok return ok
@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error {
p.match = func(n string, _ os.FileInfo) bool { p.match = func(n string, _ os.FileInfo) bool {
ok, err := filepath.Match(rule, n) ok, err := filepath.Match(rule, n)
if err != nil { if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err) slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false return false
} }
return ok return ok
@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error {
n = filepath.Base(n) n = filepath.Base(n)
ok, err := filepath.Match(rule, n) ok, err := filepath.Match(rule, n)
if err != nil { if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err) slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false return false
} }
return ok return ok

@ -22,6 +22,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -73,7 +74,6 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower // needs. The smaller surface area of the interface means there is a lower
// chance of it changing. // chance of it changing.
Factory Factory Factory Factory
Log func(string, ...interface{})
// Namespace allows to bypass the kubeconfig file for the choice of the namespace // Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string Namespace string
@ -120,7 +120,6 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) {
return &statusWaiter{ return &statusWaiter{
restMapper: restMapper, restMapper: restMapper,
client: dynamicClient, client: dynamicClient,
log: c.Log,
}, nil }, nil
} }
@ -131,7 +130,7 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &legacyWaiter{kubeClient: kc, log: c.Log}, nil return &legacyWaiter{kubeClient: kc}, nil
case StatusWatcherStrategy: case StatusWatcherStrategy:
return c.newStatusWatcher() return c.newStatusWatcher()
case HookOnlyStrategy: case HookOnlyStrategy:
@ -162,13 +161,10 @@ func New(getter genericclioptions.RESTClientGetter) *Client {
factory := cmdutil.NewFactory(getter) factory := cmdutil.NewFactory(getter)
c := &Client{ c := &Client{
Factory: factory, Factory: factory,
Log: nopLogger,
} }
return c return c
} }
var nopLogger = func(_ string, _ ...interface{}) {}
// getKubeClient get or create a new KubernetesClientSet // getKubeClient get or create a new KubernetesClientSet
func (c *Client) getKubeClient() (kubernetes.Interface, error) { func (c *Client) getKubeClient() (kubernetes.Interface, error) {
var err error var err error
@ -198,7 +194,7 @@ func (c *Client) IsReachable() error {
// Create creates Kubernetes resources specified in the resource list. // Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) { func (c *Client) Create(resources ResourceList) (*Result, error) {
c.Log("creating %d resource(s)", len(resources)) slog.Debug("creating resource(s)", "resources", len(resources))
if err := perform(resources, createResource); err != nil { if err := perform(resources, createResource); err != nil {
return nil, err return nil, err
} }
@ -250,7 +246,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil { if err != nil {
c.Log("Warning: get the relation pod is failed, err:%s", err.Error()) slog.Warn("get the relation pod is failed", slog.Any("error", err))
} }
} }
} }
@ -268,7 +264,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil { if info == nil {
return objs, nil return objs, nil
} }
c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name) slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
selector, ok, _ := getSelectorFromObject(info.Object) selector, ok, _ := getSelectorFromObject(info.Object)
if !ok { if !ok {
return objs, nil return objs, nil
@ -410,7 +406,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
updateErrors := []string{} updateErrors := []string{}
res := &Result{} res := &Result{}
c.Log("checking %d resources for changes", len(target)) slog.Debug("checking resources for changes", "resources", len(target))
err := target.Visit(func(info *resource.Info, err error) error { err := target.Visit(func(info *resource.Info, err error) error {
if err != nil { if err != nil {
return err return err
@ -431,7 +427,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
kind := info.Mapping.GroupVersionKind.Kind kind := info.Mapping.GroupVersionKind.Kind
c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace) slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind)
return nil return nil
} }
@ -442,7 +438,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
if err := updateResource(c, info, originalInfo.Object, force); err != nil { if err := updateResource(c, info, originalInfo.Object, force); err != nil {
c.Log("error updating the resource %q:\n\t %v", info.Name, err) slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
updateErrors = append(updateErrors, err.Error()) updateErrors = append(updateErrors, err.Error())
} }
// Because we check for errors later, append the info regardless // Because we check for errors later, append the info regardless
@ -459,22 +455,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
} }
for _, info := range original.Difference(target) { for _, info := range original.Difference(target) {
c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace) slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
if err := info.Get(); err != nil { if err := info.Get(); err != nil {
c.Log("Unable to get obj %q, err: %s", info.Name, err) slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue continue
} }
annotations, err := metadataAccessor.Annotations(info.Object) annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil { if err != nil {
c.Log("Unable to get annotations on %q, err: %s", info.Name, err) slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
} }
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy) slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue continue
} }
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
c.Log("Failed to delete %q, err: %s", info.ObjectName(), err) slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue continue
} }
res.Deleted = append(res.Deleted, info) res.Deleted = append(res.Deleted, info)
@ -498,16 +494,16 @@ func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy meta
return rdelete(c, resources, policy) return rdelete(c, resources, policy)
} }
func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error var errs []error
res := &Result{} res := &Result{}
mtx := sync.Mutex{} mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error { err := perform(resources, func(info *resource.Info) error {
c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind) slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info, propagation) err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) { if err == nil || apierrors.IsNotFound(err) {
if err != nil { if err != nil {
c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err) slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
} }
mtx.Lock() mtx.Lock()
defer mtx.Unlock() defer mtx.Unlock()
@ -641,7 +637,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
return patch, types.StrategicMergePatchType, err return patch, types.StrategicMergePatchType, err
} }
func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error { func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
var ( var (
obj runtime.Object obj runtime.Object
helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
@ -655,7 +651,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
if err != nil { if err != nil {
return errors.Wrap(err, "failed to replace object") return errors.Wrap(err, "failed to replace object")
} }
c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind) slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
} else { } else {
patch, patchType, err := createPatch(target, currentObj) patch, patchType, err := createPatch(target, currentObj)
if err != nil { if err != nil {
@ -663,7 +659,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
} }
if patch == nil || string(patch) == "{}" { if patch == nil || string(patch) == "{}" {
c.Log("Looks like there are no changes for %s %q", kind, target.Name) slog.Debug("no changes detected", "kind", kind, "name", target.Name)
// This needs to happen to make sure that Helm has the latest info from the API // This needs to happen to make sure that Helm has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic // Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil { if err := target.Get(); err != nil {
@ -672,7 +668,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil return nil
} }
// send patch to server // send patch to server
c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace) slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil { if err != nil {
return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind) return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
@ -719,9 +715,6 @@ func copyRequestStreamToWriter(request *rest.Request, podName, containerName str
if err != nil { if err != nil {
return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
} }
if err != nil {
return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName)
}
return nil return nil
} }
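With the Log field gone from Client, debug output from Create, Update, Delete and the other client methods follows the process-wide slog default. A minimal sketch, assuming a hypothetical debug flag, of routing that output to stderr at debug level:

package main

import (
	"log/slog"
	"os"
)

func main() {
	debug := true // hypothetical flag; in Helm this would be driven by the --debug setting

	level := slog.LevelInfo
	if debug {
		level = slog.LevelDebug
	}
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: level})))

	// Messages such as "creating resource(s)" from Client.Create are emitted
	// through this default logger rather than a per-client Log callback.
	slog.Debug("creating resource(s)", "resources", 3)
}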

@ -107,7 +107,6 @@ func newTestClient(t *testing.T) *Client {
return &Client{ return &Client{
Factory: testFactory.WithNamespace("default"), Factory: testFactory.WithNamespace("default"),
Log: nopLogger,
} }
} }

@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1"
@ -57,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults. // be used to override defaults.
func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker { func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{ c := ReadyChecker{
client: cl, client: cl,
log: log,
}
if c.log == nil {
c.log = nopLogger
} }
for _, opt := range opts { for _, opt := range opts {
opt(&c) opt(&c)
@ -74,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}),
// ReadyChecker is a type that can check core Kubernetes types for readiness. // ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct { type ReadyChecker struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -230,18 +226,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true return true
} }
} }
c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false return false
} }
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit { if job.Status.Failed > *job.Spec.BackoffLimit {
c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error // If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
} }
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil return false, nil
} }
return true, nil return true, nil
@ -255,7 +251,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty // Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" { if s.Spec.ClusterIP == "" {
c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false return false
} }
@ -263,12 +259,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer { if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set // do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 { if len(s.Spec.ExternalIPs) > 0 {
c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true return true
} }
if s.Status.LoadBalancer.Ingress == nil { if s.Status.LoadBalancer.Ingress == nil {
c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false return false
} }
} }
@ -278,7 +274,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound { if v.Status.Phase != corev1.ClaimBound {
c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false return false
} }
return true return true
@ -291,13 +287,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
} }
// Verify the generation observed by the deployment controller matches the spec generation // Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation) slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation)
return false return false
} }
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) { if !(rs.Status.ReadyReplicas >= expectedReady) {
c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false return false
} }
return true return true
@ -306,7 +302,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation // Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation) slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation)
return false return false
} }
@ -317,7 +313,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled // Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false return false
} }
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@ -330,7 +326,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
if !(int(ds.Status.NumberReady) >= expectedReady) { if !(int(ds.Status.NumberReady) >= expectedReady) {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false return false
} }
return true return true
@ -382,13 +378,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation // Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation) slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation)
return false return false
} }
// If the update strategy is not a rolling update, there will be nothing to wait for // If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type) slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true return true
} }
@ -414,30 +410,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled // Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas { if int(sts.Status.UpdatedReplicas) < expectedReplicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas) slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false return false
} }
if int(sts.Status.ReadyReplicas) != replicas { if int(sts.Status.ReadyReplicas) != replicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false return false
} }
// This check only makes sense when all partitions are being upgraded otherwise during a // This check only makes sense when all partitions are being upgraded otherwise during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to // partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error. // error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision) slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false return false
} }
c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true return true
} }
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation // Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation) slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation)
return false return false
} }
return true return true
@ -446,7 +442,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation // Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation) slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation)
return false return false
} }
return true return true
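Callers of NewReadyChecker now drop the log argument and pass only the clientset plus options. A minimal sketch of the new call shape, using the fake clientset purely for illustration (a real caller would build a clientset from its kubeconfig):

package main

import (
	"helm.sh/helm/v4/pkg/kube"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Readiness details ("Pod is not ready", "Job is not completed", ...) now
	// surface via slog.Debug, so enable a debug-level default logger to see them.
	checker := kube.NewReadyChecker(fake.NewClientset(), kube.PausedAsReady(true), kube.CheckJobs(true))
	_ = checker
}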

@ -37,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -57,7 +56,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod", name: "IsReady Pod",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -73,7 +71,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod returns error", name: "IsReady Pod returns error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -90,7 +87,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -113,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -133,7 +128,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job error while getting job", name: "IsReady Job error while getting job",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -149,7 +143,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job", name: "IsReady Job",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -166,7 +159,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -188,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -209,7 +200,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments error while getting current Deployment", name: "IsReady Deployments error while getting current Deployment",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -226,7 +216,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments", //TODO fix this one name: "IsReady Deployments", //TODO fix this one
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -244,7 +233,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -270,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -290,7 +277,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim", name: "IsReady PersistentVolumeClaim",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -306,7 +292,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim with error", name: "IsReady PersistentVolumeClaim with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -323,7 +308,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -345,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -365,7 +348,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service", name: "IsReady Service",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -381,7 +363,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service with error", name: "IsReady Service with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -398,7 +379,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -420,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -440,7 +419,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet", name: "IsReady DaemonSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -456,7 +434,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet with error", name: "IsReady DaemonSet with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -473,7 +450,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -495,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -515,7 +490,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet", name: "IsReady StatefulSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -531,7 +505,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet with error", name: "IsReady StatefulSet with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -548,7 +521,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -570,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -590,7 +561,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController", name: "IsReady ReplicationController",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -606,7 +576,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController with error", name: "IsReady ReplicationController with error",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -622,7 +591,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController and pods not ready for object", name: "IsReady ReplicationController and pods not ready for object",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -639,7 +607,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -661,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct { type fields struct {
client kubernetes.Interface client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool checkJobs bool
pausedAsReady bool pausedAsReady bool
} }
@ -681,7 +647,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet", name: "IsReady ReplicaSet",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -697,7 +662,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet not ready", name: "IsReady ReplicaSet not ready",
fields: fields{ fields: fields{
client: fake.NewClientset(), client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true, checkJobs: true,
pausedAsReady: false, pausedAsReady: false,
}, },
@ -714,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{ c := &ReadyChecker{
client: tt.fields.client, client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs, checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady, pausedAsReady: tt.fields.pausedAsReady,
} }
@ -791,7 +754,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want { if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
t.Errorf("deploymentReady() = %v, want %v", got, tt.want) t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
} }
@ -825,7 +788,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.replicaSetReady(tt.args.rs); got != tt.want { if got := c.replicaSetReady(tt.args.rs); got != tt.want {
t.Errorf("replicaSetReady() = %v, want %v", got, tt.want) t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
} }
@ -859,7 +822,7 @@ func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.replicationControllerReady(tt.args.rc); got != tt.want { if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want) t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
} }
@ -914,7 +877,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.daemonSetReady(tt.args.ds); got != tt.want { if got := c.daemonSetReady(tt.args.ds); got != tt.want {
t.Errorf("daemonSetReady() = %v, want %v", got, tt.want) t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
} }
@ -990,7 +953,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.statefulSetReady(tt.args.sts); got != tt.want { if got := c.statefulSetReady(tt.args.sts); got != tt.want {
t.Errorf("statefulSetReady() = %v, want %v", got, tt.want) t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
} }
@ -1049,7 +1012,7 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
for _, pod := range tt.existPods { for _, pod := range tt.existPods {
if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil { if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Pod error: %v", err) t.Errorf("Failed to create Pod error: %v", err)
@ -1128,7 +1091,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
got, err := c.jobReady(tt.args.job) got, err := c.jobReady(tt.args.job)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
@ -1167,7 +1130,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
if got := c.volumeReady(tt.args.v); got != tt.want { if got := c.volumeReady(tt.args.v); got != tt.want {
t.Errorf("volumeReady() = %v, want %v", got, tt.want) t.Errorf("volumeReady() = %v, want %v", got, tt.want)
} }
@ -1212,7 +1175,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
got := c.serviceReady(tt.args.service) got := c.serviceReady(tt.args.service)
if got != tt.want { if got != tt.want {
t.Errorf("serviceReady() = %v, want %v", got, tt.want) t.Errorf("serviceReady() = %v, want %v", got, tt.want)
@ -1281,7 +1244,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
got := c.crdBetaReady(tt.args.crdBeta) got := c.crdBetaReady(tt.args.crdBeta)
if got != tt.want { if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want) t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
@ -1350,7 +1313,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil) c := NewReadyChecker(fake.NewClientset())
got := c.crdReady(tt.args.crdBeta) got := c.crdReady(tt.args.crdBeta)
if got != tt.want { if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want) t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)

@ -20,6 +20,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"sort" "sort"
"time" "time"
@ -42,7 +43,6 @@ import (
type statusWaiter struct { type statusWaiter struct {
client dynamic.Interface client dynamic.Interface
restMapper meta.RESTMapper restMapper meta.RESTMapper
log func(string, ...interface{})
} }
func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
@ -55,7 +55,7 @@ func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
@ -76,7 +76,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.wait(ctx, resourceList, sw) return w.wait(ctx, resourceList, sw)
} }
@ -84,7 +84,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
@ -95,7 +95,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout) ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel() defer cancel()
w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw) return w.waitForDelete(ctx, resourceList, sw)
} }
@ -113,7 +113,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL
} }
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources) statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log)) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
<-done <-done
if statusCollector.Error != nil { if statusCollector.Error != nil {
@ -156,7 +156,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources) statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log)) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
<-done <-done
if statusCollector.Error != nil { if statusCollector.Error != nil {
@ -179,7 +179,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
return nil return nil
} }
func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus
@ -209,8 +209,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
}) })
first := nonDesiredResources[0] first := nonDesiredResources[0]
logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n", slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status)
} }
} }
} }
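Because statusWaiter is unexported, external callers reach it through Client.GetWaiter; its progress messages now land on the default slog logger rather than an injected log func. A minimal sketch, assuming a reachable cluster and default kubeconfig discovery:

package main

import (
	"time"

	"helm.sh/helm/v4/pkg/kube"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	client := kube.New(genericclioptions.NewConfigFlags(true))
	waiter, err := client.GetWaiter(kube.StatusWatcherStrategy)
	if err != nil {
		panic(err)
	}
	// An empty list keeps the sketch self-contained; a real caller would pass
	// the ResourceList built from its rendered manifests.
	var resources kube.ResourceList
	_ = waiter.Wait(resources, 30*time.Second)
}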

@ -217,7 +217,6 @@ func TestStatusWaitForDelete(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
restMapper: fakeMapper, restMapper: fakeMapper,
client: fakeClient, client: fakeClient,
log: t.Logf,
} }
objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
for _, objToCreate := range objsToCreate { for _, objToCreate := range objsToCreate {
@ -258,7 +257,6 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
restMapper: fakeMapper, restMapper: fakeMapper,
client: fakeClient, client: fakeClient,
log: t.Logf,
} }
// Don't create the object to test that the wait for delete works when the object doesn't exist // Don't create the object to test that the wait for delete works when the object doesn't exist
objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
@ -317,7 +315,6 @@ func TestStatusWait(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: t.Logf,
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {
@ -371,7 +368,6 @@ func TestWaitForJobComplete(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: t.Logf,
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {
@ -431,7 +427,6 @@ func TestWatchForReady(t *testing.T) {
statusWaiter := statusWaiter{ statusWaiter := statusWaiter{
client: fakeClient, client: fakeClient,
restMapper: fakeMapper, restMapper: fakeMapper,
log: t.Logf,
} }
objs := getRuntimeObjFromManifests(t, tt.objManifests) objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs { for _, obj := range objs {

@ -50,24 +50,23 @@ import (
// Helm 4 now uses the StatusWaiter implementation instead // Helm 4 now uses the StatusWaiter implementation instead
type legacyWaiter struct { type legacyWaiter struct {
c ReadyChecker c ReadyChecker
log func(string, ...interface{})
kubeClient *kubernetes.Clientset kubeClient *kubernetes.Clientset
} }
func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
return hw.waitForResources(resources, timeout) return hw.waitForResources(resources, timeout)
} }
func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
return hw.waitForResources(resources, timeout) return hw.waitForResources(resources, timeout)
} }
// waitForResources polls to get the current status of all pods, PVCs, Services and // waitForResources polls to get the current status of all pods, PVCs, Services and
// Jobs(optional) until all are ready or a timeout is reached // Jobs(optional) until all are ready or a timeout is reached
func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error { func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
@ -85,10 +84,10 @@ func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Dura
if waitRetries > 0 && hw.isRetryableError(err, v) { if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++ numberOfErrors[i]++
if numberOfErrors[i] > waitRetries { if numberOfErrors[i] > waitRetries {
hw.log("Max number of retries reached") slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
return false, err return false, err
} }
hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
return false, nil return false, nil
} }
numberOfErrors[i] = 0 numberOfErrors[i] = 0
@ -104,14 +103,14 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo
if err == nil { if err == nil {
return false return false
} }
hw.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
if ev, ok := err.(*apierrors.StatusError); ok { if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code statusCode := ev.Status().Code
retryable := hw.isRetryableHTTPStatusCode(statusCode) retryable := hw.isRetryableHTTPStatusCode(statusCode)
hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable) slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
return retryable return retryable
} }
hw.log("Retryable error? %t", true) slog.Debug("retryable error assumed", "resource", resource.Name)
return true return true
} }
@ -249,7 +248,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
return nil return nil
} }
hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
// Use a selector on the name of the resource. This should be unique for the // Use a selector on the name of the resource. This should be unique for the
// given version and kind // given version and kind
@ -277,7 +276,8 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
// we get. We care mostly about jobs, where what we want to see is // we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet // the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks. // we don't really do anything to support these as hooks.
hw.log("Add/Modify event for %s: %v", info.Name, e.Type) slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
switch kind { switch kind {
case "Job": case "Job":
return hw.waitForJob(obj, info.Name) return hw.waitForJob(obj, info.Name)
@ -286,11 +286,11 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
} }
return true, nil return true, nil
case watch.Deleted: case watch.Deleted:
hw.log("Deleted event for %s", info.Name) slog.Debug("deleted event received", "resource", info.Name)
return true, nil return true, nil
case watch.Error: case watch.Error:
// Handle error and return with an error. // Handle error and return with an error.
hw.log("Error event for %s", info.Name) slog.Error("error event received", "resource", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name) return true, errors.Errorf("failed to deploy %s", info.Name)
default: default:
return false, nil return false, nil
@ -312,11 +312,12 @@ func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error
if c.Type == batchv1.JobComplete && c.Status == "True" { if c.Type == batchv1.JobComplete && c.Status == "True" {
return true, nil return true, nil
} else if c.Type == batchv1.JobFailed && c.Status == "True" { } else if c.Type == batchv1.JobFailed && c.Status == "True" {
slog.Error("job failed", "job", name, "reason", c.Reason)
return true, errors.Errorf("job %s failed: %s", name, c.Reason) return true, errors.Errorf("job %s failed: %s", name, c.Reason)
} }
} }
hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
return false, nil return false, nil
} }
@ -331,14 +332,15 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool
switch o.Status.Phase { switch o.Status.Phase {
case corev1.PodSucceeded: case corev1.PodSucceeded:
hw.log("Pod %s succeeded", o.Name) slog.Debug("pod succeeded", "pod", o.Name)
return true, nil return true, nil
case corev1.PodFailed: case corev1.PodFailed:
slog.Error("pod failed", "pod", o.Name)
return true, errors.Errorf("pod %s failed", o.Name) return true, errors.Errorf("pod %s failed", o.Name)
case corev1.PodPending: case corev1.PodPending:
hw.log("Pod %s pending", o.Name) slog.Debug("pod pending", "pod", o.Name)
case corev1.PodRunning: case corev1.PodRunning:
hw.log("Pod %s running", o.Name) slog.Debug("pod running", "pod", o.Name)
} }
return false, nil return false, nil

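In the legacy waiter, the printf-style `hw.log(...)` calls become package-level `slog.Debug`/`slog.Error` calls with structured attributes, so visibility is no longer decided by which function was injected but by the process-wide handler level. A minimal sketch of how a caller could surface those Debug records (the handler and writer choices are illustrative):

package main

import (
    "log/slog"
    "os"
)

func main() {
    // Install a handler whose level includes Debug so the waiter's
    // slog.Debug records become visible on stderr.
    h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
    slog.SetDefault(slog.New(h))

    // Same shape as the waiter's own call sites.
    slog.Debug("beginning wait for resources", "count", 3, "timeout", "5m0s")
}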
@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -43,7 +44,6 @@ const ConfigMapsDriverName = "ConfigMap"
// ConfigMapsInterface. // ConfigMapsInterface.
type ConfigMaps struct { type ConfigMaps struct {
impl corev1.ConfigMapInterface impl corev1.ConfigMapInterface
Log func(string, ...interface{})
} }
// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
@ -51,7 +51,6 @@ type ConfigMaps struct {
func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps { func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
return &ConfigMaps{ return &ConfigMaps{
impl: impl, impl: impl,
Log: func(_ string, _ ...interface{}) {},
} }
} }
@ -70,13 +69,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
cfgmaps.Log("get: failed to get %q: %s", key, err) slog.Debug("failed to get release", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
// found the configmap, decode the base64 data string // found the configmap, decode the base64 data string
r, err := decodeRelease(obj.Data["release"]) r, err := decodeRelease(obj.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log("get: failed to decode data %q: %s", key, err) slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
@ -93,7 +92,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
list, err := cfgmaps.impl.List(context.Background(), opts) list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil { if err != nil {
cfgmaps.Log("list: failed to list: %s", err) slog.Debug("failed to list releases", slog.Any("error", err))
return nil, err return nil, err
} }
@ -104,7 +103,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"]) rls, err := decodeRelease(item.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log("list: failed to decode release: %v: %s", item, err) slog.Debug("failed to decode release", "item", item, slog.Any("error", err))
continue continue
} }
@ -132,7 +131,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
list, err := cfgmaps.impl.List(context.Background(), opts) list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil { if err != nil {
cfgmaps.Log("query: failed to query with labels: %s", err) slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err return nil, err
} }
@ -144,7 +143,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"]) rls, err := decodeRelease(item.Data["release"])
if err != nil { if err != nil {
cfgmaps.Log("query: failed to decode release: %s", err) slog.Debug("failed to decode release", slog.Any("error", err))
continue continue
} }
rls.Labels = item.ObjectMeta.Labels rls.Labels = item.ObjectMeta.Labels
@ -166,7 +165,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
// create a new configmap to hold the release // create a new configmap to hold the release
obj, err := newConfigMapsObject(key, rls, lbs) obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil { if err != nil {
cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err) slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err return err
} }
// push the configmap object out into the kubiverse // push the configmap object out into the kubiverse
@ -175,7 +174,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
return ErrReleaseExists return ErrReleaseExists
} }
cfgmaps.Log("create: failed to create: %s", err) slog.Debug("failed to create release", slog.Any("error", err))
return err return err
} }
return nil return nil
@ -194,13 +193,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
// create a new configmap object to hold the release // create a new configmap object to hold the release
obj, err := newConfigMapsObject(key, rls, lbs) obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil { if err != nil {
cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err) slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err return err
} }
// push the configmap object out into the kubiverse // push the configmap object out into the kubiverse
_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
if err != nil { if err != nil {
cfgmaps.Log("update: failed to update: %s", err) slog.Debug("failed to update release", slog.Any("error", err))
return err return err
} }
return nil return nil

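With the `Log` field removed from `ConfigMaps`, constructing the driver only requires the typed client; its internal `slog.Debug` calls use the default logger. A rough sketch of the resulting wiring, assuming a `ConfigMapInterface` obtained from client-go (package and function names here are illustrative):

package storageexample

import (
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"

    "helm.sh/helm/v4/pkg/storage/driver"
)

// NewConfigMapsDriver wires up the ConfigMaps driver; there is no logger
// field to set any more, debug records go through the default slog logger.
func NewConfigMapsDriver(cm corev1.ConfigMapInterface) *driver.ConfigMaps {
    return driver.NewConfigMaps(cm)
}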
@ -262,7 +262,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock)
sqlxDB := sqlx.NewDb(sqlDB, "sqlmock") sqlxDB := sqlx.NewDb(sqlDB, "sqlmock")
return &SQL{ return &SQL{
db: sqlxDB, db: sqlxDB,
Log: func(_ string, _ ...interface{}) {},
namespace: "default", namespace: "default",
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
}, mock }, mock

@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -43,7 +44,6 @@ const SecretsDriverName = "Secret"
// SecretsInterface. // SecretsInterface.
type Secrets struct { type Secrets struct {
impl corev1.SecretInterface impl corev1.SecretInterface
Log func(string, ...interface{})
} }
// NewSecrets initializes a new Secrets wrapping an implementation of // NewSecrets initializes a new Secrets wrapping an implementation of
@ -51,7 +51,6 @@ type Secrets struct {
func NewSecrets(impl corev1.SecretInterface) *Secrets { func NewSecrets(impl corev1.SecretInterface) *Secrets {
return &Secrets{ return &Secrets{
impl: impl, impl: impl,
Log: func(_ string, _ ...interface{}) {},
} }
} }
@ -96,7 +95,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release,
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"])) rls, err := decodeRelease(string(item.Data["release"]))
if err != nil { if err != nil {
secrets.Log("list: failed to decode release: %v: %s", item, err) slog.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err))
continue continue
} }
@ -135,7 +134,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
for _, item := range list.Items { for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"])) rls, err := decodeRelease(string(item.Data["release"]))
if err != nil { if err != nil {
secrets.Log("query: failed to decode release: %s", err) slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err))
continue continue
} }
rls.Labels = item.ObjectMeta.Labels rls.Labels = item.ObjectMeta.Labels

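Both drivers now attach errors as `slog.Any("error", err)` attributes instead of interpolating them into the message string. Purely for illustration, this is roughly what such a record looks like under the standard text handler (the release key shown is made up):

package main

import (
    "errors"
    "log/slog"
    "os"
)

func main() {
    slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))

    err := errors.New("illegal base64 data at input byte 4")
    // Mirrors the driver call sites: a fixed message plus structured attributes.
    slog.Debug("failed to decode release", "key", "sh.helm.release.v1.demo.v1", slog.Any("error", err))
    // Emits roughly (time attribute elided here):
    // level=DEBUG msg="failed to decode release" key=sh.helm.release.v1.demo.v1 error="illegal base64 data at input byte 4"
}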
@ -18,6 +18,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import ( import (
"fmt" "fmt"
"log/slog"
"sort" "sort"
"strconv" "strconv"
"time" "time"
@ -86,8 +87,6 @@ type SQL struct {
db *sqlx.DB db *sqlx.DB
namespace string namespace string
statementBuilder sq.StatementBuilderType statementBuilder sq.StatementBuilderType
Log func(string, ...interface{})
} }
// Name returns the name of the driver. // Name returns the name of the driver.
@ -108,13 +107,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
migrate.SetDisableCreateTable(false) migrate.SetDisableCreateTable(false)
if err != nil { if err != nil {
s.Log("checkAlreadyApplied: failed to get migration records: %v", err) slog.Debug("failed to get migration records", slog.Any("error", err))
return false return false
} }
for _, record := range records { for _, record := range records {
if _, ok := migrationsIDs[record.Id]; ok { if _, ok := migrationsIDs[record.Id]; ok {
s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt) slog.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt)
delete(migrationsIDs, record.Id) delete(migrationsIDs, record.Id)
} }
} }
@ -122,7 +121,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
// check if all migrations applied // check if all migrations applied
if len(migrationsIDs) != 0 { if len(migrationsIDs) != 0 {
for id := range migrationsIDs { for id := range migrationsIDs {
s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id) slog.Debug("find unapplied migration", "id", id)
} }
return false return false
} }
@ -276,7 +275,7 @@ type SQLReleaseCustomLabelWrapper struct {
} }
// NewSQL initializes a new sql driver. // NewSQL initializes a new sql driver.
func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) { func NewSQL(connectionString string, namespace string) (*SQL, error) {
db, err := sqlx.Connect(postgreSQLDialect, connectionString) db, err := sqlx.Connect(postgreSQLDialect, connectionString)
if err != nil { if err != nil {
return nil, err return nil, err
@ -284,7 +283,6 @@ func NewSQL(connectionString string, logger func(string, ...interface{}), namesp
driver := &SQL{ driver := &SQL{
db: db, db: db,
Log: logger,
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
} }
@ -309,24 +307,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
query, args, err := qb.ToSql() query, args, err := qb.ToSql()
if err != nil { if err != nil {
s.Log("failed to build query: %v", err) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
// Get will return an error if the result is empty // Get will return an error if the result is empty
if err := s.db.Get(&record, query, args...); err != nil { if err := s.db.Get(&record, query, args...); err != nil {
s.Log("got SQL error when getting release %s: %v", key, err) slog.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log("get: failed to decode data %q: %v", key, err) slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -347,13 +345,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
query, args, err := sb.ToSql() query, args, err := sb.ToSql()
if err != nil { if err != nil {
s.Log("failed to build query: %v", err) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
var records = []SQLReleaseWrapper{} var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil { if err := s.db.Select(&records, query, args...); err != nil {
s.Log("list: failed to list: %v", err) slog.Debug("failed to list", slog.Any("error", err))
return nil, err return nil, err
} }
@ -361,12 +359,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
for _, record := range records { for _, record := range records {
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log("list: failed to decode release: %v: %v", record, err) slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue continue
} }
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err return nil, err
} }
for k, v := range getReleaseSystemLabels(release) { for k, v := range getReleaseSystemLabels(release) {
@ -396,7 +394,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
if _, ok := labelMap[key]; ok { if _, ok := labelMap[key]; ok {
sb = sb.Where(sq.Eq{key: labels[key]}) sb = sb.Where(sq.Eq{key: labels[key]})
} else { } else {
s.Log("unknown label %s", key) slog.Debug("unknown label", "key", key)
return nil, fmt.Errorf("unknown label %s", key) return nil, fmt.Errorf("unknown label %s", key)
} }
} }
@ -409,13 +407,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
// Build our query // Build our query
query, args, err := sb.ToSql() query, args, err := sb.ToSql()
if err != nil { if err != nil {
s.Log("failed to build query: %v", err) slog.Debug("failed to build query", slog.Any("error", err))
return nil, err return nil, err
} }
var records = []SQLReleaseWrapper{} var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil { if err := s.db.Select(&records, query, args...); err != nil {
s.Log("list: failed to query with labels: %v", err) slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err return nil, err
} }
@ -427,12 +425,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
for _, record := range records { for _, record := range records {
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log("list: failed to decode release: %v: %v", record, err) slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue continue
} }
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -456,13 +454,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls) body, err := encodeRelease(rls)
if err != nil { if err != nil {
s.Log("failed to encode release: %v", err) slog.Debug("failed to encode release", slog.Any("error", err))
return err return err
} }
transaction, err := s.db.Beginx() transaction, err := s.db.Beginx()
if err != nil { if err != nil {
s.Log("failed to start SQL transaction: %v", err) slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return fmt.Errorf("error beginning transaction: %v", err) return fmt.Errorf("error beginning transaction: %v", err)
} }
@ -491,7 +489,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
int(time.Now().Unix()), int(time.Now().Unix()),
).ToSql() ).ToSql()
if err != nil { if err != nil {
s.Log("failed to build insert query: %v", err) slog.Debug("failed to build insert query", slog.Any("error", err))
return err return err
} }
@ -505,17 +503,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if buildErr != nil { if buildErr != nil {
s.Log("failed to build select query: %v", buildErr) slog.Debug("failed to build select query", "error", buildErr)
return err return err
} }
var record SQLReleaseWrapper var record SQLReleaseWrapper
if err := transaction.Get(&record, selectQuery, args...); err == nil { if err := transaction.Get(&record, selectQuery, args...); err == nil {
s.Log("release %s already exists", key) slog.Debug("release already exists", "key", key)
return ErrReleaseExists return ErrReleaseExists
} }
s.Log("failed to store release %s in SQL database: %v", key, err) slog.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
return err return err
} }
@ -538,13 +536,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
if err != nil { if err != nil {
defer transaction.Rollback() defer transaction.Rollback()
s.Log("failed to build insert query: %v", err) slog.Debug("failed to build insert query", slog.Any("error", err))
return err return err
} }
if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil { if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
defer transaction.Rollback() defer transaction.Rollback()
s.Log("failed to write Labels: %v", err) slog.Debug("failed to write Labels", slog.Any("error", err))
return err return err
} }
} }
@ -563,7 +561,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls) body, err := encodeRelease(rls)
if err != nil { if err != nil {
s.Log("failed to encode release: %v", err) slog.Debug("failed to encode release", slog.Any("error", err))
return err return err
} }
@ -580,12 +578,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
ToSql() ToSql()
if err != nil { if err != nil {
s.Log("failed to build update query: %v", err) slog.Debug("failed to build update query", slog.Any("error", err))
return err return err
} }
if _, err := s.db.Exec(query, args...); err != nil { if _, err := s.db.Exec(query, args...); err != nil {
s.Log("failed to update release %s in SQL database: %v", key, err) slog.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
return err return err
} }
@ -596,7 +594,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
func (s *SQL) Delete(key string) (*rspb.Release, error) { func (s *SQL) Delete(key string) (*rspb.Release, error) {
transaction, err := s.db.Beginx() transaction, err := s.db.Beginx()
if err != nil { if err != nil {
s.Log("failed to start SQL transaction: %v", err) slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return nil, fmt.Errorf("error beginning transaction: %v", err) return nil, fmt.Errorf("error beginning transaction: %v", err)
} }
@ -607,20 +605,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if err != nil { if err != nil {
s.Log("failed to build select query: %v", err) slog.Debug("failed to build select query", slog.Any("error", err))
return nil, err return nil, err
} }
var record SQLReleaseWrapper var record SQLReleaseWrapper
err = transaction.Get(&record, selectQuery, args...) err = transaction.Get(&record, selectQuery, args...)
if err != nil { if err != nil {
s.Log("release %s not found: %v", key, err) slog.Debug("release not found", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound return nil, ErrReleaseNotFound
} }
release, err := decodeRelease(record.Body) release, err := decodeRelease(record.Body)
if err != nil { if err != nil {
s.Log("failed to decode release %s: %v", key, err) slog.Debug("failed to decode release", "key", key, slog.Any("error", err))
transaction.Rollback() transaction.Rollback()
return nil, err return nil, err
} }
@ -632,18 +630,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql() ToSql()
if err != nil { if err != nil {
s.Log("failed to build delete query: %v", err) slog.Debug("failed to build delete query", slog.Any("error", err))
return nil, err return nil, err
} }
_, err = transaction.Exec(deleteQuery, args...) _, err = transaction.Exec(deleteQuery, args...)
if err != nil { if err != nil {
s.Log("failed perform delete query: %v", err) slog.Debug("failed perform delete query", slog.Any("error", err))
return release, err return release, err
} }
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err return nil, err
} }
@ -654,7 +652,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
ToSql() ToSql()
if err != nil { if err != nil {
s.Log("failed to build delete Labels query: %v", err) slog.Debug("failed to build delete Labels query", slog.Any("error", err))
return nil, err return nil, err
} }
_, err = transaction.Exec(deleteCustomLabelsQuery, args...) _, err = transaction.Exec(deleteCustomLabelsQuery, args...)

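`NewSQL` loses its `logger func(string, ...interface{})` parameter, so existing call sites drop one argument and rely on the package-level `slog` logger instead. A hedged before/after sketch (the connection string and namespace are placeholders):

package main

import (
    "log/slog"
    "os"

    "helm.sh/helm/v4/pkg/storage/driver"
)

func main() {
    // Before the migration the caller supplied a printf-style logger:
    //   d, err := driver.NewSQL(dsn, log.Printf, "default")
    // After it, the logger argument is gone and the driver logs via slog.
    dsn := "postgres://helm:changeme@localhost/helm?sslmode=disable" // placeholder
    d, err := driver.NewSQL(dsn, "default")
    if err != nil {
        slog.Error("failed to initialize SQL storage driver", slog.Any("error", err))
        os.Exit(1)
    }
    _ = d
}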
@ -18,6 +18,7 @@ package storage // import "helm.sh/helm/v4/pkg/storage"
import ( import (
"fmt" "fmt"
"log/slog"
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -42,15 +43,13 @@ type Storage struct {
// be retained, including the most recent release. Values of 0 or less are // be retained, including the most recent release. Values of 0 or less are
// ignored (meaning no limits are imposed). // ignored (meaning no limits are imposed).
MaxHistory int MaxHistory int
Log func(string, ...interface{})
} }
// Get retrieves the release from storage. An error is returned // Get retrieves the release from storage. An error is returned
// if the storage driver failed to fetch the release, or the // if the storage driver failed to fetch the release, or the
// release identified by the key, version pair does not exist. // release identified by the key, version pair does not exist.
func (s *Storage) Get(name string, version int) (*rspb.Release, error) { func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
s.Log("getting release %q", makeKey(name, version)) slog.Debug("getting release", "key", makeKey(name, version))
return s.Driver.Get(makeKey(name, version)) return s.Driver.Get(makeKey(name, version))
} }
@ -58,7 +57,7 @@ func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
// error is returned if the storage driver fails to store the // error is returned if the storage driver fails to store the
// release, or a release with an identical key already exists. // release, or a release with an identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error { func (s *Storage) Create(rls *rspb.Release) error {
s.Log("creating release %q", makeKey(rls.Name, rls.Version)) slog.Debug("creating release", "key", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 { if s.MaxHistory > 0 {
// Want to make space for one more release. // Want to make space for one more release.
if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil && if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
@ -73,7 +72,7 @@ func (s *Storage) Create(rls *rspb.Release) error {
// storage backend fails to update the release or if the release // storage backend fails to update the release or if the release
// does not exist. // does not exist.
func (s *Storage) Update(rls *rspb.Release) error { func (s *Storage) Update(rls *rspb.Release) error {
s.Log("updating release %q", makeKey(rls.Name, rls.Version)) slog.Debug("updating release", "key", makeKey(rls.Name, rls.Version))
return s.Driver.Update(makeKey(rls.Name, rls.Version), rls) return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
} }
@ -81,21 +80,21 @@ func (s *Storage) Update(rls *rspb.Release) error {
// the storage backend fails to delete the release or if the release // the storage backend fails to delete the release or if the release
// does not exist. // does not exist.
func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
s.Log("deleting release %q", makeKey(name, version)) slog.Debug("deleting release", "key", makeKey(name, version))
return s.Driver.Delete(makeKey(name, version)) return s.Driver.Delete(makeKey(name, version))
} }
// ListReleases returns all releases from storage. An error is returned if the // ListReleases returns all releases from storage. An error is returned if the
// storage backend fails to retrieve the releases. // storage backend fails to retrieve the releases.
func (s *Storage) ListReleases() ([]*rspb.Release, error) { func (s *Storage) ListReleases() ([]*rspb.Release, error) {
s.Log("listing all releases in storage") slog.Debug("listing all releases in storage")
return s.Driver.List(func(_ *rspb.Release) bool { return true }) return s.Driver.List(func(_ *rspb.Release) bool { return true })
} }
// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned // ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
// if the storage backend fails to retrieve the releases. // if the storage backend fails to retrieve the releases.
func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
s.Log("listing uninstalled releases in storage") slog.Debug("listing uninstalled releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool { return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
}) })
@ -104,7 +103,7 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
// ListDeployed returns all releases with Status == DEPLOYED. An error is returned // ListDeployed returns all releases with Status == DEPLOYED. An error is returned
// if the storage backend fails to retrieve the releases. // if the storage backend fails to retrieve the releases.
func (s *Storage) ListDeployed() ([]*rspb.Release, error) { func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
s.Log("listing all deployed releases in storage") slog.Debug("listing all deployed releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool { return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
}) })
@ -132,7 +131,7 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) {
// DeployedAll returns all deployed releases with the provided name, or // DeployedAll returns all deployed releases with the provided name, or
// returns driver.NewErrNoDeployedReleases if not found. // returns driver.NewErrNoDeployedReleases if not found.
func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) { func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
s.Log("getting deployed releases from %q history", name) slog.Debug("getting deployed releases", "name", name)
ls, err := s.Driver.Query(map[string]string{ ls, err := s.Driver.Query(map[string]string{
"name": name, "name": name,
@ -151,7 +150,7 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
// History returns the revision history for the release with the provided name, or // History returns the revision history for the release with the provided name, or
// returns driver.ErrReleaseNotFound if no such release name exists. // returns driver.ErrReleaseNotFound if no such release name exists.
func (s *Storage) History(name string) ([]*rspb.Release, error) { func (s *Storage) History(name string) ([]*rspb.Release, error) {
s.Log("getting release history for %q", name) slog.Debug("getting release history", "name", name)
return s.Driver.Query(map[string]string{"name": name, "owner": "helm"}) return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
} }
@ -206,7 +205,7 @@ func (s *Storage) removeLeastRecent(name string, maximum int) error {
} }
} }
s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs)) slog.Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
switch c := len(errs); c { switch c := len(errs); c {
case 0: case 0:
return nil return nil
@ -221,7 +220,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
key := makeKey(name, version) key := makeKey(name, version)
_, err := s.Delete(name, version) _, err := s.Delete(name, version)
if err != nil { if err != nil {
s.Log("error pruning %s from release history: %s", key, err) slog.Debug("error pruning release", "key", key, slog.Any("error", err))
return err return err
} }
return nil return nil
@ -229,7 +228,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
// Last fetches the last revision of the named release. // Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) { func (s *Storage) Last(name string) (*rspb.Release, error) {
s.Log("getting last revision of %q", name) slog.Debug("getting last revision", "name", name)
h, err := s.History(name) h, err := s.History(name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -261,6 +260,5 @@ func Init(d driver.Driver) *Storage {
} }
return &Storage{ return &Storage{
Driver: d, Driver: d,
Log: func(_ string, _ ...interface{}) {},
} }
} }

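Similarly, `Storage` no longer carries a `Log` field and `Init` no longer installs a no-op logger, so callers have nothing logging-related to configure; how chatty the storage layer is depends only on the default `slog` level. A small usage sketch, using the in-memory driver purely for illustration:

package main

import (
    "log/slog"
    "os"

    "helm.sh/helm/v4/pkg/storage"
    "helm.sh/helm/v4/pkg/storage/driver"
)

func main() {
    // Raise the default level to Debug to see the storage layer's
    // "getting release" / "creating release" records.
    slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})))

    store := storage.Init(driver.NewMemory())
    store.MaxHistory = 10 // optional; 0 or less means no history limit

    if _, err := store.ListReleases(); err != nil {
        slog.Error("listing releases failed", slog.Any("error", err))
    }
}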
@ -312,7 +312,6 @@ func (d *MaxHistoryMockDriver) Name() string {
func TestMaxHistoryErrorHandling(t *testing.T) { func TestMaxHistoryErrorHandling(t *testing.T) {
//func TestStorageRemoveLeastRecentWithError(t *testing.T) { //func TestStorageRemoveLeastRecentWithError(t *testing.T) {
storage := Init(NewMaxHistoryMockDriver(driver.NewMemory())) storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
storage.Log = t.Logf
storage.MaxHistory = 1 storage.MaxHistory = 1
@ -338,7 +337,6 @@ func TestMaxHistoryErrorHandling(t *testing.T) {
func TestStorageRemoveLeastRecent(t *testing.T) { func TestStorageRemoveLeastRecent(t *testing.T) {
storage := Init(driver.NewMemory()) storage := Init(driver.NewMemory())
storage.Log = t.Logf
// Make sure that specifying this at the outset doesn't cause any bugs. // Make sure that specifying this at the outset doesn't cause any bugs.
storage.MaxHistory = 10 storage.MaxHistory = 10
@ -395,7 +393,6 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
func TestStorageDoNotDeleteDeployed(t *testing.T) { func TestStorageDoNotDeleteDeployed(t *testing.T) {
storage := Init(driver.NewMemory()) storage := Init(driver.NewMemory())
storage.Log = t.Logf
storage.MaxHistory = 3 storage.MaxHistory = 3
const name = "angry-bird" const name = "angry-bird"
