Merge pull request #30708 from benoittgt/migrate-kube-pkg-to-slog

Migrate pkg to slog
Matt Farina 5 months ago committed by GitHub
commit 7a1eb77e77
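The heart of the change, as a minimal sketch (the release name, message text, and error below are illustrative, not taken from any single file in this diff): injected printf-style logger callbacks such as the Configuration.Log field are dropped, and call sites log through the process-wide log/slog default logger with structured key/value attributes.

package main

import (
	"errors"
	"log/slog"
)

func main() {
	name, err := "my-release", errors.New("storage unavailable")

	// Before: components carried a `Log func(string, ...interface{})` field
	// that call sites invoked printf-style, e.g.
	//   cfg.Log("warning: Failed to update release %s: %s", name, err)

	// After: the shared slog default logger with structured attributes.
	slog.Warn("failed to update release", "name", name, slog.Any("error", err))
}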

@ -17,7 +17,7 @@ limitations under the License.
package main // import "helm.sh/helm/v4/cmd/helm"
import (
"log"
"log/slog"
"os"
// Import to initialize client auth plugins.
@ -27,10 +27,6 @@ import (
"helm.sh/helm/v4/pkg/kube"
)
func init() {
log.SetFlags(log.Lshortfile)
}
func main() {
// Setting the name of the app for managedFields in the Kubernetes client.
// It is set here to the full name of "helm" so that renaming of helm to
@ -40,12 +36,12 @@ func main() {
cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:])
if err != nil {
helmcmd.Warning("%+v", err)
slog.Warn("command failed", slog.Any("error", err))
os.Exit(1)
}
if err := cmd.Execute(); err != nil {
helmcmd.Debug("%+v", err)
slog.Debug("error", slog.Any("error", err))
switch e := err.(type) {
case helmcmd.PluginError:
os.Exit(e.Code)

@ -0,0 +1,87 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"context"
"log/slog"
"os"
)
// DebugEnabledFunc is a function type that determines if debug logging is enabled
// We use a function because we want to check the setting at log time, not when the logger is created
type DebugEnabledFunc func() bool
// DebugCheckHandler checks settings.Debug at log time
type DebugCheckHandler struct {
handler slog.Handler
debugEnabled DebugEnabledFunc
}
// Enabled implements slog.Handler.Enabled
func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
if level == slog.LevelDebug {
return h.debugEnabled()
}
return true // Always log other levels
}
// Handle implements slog.Handler.Handle
func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
return h.handler.Handle(ctx, r)
}
// WithAttrs implements slog.Handler.WithAttrs
func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
return &DebugCheckHandler{
handler: h.handler.WithAttrs(attrs),
debugEnabled: h.debugEnabled,
}
}
// WithGroup implements slog.Handler.WithGroup
func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
return &DebugCheckHandler{
handler: h.handler.WithGroup(name),
debugEnabled: h.debugEnabled,
}
}
// NewLogger creates a new logger with dynamic debug checking
func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
// Create base handler that removes timestamps
baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
// Always use LevelDebug here to allow all messages through
// Our custom handler will do the filtering
Level: slog.LevelDebug,
ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
// Remove the time attribute
if a.Key == slog.TimeKey {
return slog.Attr{}
}
return a
},
})
// Wrap with our dynamic debug-checking handler
dynamicHandler := &DebugCheckHandler{
handler: baseHandler,
debugEnabled: debugEnabled,
}
return slog.New(dynamicHandler)
}
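For context, this is how the new package gets wired in (mirroring what newRootCmdWithConfig and the action package's test fixture do later in this diff). A standalone sketch: the stub settings struct stands in for Helm's cli.EnvSettings, and internal/logging is importable only from within the Helm module itself.

package main

import (
	"log/slog"

	"helm.sh/helm/v4/internal/logging"
)

// Stand-in for Helm's cli.EnvSettings; only the Debug flag matters here.
var settings = struct{ Debug bool }{}

func main() {
	// The closure is re-evaluated on every log call, so toggling
	// settings.Debug after the logger is built still takes effect.
	logger := logging.NewLogger(func() bool { return settings.Debug })
	slog.SetDefault(logger)

	slog.Info("always emitted")      // non-debug levels always pass Enabled
	slog.Debug("suppressed for now") // filtered out by DebugCheckHandler

	settings.Debug = true
	slog.Debug("emitted once debug is on") // checked at log time, not at creation
}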

@ -29,9 +29,6 @@ type Client struct {
// The base URL for requests
BaseURL string
// The internal logger to use
Log func(string, ...interface{})
}
// New creates a new client
@ -44,12 +41,9 @@ func New(u string) (*Client, error) {
return &Client{
BaseURL: u,
Log: nopLogger,
}, nil
}
var nopLogger = func(_ string, _ ...interface{}) {}
// Validate if the base URL for monocular is valid.
func validate(u string) error {

@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"io"
"log/slog"
"os"
"path"
"path/filepath"
@ -79,8 +80,6 @@ type Configuration struct {
// Capabilities describes the capabilities of the Kubernetes cluster.
Capabilities *chartutil.Capabilities
Log func(string, ...interface{})
// HookOutputFunc is called with the namespace, pod, and container name and returns a writer that will receive the log output.
HookOutputFunc func(namespace, pod, container string) io.Writer
}
@ -227,9 +226,6 @@ type RESTClientGetter interface {
ToRESTMapper() (meta.RESTMapper, error)
}
// DebugLog sets the logger that writes debug strings
type DebugLog func(format string, v ...interface{})
// capabilities builds a Capabilities from discovery information.
func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
if cfg.Capabilities != nil {
@ -253,8 +249,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
apiVersions, err := GetVersionSet(dc)
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
cfg.Log("WARNING: To fix this, kubectl delete apiservice <service-name>")
slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
slog.Warn("to fix this, kubectl delete apiservice <service-name>")
} else {
return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
}
@ -353,14 +349,13 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
// recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) {
if err := cfg.Releases.Update(r); err != nil {
cfg.Log("warning: Failed to update release %s: %s", r.Name, err)
slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err))
}
}
// Init initializes the action configuration
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
kc := kube.New(getter)
kc.Log = log
lazyClient := &lazyClient{
namespace: namespace,
@ -371,11 +366,9 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
switch helmDriver {
case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "memory":
var d *driver.Memory
@ -395,7 +388,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
case "sql":
d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
log,
namespace,
)
if err != nil {
@ -409,7 +401,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
cfg.RESTClientGetter = getter
cfg.KubeClient = kc
cfg.Releases = store
cfg.Log = log
cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
return nil

@ -19,11 +19,13 @@ import (
"flag"
"fmt"
"io"
"log/slog"
"testing"
"github.com/stretchr/testify/assert"
fakeclientset "k8s.io/client-go/kubernetes/fake"
"helm.sh/helm/v4/internal/logging"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
@ -34,11 +36,16 @@ import (
"helm.sh/helm/v4/pkg/time"
)
var verbose = flag.Bool("test.log", false, "enable test logging")
var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)")
func actionConfigFixture(t *testing.T) *Configuration {
t.Helper()
logger := logging.NewLogger(func() bool {
return *verbose
})
slog.SetDefault(logger)
registryClient, err := registry.NewClient()
if err != nil {
t.Fatal(err)
@ -49,12 +56,6 @@ func actionConfigFixture(t *testing.T) *Configuration {
KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
Capabilities: chartutil.DefaultCapabilities,
RegistryClient: registryClient,
Log: func(format string, v ...interface{}) {
t.Helper()
if *verbose {
t.Logf(format, v...)
}
},
}
}
@ -334,7 +335,7 @@ func TestConfiguration_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
cfg := &Configuration{}
actualErr := cfg.Init(nil, "default", tt.helmDriver, nil)
actualErr := cfg.Init(nil, "default", tt.helmDriver)
if tt.expectErr {
assert.Error(t, actualErr)
assert.Contains(t, actualErr.Error(), tt.errMsg)

@ -17,6 +17,8 @@ limitations under the License.
package action
import (
"log/slog"
"github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@ -53,6 +55,6 @@ func (h *History) Run(name string) ([]*release.Release, error) {
return nil, errors.Errorf("release name is invalid: %s", name)
}
h.cfg.Log("getting history for release %s", name)
slog.Debug("getting history for release", "release", name)
return h.cfg.Releases.History(name)
}

@ -21,6 +21,7 @@ import (
"context"
"fmt"
"io"
"log/slog"
"net/url"
"os"
"path"
@ -172,7 +173,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
// If the error is CRD already exists, continue.
if apierrors.IsAlreadyExists(err) {
crdName := res[0].Name
i.cfg.Log("CRD %s is already present. Skipping.", crdName)
slog.Debug("CRD is already present. Skipping", "crd", crdName)
continue
}
return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
@ -200,7 +201,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
i.cfg.Log("Clearing discovery cache")
slog.Debug("clearing discovery cache")
discoveryClient.Invalidate()
_, _ = discoveryClient.ServerGroups()
@ -213,7 +214,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
i.cfg.Log("Clearing REST mapper cache")
slog.Debug("clearing REST mapper cache")
resettable.Reset()
}
}
@ -237,24 +238,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
if !i.ClientOnly {
if err := i.cfg.KubeClient.IsReachable(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Cluster reachability check failed: %v", err))
slog.Error("cluster reachability check failed", slog.Any("error", err))
return nil, errors.Wrap(err, "cluster reachability check failed")
}
}
// HideSecret must be used with dry run. Otherwise, return an error.
if !i.isDryRun() && i.HideSecret {
i.cfg.Log("ERROR: Hiding Kubernetes secrets requires a dry-run mode")
slog.Error("hiding Kubernetes secrets requires a dry-run mode")
return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
}
if err := i.availableName(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Release name check failed: %v", err))
slog.Error("release name check failed", slog.Any("error", err))
return nil, errors.Wrap(err, "release name check failed")
}
if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Processing chart dependencies failed: %v", err))
slog.Error("chart dependencies processing failed", slog.Any("error", err))
return nil, errors.Wrap(err, "chart dependencies processing failed")
}
@ -268,7 +269,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
// On dry run, bail here
if i.isDryRun() {
i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
} else if err := i.installCRDs(crds); err != nil {
return nil, err
}
@ -288,7 +289,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
mem.SetNamespace(i.Namespace)
i.cfg.Releases = storage.Init(mem)
} else if !i.ClientOnly && len(i.APIVersions) > 0 {
i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
slog.Debug("API Version list given outside of client only mode, this list will be ignored")
}
// Make sure if Atomic is set, that wait is set as well. This makes it so
@ -505,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// One possible strategy would be to do a timed retry to see if we can get
// this stored in the future.
if err := i.recordRelease(rel); err != nil {
i.cfg.Log("failed to record the release: %s", err)
slog.Error("failed to record the release", slog.Any("error", err))
}
return rel, nil
@ -514,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
if i.Atomic {
i.cfg.Log("Install failed and atomic is set, uninstalling release")
slog.Debug("install failed, uninstalling release", "release", i.ReleaseName)
uninstall := NewUninstall(i.cfg)
uninstall.DisableHooks = i.DisableHooks
uninstall.KeepHistory = false

@ -19,6 +19,7 @@ package action
import (
"bytes"
"fmt"
"log/slog"
"strings"
"time"
@ -63,26 +64,26 @@ func (r *Rollback) Run(name string) error {
r.cfg.Releases.MaxHistory = r.MaxHistory
r.cfg.Log("preparing rollback of %s", name)
slog.Debug("preparing rollback", "name", name)
currentRelease, targetRelease, err := r.prepareRollback(name)
if err != nil {
return err
}
if !r.DryRun {
r.cfg.Log("creating rolled back release for %s", name)
slog.Debug("creating rolled back release", "name", name)
if err := r.cfg.Releases.Create(targetRelease); err != nil {
return err
}
}
r.cfg.Log("performing rollback of %s", name)
slog.Debug("performing rollback", "name", name)
if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
return err
}
if !r.DryRun {
r.cfg.Log("updating status for rolled back release for %s", name)
slog.Debug("updating status for rolled back release", "name", name)
if err := r.cfg.Releases.Update(targetRelease); err != nil {
return err
}
@ -129,7 +130,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
return nil, nil, errors.Errorf("release has no %d version", previousVersion)
}
r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion)
slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
if err != nil {
@ -162,7 +163,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
if r.DryRun {
r.cfg.Log("dry run for %s", targetRelease.Name)
slog.Debug("dry run", "name", targetRelease.Name)
return targetRelease, nil
}
@ -181,7 +182,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
return targetRelease, err
}
} else {
r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
slog.Debug("rollback hooks disabled", "name", targetRelease.Name)
}
// It is safe to use "force" here because these are resources currently rendered by the chart.
@ -193,14 +194,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
if err != nil {
msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
r.cfg.Log("warning: %s", msg)
slog.Warn(msg)
currentRelease.Info.Status = release.StatusSuperseded
targetRelease.Info.Status = release.StatusFailed
targetRelease.Info.Description = msg
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
if r.CleanupOnFail {
r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created))
slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
_, errs := r.cfg.KubeClient.Delete(results.Created)
if errs != nil {
var errorList []string
@ -209,7 +210,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
}
return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
}
r.cfg.Log("Resource cleanup complete")
slog.Debug("resource cleanup complete")
}
return targetRelease, err
}
@ -220,7 +221,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own
if err := recreate(r.cfg, results.Updated); err != nil {
r.cfg.Log(err.Error())
slog.Error(err.Error())
}
}
waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
@ -256,7 +257,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
}
// Supersede all previous deployments, see issue #2941.
for _, rel := range deployed {
r.cfg.Log("superseding previous deployment %d", rel.Version)
slog.Debug("superseding previous deployment", "version", rel.Version)
rel.Info.Status = release.StatusSuperseded
r.cfg.recordRelease(rel)
}

@ -17,6 +17,7 @@ limitations under the License.
package action
import (
"log/slog"
"strings"
"time"
@ -104,7 +105,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, errors.Errorf("the release named %q is already deleted", name)
}
u.cfg.Log("uninstall: Deleting %s", name)
slog.Debug("uninstall: deleting release", "name", name)
rel.Info.Status = release.StatusUninstalling
rel.Info.Deleted = helmtime.Now()
rel.Info.Description = "Deletion in progress (or silently failed)"
@ -115,18 +116,18 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return res, err
}
} else {
u.cfg.Log("delete hooks disabled for %s", name)
slog.Debug("delete hooks disabled", "release", name)
}
// From here on out, the release is currently considered to be in StatusUninstalling
// state.
if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err)
slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
deletedResources, kept, errs := u.deleteRelease(rel)
if errs != nil {
u.cfg.Log("uninstall: Failed to delete release: %s", errs)
slog.Debug("uninstall: Failed to delete release", slog.Any("error", errs))
return nil, errors.Errorf("failed to delete release: %s", name)
}
@ -153,7 +154,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if !u.KeepHistory {
u.cfg.Log("purge requested for %s", name)
slog.Debug("purge requested", "release", name)
err := u.purgeReleases(rels...)
if err != nil {
errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
@ -168,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err)
slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
if len(errs) > 0 {
@ -225,7 +226,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
}
if len(resources) > 0 {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok {
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation))
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation))
return resources, kept, errs
}
_, errs = u.cfg.KubeClient.Delete(resources)
@ -233,7 +234,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
return resources, kept, errs
}
func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation {
func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
switch cascadingFlag {
case "orphan":
return v1.DeletePropagationOrphan
@ -242,7 +243,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro
case "background":
return v1.DeletePropagationBackground
default:
cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag)
slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
return v1.DeletePropagationBackground
}
}

@ -20,6 +20,7 @@ import (
"bytes"
"context"
"fmt"
"log/slog"
"strings"
"sync"
"time"
@ -163,7 +164,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
return nil, errors.Errorf("release name is invalid: %s", name)
}
u.cfg.Log("preparing upgrade for %s", name)
slog.Debug("preparing upgrade", "name", name)
currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
if err != nil {
return nil, err
@ -171,7 +172,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
u.cfg.Releases.MaxHistory = u.MaxHistory
u.cfg.Log("performing update for %s", name)
slog.Debug("performing update", "name", name)
res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
if err != nil {
return res, err
@ -179,7 +180,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// Do not update for dry runs
if !u.isDryRun() {
u.cfg.Log("updating status for upgraded release for %s", name)
slog.Debug("updating status for upgraded release", "name", name)
if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
return res, err
}
@ -365,7 +366,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
// Run if it is a dry run
if u.isDryRun() {
u.cfg.Log("dry run for %s", upgradedRelease.Name)
slog.Debug("dry run for release", "name", upgradedRelease.Name)
if len(u.Description) > 0 {
upgradedRelease.Info.Description = u.Description
} else {
@ -374,7 +375,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
return upgradedRelease, nil
}
u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
slog.Debug("creating upgraded release", "name", upgradedRelease.Name)
if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
return nil, err
}
@ -425,7 +426,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
return
}
} else {
u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
}
results, err := u.cfg.KubeClient.Update(current, target, u.Force)
@ -441,7 +442,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own
if err := recreate(u.cfg, results.Updated); err != nil {
u.cfg.Log(err.Error())
slog.Error(err.Error())
}
}
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
@ -486,13 +487,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
u.cfg.Log("warning: %s", msg)
slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err))
rel.Info.Status = release.StatusFailed
rel.Info.Description = msg
u.cfg.recordRelease(rel)
if u.CleanupOnFail && len(created) > 0 {
u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
_, errs := u.cfg.KubeClient.Delete(created)
if errs != nil {
var errorList []string
@ -501,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
}
return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
}
u.cfg.Log("Resource cleanup complete")
slog.Debug("resource cleanup complete")
}
if u.Atomic {
u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")
slog.Debug("upgrade failed and atomic is set, rolling back to last successful release")
// As a protection, get the last successful release before rollback.
// If there are no successful releases, bail out
@ -556,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
if u.ResetValues {
// If ResetValues is set, we completely ignore current.Config.
u.cfg.Log("resetting values to the chart's original version")
slog.Debug("resetting values to the chart's original version")
return newVals, nil
}
// If the ReuseValues flag is set, we always copy the old values over the new config's values.
if u.ReuseValues {
u.cfg.Log("reusing the old release's values")
slog.Debug("reusing the old release's values")
// We have to regenerate the old coalesced values:
oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
@ -579,7 +580,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
// If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
if u.ResetThenReuseValues {
u.cfg.Log("merging values from old release to new values")
slog.Debug("merging values from old release to new values")
newVals = chartutil.CoalesceTables(newVals, current.Config)
@ -587,7 +588,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
}
if len(newVals) == 0 && len(current.Config) > 0 {
u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version)
slog.Debug("copying values from old release", "name", current.Name, "version", current.Version)
newVals = current.Config
}
return newVals, nil

@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
// get child table
vv, err := cvals.Table(r.Name + "." + child)
if err != nil {
slog.Warn("ImportValues missing table from chart", "chart", r.Name, "error", err)
slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err))
continue
}
// create value map from child to be merged into parent

@ -20,6 +20,7 @@ import (
"flag"
"fmt"
"log"
"log/slog"
"path/filepath"
"sort"
"strings"
@ -82,11 +83,11 @@ func (ws *waitValue) Set(s string) error {
*ws = waitValue(s)
return nil
case "true":
Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
*ws = waitValue(kube.StatusWatcherStrategy)
return nil
case "false":
Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
*ws = waitValue(kube.HookOnlyStrategy)
return nil
default:

@ -92,7 +92,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities,
Log: func(_ string, _ ...interface{}) {},
}
root, err := newRootCmdWithConfig(actionConfig, buf, args)

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"os"
"os/signal"
"syscall"
@ -229,9 +230,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
}
func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
Debug("Original chart version: %q", client.Version)
slog.Debug("Original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
@ -246,7 +247,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
return nil, err
}
Debug("CHART PATH: %s\n", cp)
slog.Debug("Chart path", "path", cp)
p := getter.All(settings)
vals, err := valueOpts.MergeValues(p)
@ -265,7 +266,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
}
if chartRequested.Metadata.Deprecated {
Warning("This chart is deprecated")
slog.Warn("this chart is deprecated")
}
if req := chartRequested.Metadata.Dependencies; req != nil {

@ -71,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(cmd *cobra.Command, _ []string) error {
if client.AllNamespaces {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Debug); err != nil {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
return err
}
}

@ -17,6 +17,7 @@ package cmd
import (
"io"
"log/slog"
"os"
"os/exec"
@ -66,7 +67,7 @@ func runHook(p *plugin.Plugin, event string) error {
prog := exec.Command(main, argv...)
Debug("running %s hook: %s", event, prog)
slog.Debug("running hook", "event", event, "program", prog)
prog.Stdout, prog.Stderr = os.Stdout, os.Stderr
if err := prog.Run(); err != nil {

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@ -79,7 +80,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error {
return err
}
Debug("loading plugin from %s", i.Path())
slog.Debug("loading plugin", "path", i.Path())
p, err := plugin.LoadDir(i.Path())
if err != nil {
return errors.Wrap(err, "plugin is installed but unusable")

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"github.com/gosuri/uitable"
"github.com/spf13/cobra"
@ -32,7 +33,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command {
Short: "list installed Helm plugins",
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error {
Debug("pluginDirs: %s", settings.PluginsDirectory)
slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"strings"
@ -60,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error {
}
func (o *pluginUninstallOptions) run(out io.Writer) error {
Debug("loading installed plugins from %s", settings.PluginsDirectory)
slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"path/filepath"
"strings"
@ -62,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error {
func (o *pluginUpdateOptions) run(out io.Writer) error {
installer.Debug = settings.Debug
Debug("loading installed plugins from %s", settings.PluginsDirectory)
slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err
@ -104,7 +105,7 @@ func updatePlugin(p *plugin.Plugin) error {
return err
}
Debug("loading plugin from %s", i.Path())
slog.Debug("loading plugin", "path", i.Path())
updatedPlugin, err := plugin.LoadDir(i.Path())
if err != nil {
return err

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"github.com/spf13/cobra"
@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
RunE: func(_ *cobra.Command, args []string) error {
client.Settings = settings
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"os"
"strings"
@ -122,7 +123,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
}
}
} else {
Warning("Using --password via the CLI is insecure. Use --password-stdin.")
slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
}
return username, password, nil

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"net/http"
"os"
"strings"
@ -31,6 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"helm.sh/helm/v4/internal/logging"
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli"
@ -96,16 +98,6 @@ By default, the default directories depend on the Operating System. The defaults
var settings = cli.New()
func Debug(format string, v ...interface{}) {
if settings.Debug {
log.Output(2, fmt.Sprintf("[debug] "+format+"\n", v...))
}
}
func Warning(format string, v ...interface{}) {
fmt.Fprintf(os.Stderr, "WARNING: "+format+"\n", v...)
}
func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
actionConfig := new(action.Configuration)
cmd, err := newRootCmdWithConfig(actionConfig, out, args)
@ -114,7 +106,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
}
cobra.OnInitialize(func() {
helmDriver := os.Getenv("HELM_DRIVER")
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Debug); err != nil {
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
log.Fatal(err)
}
if helmDriver == "memory" {
@ -148,6 +140,9 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
settings.AddFlags(flags)
addKlogFlags(flags)
logger := logging.NewLogger(func() bool { return settings.Debug })
slog.SetDefault(logger)
// Setup shell completion for the namespace flag
err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
if client, err := actionConfig.KubernetesClientSet(); err == nil {

@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"strings"
"github.com/gosuri/uitable"
@ -89,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error {
q := strings.Join(args, " ")
results, err := c.Search(q)
if err != nil {
Debug("%s", err)
slog.Debug("search failed", slog.Any("error", err))
return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
}

@ -21,6 +21,7 @@ import (
"bytes"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@ -130,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error {
}
func (o *searchRepoOptions) setupSearchedVersion() {
Debug("Original chart version: %q", o.version)
slog.Debug("original chart version", "version", o.version)
if o.version != "" {
return
}
if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
o.version = ">0.0.0-0"
} else { // search only for stable releases, prerelease versions will be skipped
Debug("setting version to >0.0.0")
slog.Debug("setting version to >0.0.0")
o.version = ">0.0.0"
}
}
@ -189,8 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
ind, err := repo.LoadIndexFile(f)
if err != nil {
Warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n)
Warning("%s", err)
slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err))
continue
}

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"github.com/spf13/cobra"
@ -211,9 +212,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) {
}
func runShow(args []string, client *action.Show) (string, error) {
Debug("Original chart version: %q", client.Version)
slog.Debug("original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"os"
"os/signal"
"syscall"
@ -173,7 +174,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
@ -225,7 +226,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if ch.Metadata.Deprecated {
Warning("This chart is deprecated")
slog.Warn("this chart is deprecated")
}
// Create context and prepare the handle of SIGTERM

@ -98,7 +98,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
gvk := schema.FromAPIVersionAndKind(apiversion, kind)
apiRes, err := getAPIResourceForGVK(gvk, config)
if err != nil {
slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), "error", err)
slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err))
return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
}
gvr := schema.GroupVersionResource{
@ -124,7 +124,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met
}
resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
if err != nil {
slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), "error", err)
slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err))
return res, err
}
for _, resource := range resList.APIResources {

@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error {
rule = strings.TrimPrefix(rule, "/")
ok, err := filepath.Match(rule, n)
if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error {
p.match = func(n string, _ os.FileInfo) bool {
ok, err := filepath.Match(rule, n)
if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error {
n = filepath.Base(n)
ok, err := filepath.Match(rule, n)
if err != nil {
slog.Error("failed to compile", "rule", rule, "error", err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok

@ -22,6 +22,7 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"reflect"
@ -73,7 +74,6 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower
// chance of it changing.
Factory Factory
Log func(string, ...interface{})
// Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string
@ -120,7 +120,6 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) {
return &statusWaiter{
restMapper: restMapper,
client: dynamicClient,
log: c.Log,
}, nil
}
@ -131,7 +130,7 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
if err != nil {
return nil, err
}
return &legacyWaiter{kubeClient: kc, log: c.Log}, nil
return &legacyWaiter{kubeClient: kc}, nil
case StatusWatcherStrategy:
return c.newStatusWatcher()
case HookOnlyStrategy:
@ -162,13 +161,10 @@ func New(getter genericclioptions.RESTClientGetter) *Client {
factory := cmdutil.NewFactory(getter)
c := &Client{
Factory: factory,
Log: nopLogger,
}
return c
}
var nopLogger = func(_ string, _ ...interface{}) {}
// getKubeClient get or create a new KubernetesClientSet
func (c *Client) getKubeClient() (kubernetes.Interface, error) {
var err error
@ -198,7 +194,7 @@ func (c *Client) IsReachable() error {
// Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) {
c.Log("creating %d resource(s)", len(resources))
slog.Debug("creating resource(s)", "resources", len(resources))
if err := perform(resources, createResource); err != nil {
return nil, err
}
@ -250,7 +246,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil {
c.Log("Warning: get the relation pod is failed, err:%s", err.Error())
slog.Warn("get the relation pod is failed", slog.Any("error", err))
}
}
}
@ -268,7 +264,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil {
return objs, nil
}
c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)
slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
selector, ok, _ := getSelectorFromObject(info.Object)
if !ok {
return objs, nil
@ -410,7 +406,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
updateErrors := []string{}
res := &Result{}
c.Log("checking %d resources for changes", len(target))
slog.Debug("checking resources for changes", "resources", len(target))
err := target.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
@ -431,7 +427,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind)
return nil
}
@ -442,7 +438,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
if err := updateResource(c, info, originalInfo.Object, force); err != nil {
c.Log("error updating the resource %q:\n\t %v", info.Name, err)
slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
updateErrors = append(updateErrors, err.Error())
}
// Because we check for errors later, append the info regardless
@ -459,22 +455,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
for _, info := range original.Difference(target) {
c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace)
slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
if err := info.Get(); err != nil {
c.Log("Unable to get obj %q, err: %s", info.Name, err)
slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil {
c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue
}
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
res.Deleted = append(res.Deleted, info)
@ -498,16 +494,16 @@ func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy meta
return rdelete(c, resources, policy)
}
func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error
res := &Result{}
mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error {
c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) {
if err != nil {
c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
mtx.Lock()
defer mtx.Unlock()
@ -641,7 +637,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
return patch, types.StrategicMergePatchType, err
}
func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
var (
obj runtime.Object
helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
@ -655,7 +651,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
if err != nil {
return errors.Wrap(err, "failed to replace object")
}
c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
} else {
patch, patchType, err := createPatch(target, currentObj)
if err != nil {
@ -663,7 +659,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
}
if patch == nil || string(patch) == "{}" {
c.Log("Looks like there are no changes for %s %q", kind, target.Name)
slog.Debug("no changes detected", "kind", kind, "name", target.Name)
// This needs to happen to make sure that Helm has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil {
@ -672,7 +668,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil
}
// send patch to server
c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace)
slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil {
return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
@ -719,9 +715,6 @@ func copyRequestStreamToWriter(request *rest.Request, podName, containerName str
if err != nil {
return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
}
if err != nil {
return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName)
}
return nil
}

@ -107,7 +107,6 @@ func newTestClient(t *testing.T) *Client {
return &Client{
Factory: testFactory.WithNamespace("default"),
Log: nopLogger,
}
}

@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
"log/slog"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@ -57,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults.
func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker {
func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{
client: cl,
log: log,
}
if c.log == nil {
c.log = nopLogger
}
for _, opt := range opts {
opt(&c)
@ -74,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}),
// ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -230,18 +226,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true
}
}
c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false
}
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit {
c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
}
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil
}
return true, nil
@ -255,7 +251,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" {
c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName())
slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
@ -263,12 +259,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 {
c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs)
slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true
}
if s.Status.LoadBalancer.Ingress == nil {
c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName())
slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
}
@ -278,7 +274,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound {
c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName())
slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false
}
return true
@ -291,13 +287,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
}
// Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation)
slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation)
return false
}
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) {
c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false
}
return true
@ -306,7 +302,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation)
slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation)
return false
}
@ -317,7 +313,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false
}
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@ -330,7 +326,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
if !(int(ds.Status.NumberReady) >= expectedReady) {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady)
slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false
}
return true
@ -382,13 +378,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true
}
@ -414,30 +410,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false
}
if int(sts.Status.ReadyReplicas) != replicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false
}
// This check only makes sense when all partitions are being upgraded otherwise during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false
}
c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true
}
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation)
return false
}
return true
@ -446,7 +442,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation)
slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation)
return false
}
return true

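Every hunk above follows the same migration: the injected log func(string, ...interface{}) is dropped, and each call site becomes a package-level slog.Debug with a constant message plus key/value attributes. A minimal, self-contained sketch of the pattern (the names and values are made up for illustration; note the default handler drops Debug records unless its level is lowered):

```go
package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("connection refused")

	// Before: printf-style formatting baked values into the message.
	//   c.log("Deployment is not ready: %s/%s", "default", "web")

	// After: a fixed message with structured, filterable attributes.
	slog.Debug("Deployment is not ready", "namespace", "default", "name", "web")

	// Errors are attached as an attribute rather than formatted with %s.
	slog.Debug("failed to get release", "key", "demo.v1", slog.Any("error", err))
}
```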
@ -37,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -57,7 +56,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -73,7 +71,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
name: "IsReady Pod returns error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -90,7 +87,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -113,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -133,7 +128,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job error while getting job",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -149,7 +143,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
name: "IsReady Job",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -166,7 +159,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -188,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -209,7 +200,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments error while getting current Deployment",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -226,7 +216,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
name: "IsReady Deployments", //TODO fix this one
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -244,7 +233,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -270,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -290,7 +277,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -306,7 +292,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
name: "IsReady PersistentVolumeClaim with error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -323,7 +308,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -345,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -365,7 +348,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -381,7 +363,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
name: "IsReady Service with error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -398,7 +379,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -420,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -440,7 +419,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -456,7 +434,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
name: "IsReady DaemonSet with error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -473,7 +450,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -495,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -515,7 +490,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -531,7 +505,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
name: "IsReady StatefulSet with error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -548,7 +521,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -570,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -590,7 +561,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -606,7 +576,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController with error",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -622,7 +591,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
name: "IsReady ReplicationController and pods not ready for object",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -639,7 +607,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -661,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -681,7 +647,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -697,7 +662,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
name: "IsReady ReplicaSet not ready",
fields: fields{
client: fake.NewClientset(),
log: func(string, ...interface{}) {},
checkJobs: true,
pausedAsReady: false,
},
@ -714,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -791,7 +754,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
}
@ -825,7 +788,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.replicaSetReady(tt.args.rs); got != tt.want {
t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
}
@ -859,7 +822,7 @@ func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
}
@ -914,7 +877,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.daemonSetReady(tt.args.ds); got != tt.want {
t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
}
@ -990,7 +953,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.statefulSetReady(tt.args.sts); got != tt.want {
t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
}
@ -1049,7 +1012,7 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
for _, pod := range tt.existPods {
if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Pod error: %v", err)
@ -1128,7 +1091,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got, err := c.jobReady(tt.args.job)
if (err != nil) != tt.wantErr {
t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
@ -1167,7 +1130,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.volumeReady(tt.args.v); got != tt.want {
t.Errorf("volumeReady() = %v, want %v", got, tt.want)
}
@ -1212,7 +1175,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.serviceReady(tt.args.service)
if got != tt.want {
t.Errorf("serviceReady() = %v, want %v", got, tt.want)
@ -1281,7 +1244,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.crdBetaReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
@ -1350,7 +1313,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.crdReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)

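For callers, the visible API change in these test hunks is the constructor: NewReadyChecker no longer accepts a logger, so the nil second argument disappears everywhere. A hedged sketch of a caller outside the package (the import alias and the fake clientset are assumptions for illustration; PausedAsReady and CheckJobs are the functional options that appear in the legacy waiter hunks below):

```go
package main

import (
	"k8s.io/client-go/kubernetes/fake"

	"helm.sh/helm/v4/pkg/kube"
)

func main() {
	// Before this change: kube.NewReadyChecker(fake.NewClientset(), nil, ...)
	// After: the logger argument is gone; only the client and options remain.
	checker := kube.NewReadyChecker(fake.NewClientset(), kube.PausedAsReady(true), kube.CheckJobs(true))
	_ = checker
}
```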
@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"sort"
"time"
@ -42,7 +43,6 @@ import (
type statusWaiter struct {
client dynamic.Interface
restMapper meta.RESTMapper
log func(string, ...interface{})
}
func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
@ -55,7 +55,7 @@ func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout)
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
@ -76,7 +76,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout)
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.wait(ctx, resourceList, sw)
}
@ -84,7 +84,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout)
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
@ -95,7 +95,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout)
slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw)
}
@ -113,7 +113,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL
}
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log))
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
<-done
if statusCollector.Error != nil {
@ -156,7 +156,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log))
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
<-done
if statusCollector.Error != nil {
@ -179,7 +179,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
return nil
}
func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc {
func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus
@ -209,8 +209,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
})
first := nonDesiredResources[0]
logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n",
first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status)
slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
}
}
}

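With a package-level logger, statusObserver above no longer threads a logFn parameter through its closure; it closes over only the cancel func and the desired status. A stripped-down sketch of that shape (types simplified to strings; the real code observes kstatus resource events):

```go
package main

import "log/slog"

// makeObserver shows the shape after the change: no injected logger, just
// the state the closure still needs.
func makeObserver(desired string) func(name, kind, actual string) {
	return func(name, kind, actual string) {
		if actual == desired {
			return
		}
		slog.Debug("waiting for resource", "name", name, "kind", kind, "expectedStatus", desired, "actualStatus", actual)
	}
}

func main() {
	observe := makeObserver("Current")
	observe("demo-pod", "Pod", "InProgress")
}
```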
@ -217,7 +217,6 @@ func TestStatusWaitForDelete(t *testing.T) {
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
log: t.Logf,
}
objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
for _, objToCreate := range objsToCreate {
@ -258,7 +257,6 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
log: t.Logf,
}
// Don't create the object to test that the wait for delete works when the object doesn't exist
objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
@ -317,7 +315,6 @@ func TestStatusWait(t *testing.T) {
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
@ -371,7 +368,6 @@ func TestWaitForJobComplete(t *testing.T) {
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
@ -431,7 +427,6 @@ func TestWatchForReady(t *testing.T) {
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {

@ -50,24 +50,23 @@ import (
// Helm 4 now uses the StatusWaiter implementation instead
type legacyWaiter struct {
c ReadyChecker
log func(string, ...interface{})
kubeClient *kubernetes.Clientset
}
func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true))
hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
return hw.waitForResources(resources, timeout)
}
func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true))
hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
return hw.waitForResources(resources, timeout)
}
// waitForResources polls to get the current status of all pods, PVCs, Services and
// Jobs(optional) until all are ready or a timeout is reached
func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout)
slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
@ -85,10 +84,10 @@ func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Dura
if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++
if numberOfErrors[i] > waitRetries {
hw.log("Max number of retries reached")
slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
return false, err
}
hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries)
slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
return false, nil
}
numberOfErrors[i] = 0
@ -104,14 +103,14 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo
if err == nil {
return false
}
hw.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource)
slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code
retryable := hw.isRetryableHTTPStatusCode(statusCode)
hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable)
slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
return retryable
}
hw.log("Retryable error? %t", true)
slog.Debug("retryable error assumed", "resource", resource.Name)
return true
}
@ -249,7 +248,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
return nil
}
hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
// Use a selector on the name of the resource. This should be unique for the
// given version and kind
@ -277,7 +276,8 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks.
hw.log("Add/Modify event for %s: %v", info.Name, e.Type)
slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
switch kind {
case "Job":
return hw.waitForJob(obj, info.Name)
@ -286,11 +286,11 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
}
return true, nil
case watch.Deleted:
hw.log("Deleted event for %s", info.Name)
slog.Debug("deleted event received", "resource", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
hw.log("Error event for %s", info.Name)
slog.Error("error event received", "resource", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name)
default:
return false, nil
@ -312,11 +312,12 @@ func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error
if c.Type == batchv1.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batchv1.JobFailed && c.Status == "True" {
slog.Error("job failed", "job", name, "reason", c.Reason)
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
return false, nil
}
@ -331,14 +332,15 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool
switch o.Status.Phase {
case corev1.PodSucceeded:
hw.log("Pod %s succeeded", o.Name)
slog.Debug("pod succeeded", "pod", o.Name)
return true, nil
case corev1.PodFailed:
slog.Error("pod failed", "pod", o.Name)
return true, errors.Errorf("pod %s failed", o.Name)
case corev1.PodPending:
hw.log("Pod %s pending", o.Name)
slog.Debug("pod pending", "pod", o.Name)
case corev1.PodRunning:
hw.log("Pod %s running", o.Name)
slog.Debug("pod running", "pod", o.Name)
}
return false, nil

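One detail worth noting in the legacy waiter hunks: the migration is not uniformly to Debug. Progress and retry chatter stays at slog.Debug, while terminal failures (watch errors, failed Jobs and Pods) are raised at slog.Error right before the error value is returned. A small sketch of that convention (names and phases are illustrative):

```go
package main

import (
	"errors"
	"log/slog"
)

// waitForPod mirrors the level split above: Debug for intermediate states,
// Error for a terminal failure that is also returned to the caller.
func waitForPod(name, phase string) (done bool, err error) {
	switch phase {
	case "Succeeded":
		slog.Debug("pod succeeded", "pod", name)
		return true, nil
	case "Failed":
		slog.Error("pod failed", "pod", name)
		return true, errors.New("pod " + name + " failed")
	default:
		slog.Debug("pod pending", "pod", name)
		return false, nil
	}
}

func main() {
	_, _ = waitForPod("demo", "Pending")
}
```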
@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"context"
"fmt"
"log/slog"
"strconv"
"strings"
"time"
@ -43,7 +44,6 @@ const ConfigMapsDriverName = "ConfigMap"
// ConfigMapsInterface.
type ConfigMaps struct {
impl corev1.ConfigMapInterface
Log func(string, ...interface{})
}
// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
@ -51,7 +51,6 @@ type ConfigMaps struct {
func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
return &ConfigMaps{
impl: impl,
Log: func(_ string, _ ...interface{}) {},
}
}
@ -70,13 +69,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
return nil, ErrReleaseNotFound
}
cfgmaps.Log("get: failed to get %q: %s", key, err)
slog.Debug("failed to get release", "key", key, slog.Any("error", err))
return nil, err
}
// found the configmap, decode the base64 data string
r, err := decodeRelease(obj.Data["release"])
if err != nil {
cfgmaps.Log("get: failed to decode data %q: %s", key, err)
slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err
}
r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
@ -93,7 +92,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil {
cfgmaps.Log("list: failed to list: %s", err)
slog.Debug("failed to list releases", slog.Any("error", err))
return nil, err
}
@ -104,7 +103,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"])
if err != nil {
cfgmaps.Log("list: failed to decode release: %v: %s", item, err)
slog.Debug("failed to decode release", "item", item, slog.Any("error", err))
continue
}
@ -132,7 +131,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil {
cfgmaps.Log("query: failed to query with labels: %s", err)
slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err
}
@ -144,7 +143,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"])
if err != nil {
cfgmaps.Log("query: failed to decode release: %s", err)
slog.Debug("failed to decode release", slog.Any("error", err))
continue
}
rls.Labels = item.ObjectMeta.Labels
@ -166,7 +165,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
// create a new configmap to hold the release
obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil {
cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err)
slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err
}
// push the configmap object out into the kubiverse
@ -175,7 +174,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
return ErrReleaseExists
}
cfgmaps.Log("create: failed to create: %s", err)
slog.Debug("failed to create release", slog.Any("error", err))
return err
}
return nil
@ -194,13 +193,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
// create a new configmap object to hold the release
obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil {
cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err)
slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err
}
// push the configmap object out into the kubiverse
_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
if err != nil {
cfgmaps.Log("update: failed to update: %s", err)
slog.Debug("failed to update release", slog.Any("error", err))
return err
}
return nil

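The driver hunks wrap errors as slog.Any("error", err). That is equivalent to passing the alternating pair "error", err; slog.Any simply constructs the slog.Attr explicitly, which reads clearly next to plain key/value pairs. A quick sketch of the equivalence:

```go
package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("boom")

	// These two calls produce the same record: error=boom.
	slog.Debug("failed to get release", "error", err)
	slog.Debug("failed to get release", slog.Any("error", err))
}
```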
@ -262,7 +262,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock)
sqlxDB := sqlx.NewDb(sqlDB, "sqlmock")
return &SQL{
db: sqlxDB,
Log: func(_ string, _ ...interface{}) {},
namespace: "default",
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
}, mock

@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"context"
"fmt"
"log/slog"
"strconv"
"strings"
"time"
@ -43,7 +44,6 @@ const SecretsDriverName = "Secret"
// SecretsInterface.
type Secrets struct {
impl corev1.SecretInterface
Log func(string, ...interface{})
}
// NewSecrets initializes a new Secrets wrapping an implementation of
@ -51,7 +51,6 @@ type Secrets struct {
func NewSecrets(impl corev1.SecretInterface) *Secrets {
return &Secrets{
impl: impl,
Log: func(_ string, _ ...interface{}) {},
}
}
@ -96,7 +95,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release,
for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"]))
if err != nil {
secrets.Log("list: failed to decode release: %v: %s", item, err)
slog.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err))
continue
}
@ -135,7 +134,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"]))
if err != nil {
secrets.Log("query: failed to decode release: %s", err)
slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err))
continue
}
rls.Labels = item.ObjectMeta.Labels

@ -18,6 +18,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"fmt"
"log/slog"
"sort"
"strconv"
"time"
@ -86,8 +87,6 @@ type SQL struct {
db *sqlx.DB
namespace string
statementBuilder sq.StatementBuilderType
Log func(string, ...interface{})
}
// Name returns the name of the driver.
@ -108,13 +107,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
migrate.SetDisableCreateTable(false)
if err != nil {
s.Log("checkAlreadyApplied: failed to get migration records: %v", err)
slog.Debug("failed to get migration records", slog.Any("error", err))
return false
}
for _, record := range records {
if _, ok := migrationsIDs[record.Id]; ok {
s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt)
slog.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt)
delete(migrationsIDs, record.Id)
}
}
@ -122,7 +121,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
// check if all migrations applied
if len(migrationsIDs) != 0 {
for id := range migrationsIDs {
s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id)
slog.Debug("find unapplied migration", "id", id)
}
return false
}
@ -276,7 +275,7 @@ type SQLReleaseCustomLabelWrapper struct {
}
// NewSQL initializes a new sql driver.
func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
func NewSQL(connectionString string, namespace string) (*SQL, error) {
db, err := sqlx.Connect(postgreSQLDialect, connectionString)
if err != nil {
return nil, err
@ -284,7 +283,6 @@ func NewSQL(connectionString string, logger func(string, ...interface{}), namesp
driver := &SQL{
db: db,
Log: logger,
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
}
@ -309,24 +307,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
query, args, err := qb.ToSql()
if err != nil {
s.Log("failed to build query: %v", err)
slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
// Get will return an error if the result is empty
if err := s.db.Get(&record, query, args...); err != nil {
s.Log("got SQL error when getting release %s: %v", key, err)
slog.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound
}
release, err := decodeRelease(record.Body)
if err != nil {
s.Log("get: failed to decode data %q: %v", key, err)
slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err
}
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err)
slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err
}
@ -347,13 +345,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
query, args, err := sb.ToSql()
if err != nil {
s.Log("failed to build query: %v", err)
slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil {
s.Log("list: failed to list: %v", err)
slog.Debug("failed to list", slog.Any("error", err))
return nil, err
}
@ -361,12 +359,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
for _, record := range records {
release, err := decodeRelease(record.Body)
if err != nil {
s.Log("list: failed to decode release: %v: %v", record, err)
slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue
}
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err
}
for k, v := range getReleaseSystemLabels(release) {
@ -396,7 +394,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
if _, ok := labelMap[key]; ok {
sb = sb.Where(sq.Eq{key: labels[key]})
} else {
s.Log("unknown label %s", key)
slog.Debug("unknown label", "key", key)
return nil, fmt.Errorf("unknown label %s", key)
}
}
@ -409,13 +407,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
// Build our query
query, args, err := sb.ToSql()
if err != nil {
s.Log("failed to build query: %v", err)
slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil {
s.Log("list: failed to query with labels: %v", err)
slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err
}
@ -427,12 +425,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
for _, record := range records {
release, err := decodeRelease(record.Body)
if err != nil {
s.Log("list: failed to decode release: %v: %v", record, err)
slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue
}
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err
}
@ -456,13 +454,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls)
if err != nil {
s.Log("failed to encode release: %v", err)
slog.Debug("failed to encode release", slog.Any("error", err))
return err
}
transaction, err := s.db.Beginx()
if err != nil {
s.Log("failed to start SQL transaction: %v", err)
slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return fmt.Errorf("error beginning transaction: %v", err)
}
@ -491,7 +489,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
int(time.Now().Unix()),
).ToSql()
if err != nil {
s.Log("failed to build insert query: %v", err)
slog.Debug("failed to build insert query", slog.Any("error", err))
return err
}
@ -505,17 +503,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if buildErr != nil {
s.Log("failed to build select query: %v", buildErr)
slog.Debug("failed to build select query", "error", buildErr)
return err
}
var record SQLReleaseWrapper
if err := transaction.Get(&record, selectQuery, args...); err == nil {
s.Log("release %s already exists", key)
slog.Debug("release already exists", "key", key)
return ErrReleaseExists
}
s.Log("failed to store release %s in SQL database: %v", key, err)
slog.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
return err
}
@ -538,13 +536,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
if err != nil {
defer transaction.Rollback()
s.Log("failed to build insert query: %v", err)
slog.Debug("failed to build insert query", slog.Any("error", err))
return err
}
if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
defer transaction.Rollback()
s.Log("failed to write Labels: %v", err)
slog.Debug("failed to write Labels", slog.Any("error", err))
return err
}
}
@ -563,7 +561,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls)
if err != nil {
s.Log("failed to encode release: %v", err)
slog.Debug("failed to encode release", slog.Any("error", err))
return err
}
@ -580,12 +578,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
ToSql()
if err != nil {
s.Log("failed to build update query: %v", err)
slog.Debug("failed to build update query", slog.Any("error", err))
return err
}
if _, err := s.db.Exec(query, args...); err != nil {
s.Log("failed to update release %s in SQL database: %v", key, err)
slog.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
return err
}
@ -596,7 +594,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
func (s *SQL) Delete(key string) (*rspb.Release, error) {
transaction, err := s.db.Beginx()
if err != nil {
s.Log("failed to start SQL transaction: %v", err)
slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return nil, fmt.Errorf("error beginning transaction: %v", err)
}
@ -607,20 +605,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if err != nil {
s.Log("failed to build select query: %v", err)
slog.Debug("failed to build select query", slog.Any("error", err))
return nil, err
}
var record SQLReleaseWrapper
err = transaction.Get(&record, selectQuery, args...)
if err != nil {
s.Log("release %s not found: %v", key, err)
slog.Debug("release not found", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound
}
release, err := decodeRelease(record.Body)
if err != nil {
s.Log("failed to decode release %s: %v", key, err)
slog.Debug("failed to decode release", "key", key, slog.Any("error", err))
transaction.Rollback()
return nil, err
}
@ -632,18 +630,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if err != nil {
s.Log("failed to build delete query: %v", err)
slog.Debug("failed to build delete query", slog.Any("error", err))
return nil, err
}
_, err = transaction.Exec(deleteQuery, args...)
if err != nil {
s.Log("failed perform delete query: %v", err)
slog.Debug("failed perform delete query", slog.Any("error", err))
return release, err
}
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err)
slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err
}
@ -654,7 +652,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
ToSql()
if err != nil {
s.Log("failed to build delete Labels query: %v", err)
slog.Debug("failed to build delete Labels query", slog.Any("error", err))
return nil, err
}
_, err = transaction.Exec(deleteCustomLabelsQuery, args...)

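NewSQL drops its middle parameter, so existing callers shrink by one argument. A hedged sketch of a call after this change (the DSN is a placeholder; sqlx.Connect inside NewSQL will fail without a reachable PostgreSQL instance):

```go
package main

import (
	"helm.sh/helm/v4/pkg/storage/driver"
)

func main() {
	// Before: driver.NewSQL(dsn, log.Printf, "default")
	// After:  the logger argument is gone.
	d, err := driver.NewSQL("postgres://helm:helm@localhost:5432/helm?sslmode=disable", "default")
	if err != nil {
		panic(err) // no database in this sketch; expect an error here
	}
	_ = d
}
```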
@ -18,6 +18,7 @@ package storage // import "helm.sh/helm/v4/pkg/storage"
import (
"fmt"
"log/slog"
"strings"
"github.com/pkg/errors"
@ -42,15 +43,13 @@ type Storage struct {
// be retained, including the most recent release. Values of 0 or less are
// ignored (meaning no limits are imposed).
MaxHistory int
Log func(string, ...interface{})
}
// Get retrieves the release from storage. An error is returned
// if the storage driver failed to fetch the release, or the
// release identified by the key, version pair does not exist.
func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
s.Log("getting release %q", makeKey(name, version))
slog.Debug("getting release", "key", makeKey(name, version))
return s.Driver.Get(makeKey(name, version))
}
@ -58,7 +57,7 @@ func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
// error is returned if the storage driver fails to store the
// release, or a release with an identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error {
s.Log("creating release %q", makeKey(rls.Name, rls.Version))
slog.Debug("creating release", "key", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 {
// Want to make space for one more release.
if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
@ -73,7 +72,7 @@ func (s *Storage) Create(rls *rspb.Release) error {
// storage backend fails to update the release or if the release
// does not exist.
func (s *Storage) Update(rls *rspb.Release) error {
s.Log("updating release %q", makeKey(rls.Name, rls.Version))
slog.Debug("updating release", "key", makeKey(rls.Name, rls.Version))
return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
}
@ -81,21 +80,21 @@ func (s *Storage) Update(rls *rspb.Release) error {
// the storage backend fails to delete the release or if the release
// does not exist.
func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
s.Log("deleting release %q", makeKey(name, version))
slog.Debug("deleting release", "key", makeKey(name, version))
return s.Driver.Delete(makeKey(name, version))
}
// ListReleases returns all releases from storage. An error is returned if the
// storage backend fails to retrieve the releases.
func (s *Storage) ListReleases() ([]*rspb.Release, error) {
s.Log("listing all releases in storage")
slog.Debug("listing all releases in storage")
return s.Driver.List(func(_ *rspb.Release) bool { return true })
}
// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
// if the storage backend fails to retrieve the releases.
func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
s.Log("listing uninstalled releases in storage")
slog.Debug("listing uninstalled releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
})
@ -104,7 +103,7 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
// ListDeployed returns all releases with Status == DEPLOYED. An error is returned
// if the storage backend fails to retrieve the releases.
func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
s.Log("listing all deployed releases in storage")
slog.Debug("listing all deployed releases in storage")
return s.Driver.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
})
@ -132,7 +131,7 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) {
// DeployedAll returns all deployed releases with the provided name, or
// returns driver.NewErrNoDeployedReleases if not found.
func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
s.Log("getting deployed releases from %q history", name)
slog.Debug("getting deployed releases", "name", name)
ls, err := s.Driver.Query(map[string]string{
"name": name,
@ -151,7 +150,7 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
// History returns the revision history for the release with the provided name, or
// returns driver.ErrReleaseNotFound if no such release name exists.
func (s *Storage) History(name string) ([]*rspb.Release, error) {
s.Log("getting release history for %q", name)
slog.Debug("getting release history", "name", name)
return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
}
@ -206,7 +205,7 @@ func (s *Storage) removeLeastRecent(name string, maximum int) error {
}
}
s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
slog.Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
switch c := len(errs); c {
case 0:
return nil
@ -221,7 +220,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
key := makeKey(name, version)
_, err := s.Delete(name, version)
if err != nil {
s.Log("error pruning %s from release history: %s", key, err)
slog.Debug("error pruning release", "key", key, slog.Any("error", err))
return err
}
return nil
@ -229,7 +228,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
// Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) {
s.Log("getting last revision of %q", name)
slog.Debug("getting last revision", "name", name)
h, err := s.History(name)
if err != nil {
return nil, err
@ -261,6 +260,5 @@ func Init(d driver.Driver) *Storage {
}
return &Storage{
Driver: d,
Log: func(_ string, _ ...interface{}) {},
}
}

@ -312,7 +312,6 @@ func (d *MaxHistoryMockDriver) Name() string {
func TestMaxHistoryErrorHandling(t *testing.T) {
//func TestStorageRemoveLeastRecentWithError(t *testing.T) {
storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
storage.Log = t.Logf
storage.MaxHistory = 1
@ -338,7 +337,6 @@ func TestMaxHistoryErrorHandling(t *testing.T) {
func TestStorageRemoveLeastRecent(t *testing.T) {
storage := Init(driver.NewMemory())
storage.Log = t.Logf
// Make sure that specifying this at the outset doesn't cause any bugs.
storage.MaxHistory = 10
@ -395,7 +393,6 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
func TestStorageDoNotDeleteDeployed(t *testing.T) {
storage := Init(driver.NewMemory())
storage.Log = t.Logf
storage.MaxHistory = 3
const name = "angry-bird"

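With the storage.Log = t.Logf hooks gone, tests (or any caller) that want to see these Debug lines can swap the process-wide default logger instead. This is a generic log/slog sketch, not code from this PR:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// The default handler drops Debug records, so install one that keeps them.
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	slog.SetDefault(slog.New(h))

	slog.Debug("getting release", "key", "demo.v1") // now emitted
}
```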