Migrate kube package to slog

As part of Helm v4, we want to migrate logging to slog.

Signed-off-by: Benoit Tigeot <benoit.tigeot@lifen.fr>
pull/30708/head
parent af4f7370cb
commit f4631bf3d8
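For context: the new kube.Logger interface introduced below only requires a Debug(msg string, args ...any) method, so a *slog.Logger satisfies it directly. A minimal wiring sketch (handler and level choices here are illustrative, not part of this commit):

package main

import (
	"log/slog"
	"os"

	"helm.sh/helm/v4/pkg/kube"
)

func main() {
	// *slog.Logger provides Debug(msg string, args ...any), so it
	// satisfies the new kube.Logger interface without an adapter.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug, // surface kube's Debug-level logs
	}))

	client := kube.New(nil) // kube.New tolerates a nil getter and falls back to default config flags
	client.Log = logger
	_ = client
}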

@@ -376,7 +376,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) {
// Init initializes the action configuration
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
kc := kube.New(getter)
-kc.Log = log
+kc.Log = log // TODO: Switch to slog compatible logger
lazyClient := &lazyClient{
namespace: namespace,

@@ -73,7 +73,7 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower
// chance of it changing.
Factory Factory
-Log func(string, ...interface{})
+Log Logger
// Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string
@@ -167,8 +167,6 @@ func New(getter genericclioptions.RESTClientGetter) *Client {
return c
}
-var nopLogger = func(_ string, _ ...interface{}) {}
// getKubeClient get or create a new KubernetesClientSet
func (c *Client) getKubeClient() (kubernetes.Interface, error) {
var err error
@@ -198,7 +196,7 @@ func (c *Client) IsReachable() error {
// Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) {
-c.Log("creating %d resource(s)", len(resources))
+c.Log.Debug("creating resource(s)", "resources", resources)
if err := perform(resources, createResource); err != nil {
return nil, err
}
@@ -250,7 +248,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil {
-c.Log("Warning: get the relation pod is failed, err:%s", err.Error())
+c.Log.Debug("failed to get related pods", "error", err)
}
}
}
@@ -268,7 +266,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil {
return objs, nil
}
-c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)
+c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name)
selector, ok, _ := getSelectorFromObject(info.Object)
if !ok {
return objs, nil
@@ -410,7 +408,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
updateErrors := []string{}
res := &Result{}
-c.Log("checking %d resources for changes", len(target))
+c.Log.Debug("checking resources for changes", "original", original, "target", target)
err := target.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
@@ -431,7 +429,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
kind := info.Mapping.GroupVersionKind.Kind
-c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
+c.Log.Debug("created a new resource", "kind", kind, "name", info.Name, "namespace", info.Namespace)
return nil
}
@@ -442,7 +440,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
if err := updateResource(c, info, originalInfo.Object, force); err != nil {
-c.Log("error updating the resource %q:\n\t %v", info.Name, err)
+c.Log.Debug("error updating the resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "error", err)
updateErrors = append(updateErrors, err.Error())
}
// Because we check for errors later, append the info regardless
@@ -459,22 +457,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
for _, info := range original.Difference(target) {
-c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace)
+c.Log.Debug("deleting resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace)
if err := info.Get(); err != nil {
-c.Log("Unable to get obj %q, err: %s", info.Name, err)
+c.Log.Debug("unable to get object", "name", info.Name, "error", err)
continue
}
annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil {
-c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
+c.Log.Debug("unable to get annotations", "name", info.Name, "error", err)
}
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
-c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
+c.Log.Debug("skipping delete due to annotation", "name", info.Name, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue
}
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
-c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
+c.Log.Debug("failed to delete resource", "name", info.Name, "error", err)
continue
}
res.Deleted = append(res.Deleted, info)
@@ -503,11 +501,11 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa
res := &Result{}
mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error {
-c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
+c.Log.Debug("starting delete resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace)
err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) {
if err != nil {
-c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
+c.Log.Debug("ignoring delete failure", "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err)
}
mtx.Lock()
defer mtx.Unlock()
@@ -655,7 +653,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
if err != nil {
return errors.Wrap(err, "failed to replace object")
}
-c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
+c.Log.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
} else {
patch, patchType, err := createPatch(target, currentObj)
if err != nil {
@@ -663,7 +661,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
}
if patch == nil || string(patch) == "{}" {
-c.Log("Looks like there are no changes for %s %q", kind, target.Name)
+c.Log.Debug("no changes detected", "kind", kind, "name", target.Name)
// This needs to happen to make sure that Helm has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil {
@@ -672,7 +670,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil
}
// send patch to server
-c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace)
+c.Log.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil {
return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
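For reviewers unfamiliar with slog's key-value form, a sketch of how these Debug calls render with a text handler (the resource values here are made up for illustration):

package main

import (
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
	// Mirrors the call sites above.
	logger.Debug("patching resource", "kind", "Deployment", "name", "web", "namespace", "default")
	// Output (timestamp elided):
	// level=DEBUG msg="patching resource" kind=Deployment name=web namespace=default
}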

@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+// Logger defines a minimal logging interface compatible with slog.Logger
+type Logger interface {
+	Debug(msg string, args ...any)
+}
+
+// NopLogger is a logger that does nothing
+type NopLogger struct{}
+
+// Debug implements the Logger interface
+func (n NopLogger) Debug(msg string, args ...any) {}
+
+var nopLogger = NopLogger{}
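The one-method interface keeps call sites simple: c.Log.Debug(...) is always safe to call, and any implementation can be swapped in. A hedged test-double sketch (the testLogger name is illustrative, not part of this commit):

// testLogger is a hypothetical Logger implementation that records
// messages so tests can assert on what the kube client logged.
type testLogger struct {
	msgs []string
}

// Debug satisfies kube.Logger by capturing the message and ignoring args.
func (t *testLogger) Debug(msg string, args ...any) {
	t.msgs = append(t.msgs, msg)
}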

@@ -19,6 +19,8 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
+"io"
+"log/slog"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -57,13 +59,13 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults.
-func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker {
+func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{
client: cl,
-log: log,
+log: logger,
}
if c.log == nil {
-c.log = nopLogger
+c.log = slog.New(slog.NewTextHandler(io.Discard, nil))
}
for _, opt := range opts {
opt(&c)
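A caller-side sketch: passing nil is now safe because the constructor substitutes a discard-backed slog.Logger. The fake clientset and the resource.Info parameter below are assumptions for illustration, not part of this commit:

package main

import (
	"context"

	"k8s.io/cli-runtime/pkg/resource"
	"k8s.io/client-go/kubernetes/fake"

	"helm.sh/helm/v4/pkg/kube"
)

func example(info *resource.Info) {
	// nil logger: NewReadyChecker falls back to a discard handler,
	// so Debug calls inside the readiness checks become no-ops.
	checker := kube.NewReadyChecker(fake.NewSimpleClientset(), nil, kube.CheckJobs(true))
	ready, err := checker.IsReady(context.Background(), info)
	_, _ = ready, err
}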
@@ -74,7 +76,7 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}),
// ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -230,18 +232,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true
}
}
-c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
+c.log.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false
}
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit {
-c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
+c.log.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
}
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
-c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
+c.log.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil
}
return true, nil
@@ -255,7 +257,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" {
-c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName())
+c.log.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
@@ -263,12 +265,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 {
-c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs)
+c.log.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true
}
if s.Status.LoadBalancer.Ingress == nil {
-c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName())
+c.log.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
}
@@ -278,7 +280,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound {
-c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName())
+c.log.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false
}
return true
@@ -291,13 +293,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
}
// Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
-c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation)
+c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "observedGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation)
return false
}
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) {
-c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
+c.log.Debug("Deployment is not ready, not all Pods are ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false
}
return true
@@ -306,7 +308,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
-c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation)
+c.log.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation)
return false
}
@@ -317,7 +319,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
-c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
+c.log.Debug("DaemonSet is not ready, not all Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false
}
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@@ -330,7 +332,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
if !(int(ds.Status.NumberReady) >= expectedReady) {
-c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady)
+c.log.Debug("DaemonSet is not ready, not all Pods are ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false
}
return true
@@ -382,13 +384,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
-c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
+c.log.Debug("StatefulSet is not ready, observedGeneration does not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "observedGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
-c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
+c.log.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true
}
@@ -414,30 +416,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas {
-c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
+c.log.Debug("StatefulSet is not ready, not all Pods have been scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "scheduledPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false
}
if int(sts.Status.ReadyReplicas) != replicas {
-c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+c.log.Debug("StatefulSet is not ready, not all Pods are ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false
}
// This check only makes sense when all partitions are being upgraded otherwise during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
-c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
+c.log.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false
}
-c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+c.log.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true
}
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
-c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
+c.log.Debug("ReplicationController is not ready, observedGeneration does not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "observedGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation)
return false
}
return true
@@ -446,7 +448,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
-c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation)
+c.log.Debug("ReplicaSet is not ready, observedGeneration does not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "observedGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation)
return false
}
return true

@@ -37,7 +37,7 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -113,7 +113,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -188,7 +188,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -270,7 +270,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -345,7 +345,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -420,7 +420,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -495,7 +495,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -570,7 +570,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
@@ -661,7 +661,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
-log func(string, ...interface{})
+log Logger
checkJobs bool
pausedAsReady bool
}
