From 2c8cfaea4175a41cce086ed45871c2113644899c Mon Sep 17 00:00:00 2001 From: KISHOREKUMAR THUDI Date: Sun, 17 Nov 2024 11:04:25 -0500 Subject: [PATCH 001/192] Replacing NewSimpleClientSet to NewClientSet due to deprecation Signed-off-by: KISHOREKUMAR THUDI --- pkg/action/action_test.go | 2 +- pkg/kube/ready_test.go | 60 +++++++++++++++++++-------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 149eb85b1..3bf64c3e0 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -344,7 +344,7 @@ func TestConfiguration_Init(t *testing.T) { } func TestGetVersionSet(t *testing.T) { - client := fakeclientset.NewSimpleClientset() + client := fakeclientset.NewClientset() vs, err := GetVersionSet(client.Discovery()) if err != nil { diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index 14bf8588b..32840fb6e 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -58,7 +58,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { { name: "IsReady Pod", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -74,7 +74,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { { name: "IsReady Pod returns error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -134,7 +134,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { { name: "IsReady Job error while getting job", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -150,7 +150,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { { name: "IsReady Job", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -210,7 +210,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { { name: "IsReady Deployments error while getting current Deployment", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -227,7 +227,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { { name: "IsReady Deployments", //TODO fix this one fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -291,7 +291,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { { name: "IsReady PersistentVolumeClaim", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -307,7 +307,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { { name: "IsReady PersistentVolumeClaim with error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -366,7 +366,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { { name: "IsReady Service", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -382,7 +382,7 @@ func Test_ReadyChecker_IsReady_Service(t 
*testing.T) { { name: "IsReady Service with error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -441,7 +441,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { { name: "IsReady DaemonSet", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -457,7 +457,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { { name: "IsReady DaemonSet with error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -516,7 +516,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { { name: "IsReady StatefulSet", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -532,7 +532,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { { name: "IsReady StatefulSet with error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -591,7 +591,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { { name: "IsReady ReplicationController", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -607,7 +607,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { { name: "IsReady ReplicationController with error", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -623,7 +623,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { { name: "IsReady ReplicationController and pods not ready for object", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -682,7 +682,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { { name: "IsReady ReplicaSet", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -698,7 +698,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { { name: "IsReady ReplicaSet not ready", fields: fields{ - client: fake.NewSimpleClientset(), + client: fake.NewClientset(), log: func(string, ...interface{}) {}, checkJobs: true, pausedAsReady: false, @@ -793,7 +793,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want { t.Errorf("deploymentReady() = %v, want %v", got, tt.want) } @@ -827,7 +827,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.replicaSetReady(tt.args.rs); got != tt.want { t.Errorf("replicaSetReady() = %v, want %v", got, tt.want) } @@ -861,7 +861,7 @@ func 
Test_ReadyChecker_replicationControllerReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.replicationControllerReady(tt.args.rc); got != tt.want { t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want) } @@ -916,7 +916,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.daemonSetReady(tt.args.ds); got != tt.want { t.Errorf("daemonSetReady() = %v, want %v", got, tt.want) } @@ -992,7 +992,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.statefulSetReady(tt.args.sts); got != tt.want { t.Errorf("statefulSetReady() = %v, want %v", got, tt.want) } @@ -1051,7 +1051,7 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) for _, pod := range tt.existPods { if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create Pod error: %v", err) @@ -1130,7 +1130,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) got, err := c.jobReady(tt.args.job) if (err != nil) != tt.wantErr { t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr) @@ -1169,7 +1169,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) if got := c.volumeReady(tt.args.v); got != tt.want { t.Errorf("volumeReady() = %v, want %v", got, tt.want) } @@ -1214,7 +1214,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) got := c.serviceReady(tt.args.service) if got != tt.want { t.Errorf("serviceReady() = %v, want %v", got, tt.want) @@ -1283,7 +1283,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) got := c.crdBetaReady(tt.args.crdBeta) if got != tt.want { t.Errorf("crdBetaReady() = %v, want %v", got, tt.want) @@ -1352,7 +1352,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := NewReadyChecker(fake.NewSimpleClientset(), nil) + c := NewReadyChecker(fake.NewClientset(), nil) got := c.crdReady(tt.args.crdBeta) if got != tt.want { t.Errorf("crdBetaReady() = %v, want %v", got, tt.want) From 41700f02480ad9ab14924974231cb9b6ec17cde5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 17 Dec 2024 23:48:57 +0000 Subject: [PATCH 002/192] WIP Signed-off-by: Austin Abro --- go.mod | 3 +++ go.sum | 15 
+++++++++++ pkg/kube/client.go | 67 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 9d27e2b1f..d7500a674 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.31.3 oras.land/oras-go v1.2.5 + sigs.k8s.io/cli-utils v0.37.2 sigs.k8s.io/yaml v1.4.0 ) @@ -76,6 +77,7 @@ require ( github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -183,6 +185,7 @@ require ( k8s.io/component-base v0.31.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect diff --git a/go.sum b/go.sum index 654fc5178..a575e35cf 100644 --- a/go.sum +++ b/go.sum @@ -112,6 +112,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= @@ -136,6 +138,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -146,6 +150,7 @@ github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -443,6 +448,10 @@ go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93V go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -454,6 +463,8 @@ golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72 golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -630,6 +641,10 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg= +sigs.k8s.io/cli-utils v0.37.2/go.mod h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 4d93c91b9..f2bb06130 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -36,6 +36,13 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + 
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -44,7 +51,6 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" @@ -296,6 +302,65 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { return w.waitForResources(resources) } +// WaitForReady waits for all of the objects to reach a ready state. +func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error { + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + // TODO maybe a simpler way to transfer the objects + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.CurrentStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + // todo use err + var err error + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + } + return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + } + + return nil +} + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. 
func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { cs, err := c.getKubeClient() From 6f7ac066ae8a487621c169a5e588ebd4a19df284 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 23 Dec 2024 22:29:22 +0000 Subject: [PATCH 003/192] extending factory to enable getting a watcher Signed-off-by: Austin Abro --- pkg/kube/client.go | 45 ++++++++++++++++++++++++++++++++--------- pkg/kube/client_test.go | 5 +++++ pkg/kube/factory.go | 6 ++++++ 3 files changed, 46 insertions(+), 10 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 8bcd4824f..a25a6fcc3 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -56,6 +57,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -289,17 +291,43 @@ func getResource(info *resource.Info) (runtime.Object, error) { // Wait waits up to the given timeout for the specified resources to be ready. func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - cs, err := c.getKubeClient() + // cs, err := c.getKubeClient() + // if err != nil { + // return err + // } + // checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + // w := waiter{ + // c: checker, + // log: c.Log, + // timeout: timeout, + // } + cfg, err := c.Factory.ToRESTConfig() if err != nil { return err } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := waiter{ - c: checker, - log: c.Log, - timeout: timeout, + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return err } - return w.waitForResources(resources) + // Not sure if I should use factory methods to get this http client or I should do this + // For example, I could likely use this as well, but it seems like I should use the factory methods instead + // httpClient, err := rest.HTTPClientFor(cfg) + // if err != nil { + // return err + // } + client, err := c.Factory.RESTClient() + if err != nil { + return err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + if err != nil { + return err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + // return sw, nil + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + return WaitForReady(ctx, sw, resources) } // WaitForReady waits for all of the objects to reach a ready state. @@ -319,7 +347,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re } resources = append(resources, obj) } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( @@ -346,7 +373,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re // Only check parent context error, otherwise we would error when desired status is achieved. 
if ctx.Err() != nil { - // todo use err var err error for _, id := range resources { rs := statusCollector.ResourceStatuses[id] @@ -357,7 +383,6 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re } return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) } - return nil } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index f2d6bcb59..7f3ba65be 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -453,12 +453,17 @@ func TestPerform(t *testing.T) { } } +// Likely it is not possible to get this test to work with kstatus given that it seems +// kstatus is not making constant get checks on the resources and is instead waiting for events +// Potentially the test could be reworked to make the pods after five seconds +// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") var created *time.Time c := newTestClient(t) + c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index f19d62dc3..b0b506282 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,9 +17,11 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" import ( + "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -33,6 +35,7 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. type Factory interface { + genericclioptions.RESTClientGetter // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig @@ -42,6 +45,9 @@ type Factory interface { // KubernetesClientSet gives you back an external clientset KubernetesClientSet() (*kubernetes.Clientset, error) + // Returns a RESTClient for accessing Kubernetes resources or an error. + RESTClient() (*restclient.RESTClient, error) + // NewBuilder returns an object that assists in loading objects from both disk and the server // and which implements the common patterns for CLI interactions with generic resources. 
NewBuilder() *resource.Builder From a61a35240e3e99af8386605de8cdbd9564051d2f Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 23 Dec 2024 22:55:09 +0000 Subject: [PATCH 004/192] understand it better Signed-off-by: Austin Abro --- pkg/kube/client.go | 1 + pkg/kube/interface.go | 1 + pkg/kube/kready.go | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 pkg/kube/kready.go diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a25a6fcc3..b38b4b094 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -301,6 +301,7 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { // log: c.Log, // timeout: timeout, // } + // w.waitForResources() cfg, err := c.Factory.ToRESTConfig() if err != nil { return err diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index ce42ed950..af3823a3e 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -33,6 +33,7 @@ type Interface interface { Create(resources ResourceList) (*Result, error) // Wait waits up to the given timeout for the specified resources to be ready. + // TODO introduce another interface for the waiting of the KubeClient Wait(resources ResourceList, timeout time.Duration) error // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. diff --git a/pkg/kube/kready.go b/pkg/kube/kready.go new file mode 100644 index 000000000..0752ba481 --- /dev/null +++ b/pkg/kube/kready.go @@ -0,0 +1,18 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + From 4c1758143fd5bfed4ed42fa73fd051ae6e90f642 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 26 Dec 2024 16:09:54 +0000 Subject: [PATCH 005/192] basic design up and balling Signed-off-by: Austin Abro --- pkg/action/action.go | 3 +- pkg/kube/client.go | 99 ++++++++++++++++++++--------------------- pkg/kube/client_test.go | 36 +++++++++++++-- pkg/kube/interface.go | 32 +++++++------ pkg/kube/kready.go | 80 +++++++++++++++++++++++++++++++++ pkg/kube/wait.go | 13 ++++++ 6 files changed, 193 insertions(+), 70 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 45f1a14e2..8fa3ae289 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,8 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc := kube.New(getter) + // TODO I don't love that this ends up using nil instead of a real watcher + kc := kube.New(getter, nil) kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b38b4b094..b1b1d4835 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -92,6 +92,11 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset + // Another potential option rather than having the waiter as a field + // would be to have a field that decides what type of waiter to use + // then instantiate it during the method + // of course the fields could take a waiter as well + waiter Waiter } func init() { @@ -105,14 +110,53 @@ func init() { } } +func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { + cfg, err := factory.ToRESTConfig() + if err != nil { + return nil, err + } + // factory.DynamicClient() may be a better choice here + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + // Not sure if I should use factory methods to get this http client or I should do this + // For example, I could likely use this as well, but it seems like I should use the factory methods instead + // httpClient, err := rest.HTTPClientFor(cfg) + // if err != nil { + // return err + // } + client, err := factory.RESTClient() + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + if err != nil { + return nil, err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + return sw, nil +} + // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter) *Client { +func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) + } + factory := cmdutil.NewFactory(getter) + if waiter == nil { + sw, err := getStatusWatcher(factory) + if err != nil { + // TODO, likely will move how the stats watcher is created so it doesn't need to be created + // unless it's going to be used + panic(err) + } + waiter = &kstatusWaiter{sw, nopLogger} } return &Client{ - Factory: cmdutil.NewFactory(getter), + Factory: factory, Log: nopLogger, + waiter: waiter, } } @@ -291,44 +335,7 @@ func getResource(info *resource.Info) (runtime.Object, error) { // Wait waits up to the given timeout for the specified resources to be ready. 
func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - // cs, err := c.getKubeClient() - // if err != nil { - // return err - // } - // checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - // w := waiter{ - // c: checker, - // log: c.Log, - // timeout: timeout, - // } - // w.waitForResources() - cfg, err := c.Factory.ToRESTConfig() - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return err - } - // Not sure if I should use factory methods to get this http client or I should do this - // For example, I could likely use this as well, but it seems like I should use the factory methods instead - // httpClient, err := rest.HTTPClientFor(cfg) - // if err != nil { - // return err - // } - client, err := c.Factory.RESTClient() - if err != nil { - return err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) - if err != nil { - return err - } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - // return sw, nil - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - return WaitForReady(ctx, sw, resources) + return c.waiter.Wait(resources, timeout) } // WaitForReady waits for all of the objects to reach a ready state. @@ -389,17 +396,7 @@ func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList Re // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - cs, err := c.getKubeClient() - if err != nil { - return err - } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - w := waiter{ - c: checker, - log: c.Log, - timeout: timeout, - } - return w.waitForResources(resources) + return c.waiter.WaitWithJobs(resources, timeout) } // WaitForDelete wait up to the given timeout for the specified resources to be deleted. 
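NOTE (series): the kstatus wiring above can be exercised on its own. Below is a minimal sketch of the same wait pattern, using only calls this series already vendors (watcher.NewDefaultStatusWatcher, collector.NewResourceStatusCollector, aggregator.AggregateStatus, object.RuntimeToObjMeta). Names such as waitForCurrent are illustrative and not part of the applied diff:

package example

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// waitForCurrent blocks until every object reports status.CurrentStatus,
// the watcher fails, or ctx is done (e.g. a context.WithTimeout deadline).
func waitForCurrent(ctx context.Context, sw watcher.StatusWatcher, objs []runtime.Object) error {
	cancelCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Convert the runtime objects into the identifiers kstatus watches.
	ids := []object.ObjMetadata{}
	for _, o := range objs {
		id, err := object.RuntimeToObjMeta(o)
		if err != nil {
			return err
		}
		ids = append(ids, id)
	}

	eventCh := sw.Watch(cancelCtx, ids, watcher.Options{})
	coll := collector.NewResourceStatusCollector(ids)
	done := coll.ListenWithObserver(eventCh, collector.ObserverFunc(
		func(c *collector.ResourceStatusCollector, _ event.Event) {
			rss := []*event.ResourceStatus{}
			for _, rs := range c.ResourceStatuses {
				if rs != nil {
					rss = append(rss, rs)
				}
			}
			// Cancel the watch once the aggregate status is Current.
			if aggregator.AggregateStatus(rss, status.CurrentStatus) == status.CurrentStatus {
				cancel()
			}
		}))
	<-done

	if coll.Error != nil {
		return coll.Error
	}
	// Only the parent context signals a real timeout; cancelCtx is also
	// canceled on success.
	if ctx.Err() != nil {
		return fmt.Errorf("not all resources reached Current before the deadline: %w", ctx.Err())
	}
	return nil
}

Against a real cluster, sw comes from watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) as in getStatusWatcher above; the fake dynamic client setup in kwait_test.go drives the same function in tests.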
diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 7f3ba65be..b12897121 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -453,10 +454,10 @@ func TestPerform(t *testing.T) { } } -// Likely it is not possible to get this test to work with kstatus given that it seems +// Likely it is not possible to get this test to work with kstatus given that it seems // kstatus is not making constant get checks on the resources and is instead waiting for events // Potentially the test could be reworked to make the pods after five seconds -// would need this -> +// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") @@ -517,6 +518,15 @@ func TestWait(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -569,6 +579,15 @@ func TestWaitJob(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -623,6 +642,15 @@ func TestWaitDelete(t *testing.T) { } }), } + cs, err := c.getKubeClient() + require.NoError(t, err) + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + w := &waiter{ + c: checker, + log: c.Log, + timeout: time.Second * 30, + } + c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) @@ -649,7 +677,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c := New(nil) + c := New(nil, nil) resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -659,7 +687,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c = New(nil) + c = New(nil, nil) resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index af3823a3e..40880005a 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -32,16 +32,13 @@ type Interface interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) - // Wait waits up to the given timeout for the specified resources to be ready. - // TODO introduce another interface for the waiting of the KubeClient - Wait(resources ResourceList, timeout time.Duration) error - - // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. - WaitWithJobs(resources ResourceList, timeout time.Duration) error - // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) + // Update updates one or more resources or creates the resource + // if it doesn't exist. + Update(original, target ResourceList, force bool) (*Result, error) + // WatchUntilReady watches the resources given and waits until it is ready. 
// // This method is mainly for hook implementations. It watches for a resource to @@ -51,11 +48,12 @@ type Interface interface { // For Pods, "ready" means the Pod phase is marked "succeeded". // For all other kinds, it means the kind was created or modified without // error. + // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error - // Update updates one or more resources or creates the resource - // if it doesn't exist. - Update(original, target ResourceList, force bool) (*Result, error) + // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase + // and returns said phase (PodSucceeded or PodFailed qualify). + WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) // Build creates a resource list from a Reader. // @@ -65,12 +63,18 @@ type Interface interface { // Validates against OpenAPI schema if validate is true. Build(reader io.Reader, validate bool) (ResourceList, error) - // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase - // and returns said phase (PodSucceeded or PodFailed qualify). - WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) - // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error + Waiter +} + +// Waiter defines methods related to waiting for resource states. +type Waiter interface { + // Wait waits up to the given timeout for the specified resources to be ready. + Wait(resources ResourceList, timeout time.Duration) error + + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. + WaitWithJobs(resources ResourceList, timeout time.Duration) error } // InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers. diff --git a/pkg/kube/kready.go b/pkg/kube/kready.go index 0752ba481..c199eecc6 100644 --- a/pkg/kube/kready.go +++ b/pkg/kube/kready.go @@ -16,3 +16,83 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type kstatusWaiter struct { + // Add any necessary dependencies, e.g., Kubernetes API client. 
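+	// sw streams status events for the objects being waited on; a production
+	// client obtains it from watcher.NewDefaultStatusWatcher (see getStatusWatcher).
+	// log has the same signature as Client.Log so the two can be shared.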
+ sw watcher.StatusWatcher + log func(string, ...interface{}) +} + +func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + ctx := context.TODO() + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + // TODO maybe a simpler way to transfer the objects + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.CurrentStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + var err error + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + } + return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + } + return nil +} + +func (w *kstatusWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + // Implementation + panic("not implemented") +} diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index bdafc8255..de00aae47 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -44,6 +44,19 @@ type waiter struct { log func(string, ...interface{}) } +func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { + w.timeout = timeout + return w.waitForResources(resources) +} + +func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + // Implementation + // TODO this function doesn't make sense unless you pass a readyChecker to it + // TODO pass context instead + w.timeout = timeout + return w.waitForResources(resources) +} + // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached func (w *waiter) waitForResources(created ResourceList) error { From 4564b8f7121083b21721e3f098e5ab487b3b159a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 26 Dec 2024 17:26:03 +0000 Subject: [PATCH 006/192] make a working test Signed-off-by: Austin Abro --- go.mod | 3 +- pkg/kube/client.go | 75 ++--------- pkg/kube/client_test.go | 19 ++- pkg/kube/{kready.go => kwait.go} | 4 +- pkg/kube/kwait_test.go | 213 +++++++++++++++++++++++++++++++ 5 files changed, 238 insertions(+), 76 deletions(-) rename pkg/kube/{kready.go => kwait.go} (95%) create mode 100644 pkg/kube/kwait_test.go diff --git a/go.mod b/go.mod index feefb8932..e70781ac5 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( k8s.io/kubectl v0.31.3 oras.land/oras-go v1.2.5 sigs.k8s.io/cli-utils v0.37.2 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/yaml v1.4.0 ) @@ -128,6 
+129,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/onsi/gomega v1.33.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -185,7 +187,6 @@ require ( k8s.io/component-base v0.31.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect - sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b1b1d4835..149017b17 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -37,12 +37,7 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" @@ -92,10 +87,12 @@ type Client struct { Namespace string kubeClient *kubernetes.Clientset - // Another potential option rather than having the waiter as a field - // would be to have a field that decides what type of waiter to use - // then instantiate it during the method - // of course the fields could take a waiter as well + // I see a couple different options for how waiter could be handled here + // - The waiter could be instantiated in New or at the start of each wait function // + // - The waiter could be completely separate from the client interface, + // I don't like that this causes consumers to need another interface on top of kube + // - The waiter could be bundled with the resource manager into a client object. The waiter doesn't need factory / + // Another option still would be to waiter Waiter } @@ -142,7 +139,7 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) - } + } factory := cmdutil.NewFactory(getter) if waiter == nil { sw, err := getStatusWatcher(factory) @@ -156,7 +153,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { return &Client{ Factory: factory, Log: nopLogger, - waiter: waiter, + waiter: waiter, } } @@ -338,62 +335,6 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { return c.waiter.Wait(resources, timeout) } -// WaitForReady waits for all of the objects to reach a ready state. 
-func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error { - cancelCtx, cancel := context.WithCancel(ctx) - defer cancel() - // TODO maybe a simpler way to transfer the objects - runtimeObjs := []runtime.Object{} - for _, resource := range resourceList { - runtimeObjs = append(runtimeObjs, resource.Object) - } - resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) - if err != nil { - return err - } - resources = append(resources, obj) - } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) - statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.CurrentStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) - <-done - - if statusCollector.Error != nil { - return statusCollector.Error - } - - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - var err error - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { - continue - } - err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) - } - return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) - } - return nil -} - // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. 
func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { return c.waiter.WaitWithJobs(resources, timeout) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index b12897121..de61a3862 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -24,7 +24,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -519,14 +518,16 @@ func TestWait(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -580,14 +581,16 @@ func TestWaitJob(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -643,14 +646,16 @@ func TestWaitDelete(t *testing.T) { }), } cs, err := c.getKubeClient() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) w := &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w + c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/kready.go b/pkg/kube/kwait.go similarity index 95% rename from pkg/kube/kready.go rename to pkg/kube/kwait.go index c199eecc6..d74c913ea 100644 --- a/pkg/kube/kready.go +++ b/pkg/kube/kwait.go @@ -37,7 +37,8 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { - ctx := context.TODO() + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() cancelCtx, cancel := context.WithCancel(ctx) defer cancel() // TODO maybe a simpler way to transfer the objects @@ -62,6 +63,7 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs == nil { continue } + fmt.Println("this is the status of object", rs.Status) rss = append(rss, rs) } desired := status.CurrentStatus diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go new file mode 100644 index 000000000..1d9a69959 --- /dev/null +++ b/pkg/kube/kwait_test.go @@ -0,0 +1,213 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/yaml" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/testutil" +) + +var podCurrentYaml = ` +apiVersion: v1 +kind: Pod +metadata: + name: good-pod + namespace: ns +status: + conditions: + - type: Ready + status: "True" + phase: Running +` + +var podYaml = ` +apiVersion: v1 +kind: Pod +metadata: + name: in-progress-pod + namespace: ns +` + +func TestRunHealthChecks(t *testing.T) { + t.Parallel() + tests := []struct { + name string + podYamls []string + expectErrs []error + }{ + { + name: "Pod is ready", + podYamls: []string{podCurrentYaml}, + expectErrs: nil, + }, + { + name: "one of the pods never becomes ready", + podYamls: []string{podYaml, podCurrentYaml}, + // TODO, make this better + expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + // ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + // defer cancel() + pods := []runtime.Object{} + statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + for _, podYaml := range tt.podYamls { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + require.NoError(t, err) + pod := &unstructured.Unstructured{Object: m} + pods = append(pods, pod) + fmt.Println(pod.GetName()) + podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()) + require.NoError(t, err) + } + c.waiter = &kstatusWaiter{ + sw: statusWatcher, + log: c.Log, + } + + resourceList := ResourceList{} + for _, pod := range pods { + list, err := c.Build(objBody(pod), false) + if err != nil { + t.Fatal(err) + } + resourceList = append(resourceList, list...) 
+ } + + err := c.Wait(resourceList, time.Second*5) + if tt.expectErrs != nil { + require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + require.NoError(t, err) + }) + } +} + +// func TestWait1(t *testing.T) { +// podList := newPodList("starfish", "otter", "squid") + +// var created *time.Time + +// c := newTestClient(t) +// c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() +// c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ +// NegotiatedSerializer: unstructuredSerializer, +// Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { +// p, m := req.URL.Path, req.Method +// t.Logf("got request %s %s", p, m) +// switch { +// case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET": +// pod := &podList.Items[0] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/api/v1/namespaces/default/pods/otter" && m == "GET": +// pod := &podList.Items[1] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/api/v1/namespaces/default/pods/squid" && m == "GET": +// pod := &podList.Items[2] +// if created != nil && time.Since(*created) >= time.Second*5 { +// pod.Status.Conditions = []v1.PodCondition{ +// { +// Type: v1.PodReady, +// Status: v1.ConditionTrue, +// }, +// } +// } +// return newResponse(200, pod) +// case p == "/namespaces/default/pods" && m == "POST": +// resources, err := c.Build(req.Body, false) +// if err != nil { +// t.Fatal(err) +// } +// now := time.Now() +// created = &now +// return newResponse(200, resources[0].Object) +// default: +// t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) +// return nil, nil +// } +// }), +// } +// cs, err := c.getKubeClient() +// require.NoError(t, err) +// checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) +// w := &waiter{ +// c: checker, +// log: c.Log, +// timeout: time.Second * 30, +// } +// c.waiter = w +// resources, err := c.Build(objBody(&podList), false) +// if err != nil { +// t.Fatal(err) +// } +// result, err := c.Create(resources) +// if err != nil { +// t.Fatal(err) +// } +// if len(result.Created) != 3 { +// t.Errorf("expected 3 resource created, got %d", len(result.Created)) +// } + +// if err := c.Wait(resources, time.Second*30); err != nil { +// t.Errorf("expected wait without error, got %s", err) +// } + +// if time.Since(*created) < time.Second*5 { +// t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created)) +// } +// } From ad1f1c02efda335320ec652c3a32cfbbc39b6337 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 13:25:02 +0000 Subject: [PATCH 007/192] cleanup test Signed-off-by: Austin Abro --- pkg/kube/kwait_test.go | 92 ------------------------------------------ 1 file changed, 92 deletions(-) diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 1d9a69959..1702f0990 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -83,8 +83,6 @@ func TestRunHealthChecks(t *testing.T) { fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), ) - // ctx, cancel := context.WithTimeout(context.Background(), 
500*time.Millisecond) - // defer cancel() pods := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) for _, podYaml := range tt.podYamls { @@ -121,93 +119,3 @@ func TestRunHealthChecks(t *testing.T) { }) } } - -// func TestWait1(t *testing.T) { -// podList := newPodList("starfish", "otter", "squid") - -// var created *time.Time - -// c := newTestClient(t) -// c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig() -// c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{ -// NegotiatedSerializer: unstructuredSerializer, -// Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { -// p, m := req.URL.Path, req.Method -// t.Logf("got request %s %s", p, m) -// switch { -// case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET": -// pod := &podList.Items[0] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/api/v1/namespaces/default/pods/otter" && m == "GET": -// pod := &podList.Items[1] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/api/v1/namespaces/default/pods/squid" && m == "GET": -// pod := &podList.Items[2] -// if created != nil && time.Since(*created) >= time.Second*5 { -// pod.Status.Conditions = []v1.PodCondition{ -// { -// Type: v1.PodReady, -// Status: v1.ConditionTrue, -// }, -// } -// } -// return newResponse(200, pod) -// case p == "/namespaces/default/pods" && m == "POST": -// resources, err := c.Build(req.Body, false) -// if err != nil { -// t.Fatal(err) -// } -// now := time.Now() -// created = &now -// return newResponse(200, resources[0].Object) -// default: -// t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) -// return nil, nil -// } -// }), -// } -// cs, err := c.getKubeClient() -// require.NoError(t, err) -// checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) -// w := &waiter{ -// c: checker, -// log: c.Log, -// timeout: time.Second * 30, -// } -// c.waiter = w -// resources, err := c.Build(objBody(&podList), false) -// if err != nil { -// t.Fatal(err) -// } -// result, err := c.Create(resources) -// if err != nil { -// t.Fatal(err) -// } -// if len(result.Created) != 3 { -// t.Errorf("expected 3 resource created, got %d", len(result.Created)) -// } - -// if err := c.Wait(resources, time.Second*30); err != nil { -// t.Errorf("expected wait without error, got %s", err) -// } - -// if time.Since(*created) < time.Second*5 { -// t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created)) -// } -// } From 859ff9b54882c4344cc5564c6cd4f993a300e20c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 14:37:33 +0000 Subject: [PATCH 008/192] change structure of client Signed-off-by: Austin Abro --- pkg/kube/client.go | 21 +++------------------ pkg/kube/client_test.go | 9 +++------ pkg/kube/interface.go | 10 +++++----- pkg/kube/kwait_test.go | 2 +- 4 files changed, 12 insertions(+), 30 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 149017b17..9e31a64e1 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -87,13 +87,8 @@ type Client struct { Namespace string kubeClient 
*kubernetes.Clientset - // I see a couple different options for how waiter could be handled here - // - The waiter could be instantiated in New or at the start of each wait function // - // - The waiter could be completely separate from the client interface, - // I don't like that this causes consumers to need another interface on top of kube - // - The waiter could be bundled with the resource manager into a client object. The waiter doesn't need factory / - // Another option still would be to - waiter Waiter + ResourceManager + Waiter } func init() { @@ -153,7 +148,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { return &Client{ Factory: factory, Log: nopLogger, - waiter: waiter, + Waiter: waiter, } } @@ -330,16 +325,6 @@ func getResource(info *resource.Info) (runtime.Object, error) { return obj, nil } -// Wait waits up to the given timeout for the specified resources to be ready. -func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { - return c.waiter.Wait(resources, timeout) -} - -// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. -func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - return c.waiter.WaitWithJobs(resources, timeout) -} - // WaitForDelete wait up to the given timeout for the specified resources to be deleted. func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { w := waiter{ diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index de61a3862..a6e095942 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -522,12 +522,11 @@ func TestWait(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -585,12 +584,11 @@ func TestWaitJob(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -650,12 +648,11 @@ func TestWaitDelete(t *testing.T) { t.Fatal(err) } checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - w := &waiter{ + c.Waiter = &waiter{ c: checker, log: c.Log, timeout: time.Second * 30, } - c.waiter = w resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 40880005a..d2230b244 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -29,6 +29,11 @@ import ( // // A KubernetesClient must be concurrency safe. type Interface interface { + ResourceManager + Waiter +} + +type ResourceManager interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) @@ -38,7 +43,6 @@ type Interface interface { // Update updates one or more resources or creates the resource // if it doesn't exist. Update(original, target ResourceList, force bool) (*Result, error) - // WatchUntilReady watches the resources given and waits until it is ready. // // This method is mainly for hook implementations. It watches for a resource to @@ -50,11 +54,9 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? 
 	WatchUntilReady(resources ResourceList, timeout time.Duration) error
-
 	// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
 	// and returns said phase (PodSucceeded or PodFailed qualify).
 	WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error)
-
 	// Build creates a resource list from a Reader.
 	//
 	// Reader must contain a YAML stream (one or more YAML documents separated
 	// by "\n---\n")
 	//
 	// Validates against OpenAPI schema if validate is true.
 	Build(reader io.Reader, validate bool) (ResourceList, error)
-
 	// IsReachable checks whether the client is able to connect to the cluster.
 	IsReachable() error
-	Waiter
 }
 
 // Waiter defines methods related to waiting for resource states.
diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go
index 1702f0990..9854f2d60 100644
--- a/pkg/kube/kwait_test.go
+++ b/pkg/kube/kwait_test.go
@@ -96,7 +96,7 @@ func TestRunHealthChecks(t *testing.T) {
 			err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace())
 			require.NoError(t, err)
 		}
-		c.waiter = &kstatusWaiter{
+		c.Waiter = &kstatusWaiter{
 			sw:  statusWatcher,
 			log: c.Log,
 		}

From aacaa08be2b689e7c688f483ab0946dedac154ab Mon Sep 17 00:00:00 2001
From: Austin Abro
Date: Sun, 29 Dec 2024 14:49:11 +0000
Subject: [PATCH 009/192] only embed waiter

Signed-off-by: Austin Abro
---
 pkg/kube/client.go    | 1 -
 pkg/kube/interface.go | 6 +-----
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index 9e31a64e1..469a89b35 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -87,7 +87,6 @@ type Client struct {
 	Namespace string
 
 	kubeClient *kubernetes.Clientset
-	ResourceManager
 	Waiter
 }
 
diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go
index d2230b244..edc062c49 100644
--- a/pkg/kube/interface.go
+++ b/pkg/kube/interface.go
@@ -29,11 +29,6 @@ import (
 //
 // A KubernetesClient must be concurrency safe.
 type Interface interface {
-	ResourceManager
-	Waiter
-}
-
-type ResourceManager interface {
 	// Create creates one or more resources.
 	Create(resources ResourceList) (*Result, error)
 
@@ -66,6 +61,7 @@ type ResourceManager interface {
 	Build(reader io.Reader, validate bool) (ResourceList, error)
 	// IsReachable checks whether the client is able to connect to the cluster.
 	IsReachable() error
+	Waiter
 }
 
 // Waiter defines methods related to waiting for resource states.
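
The kstatus-based waiter is now the heart of the package, so it is worth seeing the whole loop in one place. The following is a condensed, self-contained sketch of the pattern kwait.go has converged on by this point in the series; the function name waitCurrent and the package clause are illustrative only, while the cli-utils imports and call signatures are exactly the ones used in the diffs above.

// Sketch only: a standalone distillation of the kwait.go wait loop.
package kwaitsketch

import (
	"context"
	"fmt"

	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// waitCurrent blocks until every object in ids reaches the kstatus Current
// status, or until ctx expires, in which case it reports the stragglers.
func waitCurrent(ctx context.Context, sw watcher.StatusWatcher, ids []object.ObjMetadata) error {
	cancelCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	eventCh := sw.Watch(cancelCtx, ids, watcher.Options{})
	statusCollector := collector.NewResourceStatusCollector(ids)
	done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc(
		func(c *collector.ResourceStatusCollector, _ event.Event) {
			rss := []*event.ResourceStatus{}
			for _, rs := range c.ResourceStatuses {
				if rs != nil {
					rss = append(rss, rs)
				}
			}
			// Cancel the watch as soon as the aggregate of every tracked
			// resource reaches Current.
			if aggregator.AggregateStatus(rss, status.CurrentStatus) == status.CurrentStatus {
				cancel()
			}
		}))
	<-done

	if statusCollector.Error != nil {
		return statusCollector.Error
	}
	// Only the parent context signals failure; cancelCtx is also cancelled
	// on success.
	if ctx.Err() != nil {
		var err error
		for _, id := range ids {
			rs := statusCollector.ResourceStatuses[id]
			if rs.Status == status.CurrentStatus {
				continue
			}
			err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)
		}
		return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err)
	}
	return nil
}
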
From 947425ee64b0047896ba9a96d130420c5ca60175 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 14:51:22 +0000 Subject: [PATCH 010/192] refactor new Signed-off-by: Austin Abro --- pkg/action/action.go | 6 ++++-- pkg/kube/client.go | 8 +++----- pkg/kube/client_test.go | 10 ++++++++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 8fa3ae289..8759597b4 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,8 +371,10 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - // TODO I don't love that this ends up using nil instead of a real watcher - kc := kube.New(getter, nil) + kc, err := kube.New(getter, nil) + if err != nil { + return err + } kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 469a89b35..a50655a40 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -130,7 +130,7 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { } // New creates a new Client. -func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { +func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, error) { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -138,9 +138,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { if waiter == nil { sw, err := getStatusWatcher(factory) if err != nil { - // TODO, likely will move how the stats watcher is created so it doesn't need to be created - // unless it's going to be used - panic(err) + return nil, err } waiter = &kstatusWaiter{sw, nopLogger} } @@ -148,7 +146,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) *Client { Factory: factory, Log: nopLogger, Waiter: waiter, - } + }, nil } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index a6e095942..037719219 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -679,7 +679,10 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c := New(nil, nil) + c, err := New(nil, nil) + if err != nil { + t.Fatal(err) + } resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -689,7 +692,10 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c = New(nil, nil) + c, err = New(nil, nil) + if err != nil { + t.Fatal(err) + } resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) From 807cc925f532323fcb143b566d8e44498bcaac32 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 16:33:33 +0000 Subject: [PATCH 011/192] refactor test Signed-off-by: Austin Abro --- pkg/kube/client.go | 5 ++- pkg/kube/kwait.go | 5 +-- pkg/kube/kwait_test.go | 80 +++++++++++++++++++++++++++++++++--------- 3 files changed, 70 insertions(+), 20 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a50655a40..cbef8fece 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -140,7 +140,10 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err if err != nil { return nil, err } - waiter = &kstatusWaiter{sw, nopLogger} + waiter = 
&kstatusWaiter{ + sw: sw, + log: nopLogger, + pausedAsReady: true} } return &Client{ Factory: factory, diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index d74c913ea..6c1d5b748 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -32,8 +32,9 @@ import ( type kstatusWaiter struct { // Add any necessary dependencies, e.g., Kubernetes API client. - sw watcher.StatusWatcher - log func(string, ...interface{}) + sw watcher.StatusWatcher + log func(string, ...interface{}) + pausedAsReady bool } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 9854f2d60..372735462 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -18,12 +18,12 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "errors" - "fmt" "testing" "time" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/testutil" ) -var podCurrentYaml = ` +var podCurrent = ` apiVersion: v1 kind: Pod metadata: @@ -47,7 +47,7 @@ status: phase: Running ` -var podYaml = ` +var podNoStatus = ` apiVersion: v1 kind: Pod metadata: @@ -55,21 +55,62 @@ metadata: namespace: ns ` -func TestRunHealthChecks(t *testing.T) { +var jobNoStatus = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test + namespace: qual + generation: 1 +` + +var jobComplete = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test + namespace: qual + generation: 1 +status: + succeeded: 1 + active: 0 + conditions: + - type: Complete + status: "True" +` + +func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { + gvk := obj.GroupVersionKind() + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + require.NoError(t, err) + return mapping.Resource +} + +func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { name string - podYamls []string + objYamls []string expectErrs []error }{ + { + name: "Job is complete", + objYamls: []string{jobComplete}, + expectErrs: nil, + }, + { + name: "Job is not complete", + objYamls: []string{jobNoStatus}, + expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: test: Job not ready, status: InProgress")}, + }, { name: "Pod is ready", - podYamls: []string{podCurrentYaml}, + objYamls: []string{podCurrent}, expectErrs: nil, }, { name: "one of the pods never becomes ready", - podYamls: []string{podYaml, podCurrentYaml}, + objYamls: []string{podNoStatus, podCurrent}, // TODO, make this better expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, }, @@ -82,18 +123,22 @@ func TestRunHealthChecks(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), + schema.GroupVersionKind{ + Group: "batch", + Version: "v1", + Kind: "Job", + }, ) - pods := []runtime.Object{} + objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - for _, podYaml := range tt.podYamls { + for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) require.NoError(t, err) - pod := &unstructured.Unstructured{Object: m} - pods = append(pods, 
pod) - fmt.Println(pod.GetName()) - podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} - err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace()) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) require.NoError(t, err) } c.Waiter = &kstatusWaiter{ @@ -102,16 +147,17 @@ func TestRunHealthChecks(t *testing.T) { } resourceList := ResourceList{} - for _, pod := range pods { - list, err := c.Build(objBody(pod), false) + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) if err != nil { t.Fatal(err) } resourceList = append(resourceList, list...) } - err := c.Wait(resourceList, time.Second*5) + err := c.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { + //TODO remove require require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return } From a6e5466942df67dccea00fbaa7b2ed4e5a8e619d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 16:54:33 +0000 Subject: [PATCH 012/192] refactor test Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 8 +++++--- pkg/kube/kwait_test.go | 23 ++++++++++------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 6c1d5b748..639794322 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" + "errors" "fmt" "time" @@ -82,15 +83,16 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e // Only check parent context error, otherwise we would error when desired status is achieved. if ctx.Err() != nil { - var err error + errs := []error{} for _, id := range resources { rs := statusCollector.ResourceStatuses[id] if rs.Status == status.CurrentStatus { continue } - err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status) + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } - return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err) + errs = append(errs, ctx.Err()) + return errors.Join(errs...) 
} return nil } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 372735462..fd5cd0b57 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -101,7 +102,7 @@ func TestKWaitJob(t *testing.T) { { name: "Job is not complete", objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: test: Job not ready, status: InProgress")}, + expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, { name: "Pod is ready", @@ -109,10 +110,9 @@ func TestKWaitJob(t *testing.T) { expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objYamls: []string{podNoStatus, podCurrent}, - // TODO, make this better - expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")}, + name: "one of the pods never becomes ready", + objYamls: []string{podNoStatus, podCurrent}, + expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, } @@ -134,12 +134,12 @@ func TestKWaitJob(t *testing.T) { for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) - require.NoError(t, err) + assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} objs = append(objs, resource) gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - require.NoError(t, err) + assert.NoError(t, err) } c.Waiter = &kstatusWaiter{ sw: statusWatcher, @@ -149,19 +149,16 @@ func TestKWaitJob(t *testing.T) { resourceList := ResourceList{} for _, obj := range objs { list, err := c.Build(objBody(obj), false) - if err != nil { - t.Fatal(err) - } + assert.NoError(t, err) resourceList = append(resourceList, list...) 
} err := c.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { - //TODO remove require - require.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return } - require.NoError(t, err) + assert.NoError(t, err) }) } } From 7b896df4d1089a7c6abded0caaf16fb84a2f90a7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:34:26 +0000 Subject: [PATCH 013/192] option to wait for jobs Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 24 +++++++++++++++++------- pkg/kube/kwait_test.go | 24 ++++++++++++++++-------- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 639794322..c1822d87c 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" @@ -39,6 +40,15 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + return w.wait(resourceList, timeout, false) +} + +func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { + // Implementation + return w.wait(resourceList, timeout, true) +} + +func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitWithJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() cancelCtx, cancel := context.WithCancel(ctx) @@ -46,6 +56,12 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { + switch AsVersioned(resource).(type) { + case *batchv1.Job: + if !waitWithJobs { + continue + } + } runtimeObjs = append(runtimeObjs, resource.Object) } resources := []object.ObjMetadata{} @@ -65,7 +81,6 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs == nil { continue } - fmt.Println("this is the status of object", rs.Status) rss = append(rss, rs) } desired := status.CurrentStatus @@ -89,15 +104,10 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e if rs.Status == status.CurrentStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) 
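 		// errors.Join (Go 1.20) folds every per-resource failure plus the
 		// context error into a single error value, so the failure message
 		// names each resource that never became ready rather than only the
 		// last one inspected, as the earlier fmt.Errorf chain did.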
} return nil } - -func (w *kstatusWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - // Implementation - panic("not implemented") -} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index fd5cd0b57..e595f9ed3 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -90,9 +90,10 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { name: "Job is complete", @@ -100,9 +101,16 @@ func TestKWaitJob(t *testing.T) { expectErrs: nil, }, { - name: "Job is not complete", - objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is not complete", + objYamls: []string{jobNoStatus}, + expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + waitForJobs: true, + }, + { + name: "Job is not ready, but we pass wait anyway", + objYamls: []string{jobNoStatus}, + expectErrs: nil, + waitForJobs: false, }, { name: "Pod is ready", @@ -141,7 +149,7 @@ func TestKWaitJob(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - c.Waiter = &kstatusWaiter{ + kwaiter := kstatusWaiter{ sw: statusWatcher, log: c.Log, } @@ -153,7 +161,7 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) } - err := c.Wait(resourceList, time.Second*3) + err := kwaiter.wait(resourceList, time.Second*3, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From 22af71f125ca467a109eff50e78c5b7aea0e8642 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:35:32 +0000 Subject: [PATCH 014/192] comments Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index c1822d87c..674552432 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -33,7 +33,6 @@ import ( ) type kstatusWaiter struct { - // Add any necessary dependencies, e.g., Kubernetes API client. 
sw watcher.StatusWatcher log func(string, ...interface{}) pausedAsReady bool @@ -44,7 +43,6 @@ func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) e } func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { - // Implementation return w.wait(resourceList, timeout, true) } From e18f22071d036832f8a8573a99bd2955745faef8 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:43:18 +0000 Subject: [PATCH 015/192] paused as ready now working Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 11 +++++++--- pkg/kube/kwait_test.go | 50 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 674552432..936445037 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" @@ -46,7 +47,7 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur return w.wait(resourceList, timeout, true) } -func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitWithJobs bool) error { +func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitForJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() cancelCtx, cancel := context.WithCancel(ctx) @@ -54,9 +55,13 @@ func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, w // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { - switch AsVersioned(resource).(type) { + switch value := AsVersioned(resource).(type) { case *batchv1.Job: - if !waitWithJobs { + if !waitForJobs { + continue + } + case *appsv1.Deployment: + if w.pausedAsReady && value.Spec.Paused { continue } } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index e595f9ed3..8504025da 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -80,6 +82,31 @@ status: status: "True" ` +var pausedDeploymentYaml = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: ns-1 + generation: 1 +spec: + paused: true + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.19.6 + ports: + - containerPort: 80 +` + func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { gvk := obj.GroupVersionKind() mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) @@ -90,10 +117,11 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool + name string + objYamls []string + expectErrs []error + waitForJobs bool + pausedAsReady bool }{ { name: "Job is complete", @@ -122,6 +150,12 @@ func TestKWaitJob(t *testing.T) { objYamls: []string{podNoStatus, podCurrent}, expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: 
InProgress"), errors.New("context deadline exceeded")}, }, + { + name: "paused deployment passes", + objYamls: []string{pausedDeploymentYaml}, + expectErrs: nil, + pausedAsReady: true, + }, } for _, tt := range tests { @@ -131,11 +165,8 @@ func TestKWaitJob(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), - schema.GroupVersionKind{ - Group: "batch", - Version: "v1", - Kind: "Job", - }, + appsv1.SchemeGroupVersion.WithKind("Deployment"), + batchv1.SchemeGroupVersion.WithKind("Job"), ) objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) @@ -152,6 +183,7 @@ func TestKWaitJob(t *testing.T) { kwaiter := kstatusWaiter{ sw: statusWatcher, log: c.Log, + pausedAsReady: tt.pausedAsReady, } resourceList := ResourceList{} From b337790c102b812807a783bce5d2fbf74fc4f5cd Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 17:59:26 +0000 Subject: [PATCH 016/192] paused as ready Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 1 - pkg/kube/kwait_test.go | 11 ++++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 936445037..e72e4b93d 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -52,7 +52,6 @@ func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, w defer cancel() cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - // TODO maybe a simpler way to transfer the objects runtimeObjs := []runtime.Object{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 8504025da..1bc80d8ee 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -18,13 +18,14 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "errors" + "log" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -166,7 +167,7 @@ func TestKWaitJob(t *testing.T) { fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), appsv1.SchemeGroupVersion.WithKind("Deployment"), - batchv1.SchemeGroupVersion.WithKind("Job"), + batchv1.SchemeGroupVersion.WithKind("Job"), ) objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) @@ -181,9 +182,9 @@ func TestKWaitJob(t *testing.T) { assert.NoError(t, err) } kwaiter := kstatusWaiter{ - sw: statusWatcher, - log: c.Log, - pausedAsReady: tt.pausedAsReady, + sw: statusWatcher, + log: log.Printf, + pausedAsReady: tt.pausedAsReady, } resourceList := ResourceList{} From 28a9183ee3fd271ac2b76f4df89170e3c9452fbb Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 29 Dec 2024 18:39:09 +0000 Subject: [PATCH 017/192] context Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 12 +++++++----- pkg/kube/kwait_test.go | 6 +++++- pkg/kube/wait.go | 3 ++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index e72e4b93d..3d8cfb616 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -40,16 +40,18 @@ type kstatusWaiter struct { } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { - return w.wait(resourceList, timeout, false) + ctx, cancel := 
context.WithTimeout(context.TODO(), timeout) + defer cancel() + return w.wait(ctx, resourceList, false) } func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { - return w.wait(resourceList, timeout, true) -} - -func (w *kstatusWaiter) wait(resourceList ResourceList, timeout time.Duration, waitForJobs bool) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + return w.wait(ctx, resourceList, true) +} + +func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 1bc80d8ee..9598ca216 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -17,6 +17,7 @@ limitations under the License. package kube // import "helm.sh/helm/v3/pkg/kube" import ( + "context" "errors" "log" "testing" @@ -194,7 +195,10 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) } - err := kwaiter.wait(resourceList, time.Second*3, tt.waitForJobs) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + + err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index de00aae47..34eb55e7c 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -51,8 +51,9 @@ func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { // Implementation - // TODO this function doesn't make sense unless you pass a readyChecker to it + // TODO this function doesn't make sense unless you pass a readyChecker to it // TODO pass context instead + // checker := NewReadyChecker(cs, w.c.Log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) } From 265442c5eb2bedc3292e255e38f4b25e1b0463ce Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:22:14 +0000 Subject: [PATCH 018/192] simplify things Signed-off-by: Austin Abro --- pkg/kube/client.go | 27 +++++---------------------- pkg/kube/kwait.go | 7 +++---- pkg/kube/kwait_test.go | 9 ++++----- 3 files changed, 12 insertions(+), 31 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index cbef8fece..a5441f399 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -38,7 +38,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -52,7 +51,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -102,26 +100,11 @@ func init() { } func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { - cfg, err := factory.ToRESTConfig() + dynamicClient, err := factory.DynamicClient() if err != nil { return nil, err } - // factory.DynamicClient() may be a better choice here - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - // Not sure if I should use factory methods to get this http client or I 
should do this - // For example, I could likely use this as well, but it seems like I should use the factory methods instead - // httpClient, err := rest.HTTPClientFor(cfg) - // if err != nil { - // return err - // } - client, err := factory.RESTClient() - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, client.Client) + restMapper, err := factory.ToRESTMapper() if err != nil { return nil, err } @@ -141,9 +124,9 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err return nil, err } waiter = &kstatusWaiter{ - sw: sw, - log: nopLogger, - pausedAsReady: true} + sw: sw, + log: nopLogger, + } } return &Client{ Factory: factory, diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 3d8cfb616..d0dcc9b60 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -34,9 +34,8 @@ import ( ) type kstatusWaiter struct { - sw watcher.StatusWatcher - log func(string, ...interface{}) - pausedAsReady bool + sw watcher.StatusWatcher + log func(string, ...interface{}) } func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { @@ -62,7 +61,7 @@ func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, wai continue } case *appsv1.Deployment: - if w.pausedAsReady && value.Spec.Paused { + if value.Spec.Paused { continue } } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 9598ca216..527d10a05 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -183,9 +183,8 @@ func TestKWaitJob(t *testing.T) { assert.NoError(t, err) } kwaiter := kstatusWaiter{ - sw: statusWatcher, - log: log.Printf, - pausedAsReady: tt.pausedAsReady, + sw: statusWatcher, + log: log.Printf, } resourceList := ResourceList{} @@ -195,8 +194,8 @@ func TestKWaitJob(t *testing.T) { resourceList = append(resourceList, list...) 
} - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { From 9b63459becb190ad9f4ebe43235f40ed720e310d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:49:32 +0000 Subject: [PATCH 019/192] save state while I change up tests Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 6 +++++ pkg/kube/kwait_test.go | 54 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index d0dcc9b60..f173a074e 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -50,6 +50,12 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur return w.wait(ctx, resourceList, true) } +func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { + _, cancel := context.WithCancel(ctx) + defer cancel() + return nil +} + func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 527d10a05..2301c373d 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -29,6 +29,7 @@ import ( batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -116,6 +117,59 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } +func TestKWaitForDelete(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objs []runtime.Object + expectErrs []error + waitForJobs bool + pausedAsReady bool + }{ + { + name: "Pod is deleted", + objs: []runtime.Object{ + &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "ns"}}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + appsv1.SchemeGroupVersion.WithKind("Deployment"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + kwaiter := kstatusWaiter{ + sw: statusWatcher, + log: log.Printf, + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + resourceList := ResourceList{} + for _, obj := range tt.objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + // gvr := getGVR(t, fakeMapper, obj.) + // err = fakeClient.Tracker().Create(gvr, obj, ) + // assert.NoError(t, err) + // resourceList = append(resourceList, list...) 
+ } + err := kwaiter.waitForDelete(ctx, resourceList) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } + +} + func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { From 4dbdd7ce10cfb5f4d1de4dda2a65b27a83f93c0c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 14:55:13 +0000 Subject: [PATCH 020/192] wait for delete working Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 53 ++++++++++++++++++++++++++++++++++++++- pkg/kube/kwait_test.go | 56 ++++++++++++++++++++++++------------------ 2 files changed, 84 insertions(+), 25 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index f173a074e..ae7fcbe43 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -51,7 +51,58 @@ func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dur } func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { - _, cancel := context.WithCancel(ctx) + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + runtimeObjs := []runtime.Object{} + for _, resource := range resourceList { + runtimeObjs = append(runtimeObjs, resource.Object) + } + resources := []object.ObjMetadata{} + for _, runtimeObj := range runtimeObjs { + obj, err := object.RuntimeToObjMeta(runtimeObj) + if err != nil { + return err + } + resources = append(resources, obj) + } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + statusCollector := collector.NewResourceStatusCollector(resources) + done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( + func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + desired := status.NotFoundStatus + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + }), + ) + <-done + + if statusCollector.Error != nil { + return statusCollector.Error + } + + // Only check parent context error, otherwise we would error when desired status is achieved. + if ctx.Err() != nil { + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue + } + errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + } + errs = append(errs, ctx.Err()) + return errors.Join(errs...) 
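+		// NOTE: this reporting path is copied from wait() and still treats
+		// CurrentStatus as success; the "unknown status" patch that follows
+		// reworks it so NotFoundStatus counts as successfully deleted and
+		// UnknownStatus is reported as indeterminate.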
+ } + return nil defer cancel() return nil } diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index 2301c373d..f910a4a9b 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -29,7 +29,6 @@ import ( batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -120,17 +119,15 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured func TestKWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { - name string - objs []runtime.Object - expectErrs []error - waitForJobs bool - pausedAsReady bool + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { - name: "Pod is deleted", - objs: []runtime.Object{ - &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "ns"}}, - }, + name: "wait for pod to be deleted", + objYamls: []string{podCurrent}, + expectErrs: nil, }, } for _, tt := range tests { @@ -150,14 +147,27 @@ func TestKWaitForDelete(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() + objs := []runtime.Object{} + for _, podYaml := range tt.objYamls { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + assert.NoError(t, err) + go func(){ + time.Sleep(2 * time.Second) + err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + assert.NoError(t, err) + }() + } resourceList := ResourceList{} - for _, obj := range tt.objs { + for _, obj := range objs { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) - // gvr := getGVR(t, fakeMapper, obj.) - // err = fakeClient.Tracker().Create(gvr, obj, ) - // assert.NoError(t, err) - // resourceList = append(resourceList, list...) + resourceList = append(resourceList, list...) 
} err := kwaiter.waitForDelete(ctx, resourceList) if tt.expectErrs != nil { @@ -173,11 +183,10 @@ func TestKWaitForDelete(t *testing.T) { func TestKWaitJob(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool - pausedAsReady bool + name string + objYamls []string + expectErrs []error + waitForJobs bool }{ { name: "Job is complete", @@ -207,10 +216,9 @@ func TestKWaitJob(t *testing.T) { expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, }, { - name: "paused deployment passes", - objYamls: []string{pausedDeploymentYaml}, - expectErrs: nil, - pausedAsReady: true, + name: "paused deployment passes", + objYamls: []string{pausedDeploymentYaml}, + expectErrs: nil, }, } From db90b174846d96cce02d5c50993eb56262a1b681 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 30 Dec 2024 15:01:52 +0000 Subject: [PATCH 021/192] unknown status Signed-off-by: Austin Abro --- pkg/kube/kwait.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index ae7fcbe43..587c41c49 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -94,17 +94,20 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource errs := []error{} for _, id := range resources { rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { + if rs.Status == status.NotFoundStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + if rs.Status == status.UnknownStatus { + errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + continue + } + + errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) } return nil - defer cancel() - return nil } func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { From 4b59583670e40a556448f2a3627b100c88166a3f Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:05:31 +0000 Subject: [PATCH 022/192] delete wait and get completed phase Signed-off-by: Austin Abro --- pkg/kube/client.go | 32 -------------------------------- pkg/kube/fake/fake.go | 34 ++++++++++++---------------------- pkg/kube/fake/printer.go | 6 ------ pkg/kube/interface.go | 4 ---- pkg/kube/kwait.go | 5 ++--- 5 files changed, 14 insertions(+), 67 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a5441f399..5b466ea6f 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -821,35 +821,3 @@ func scrubValidationError(err error) error { } return err } - -// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase -// and returns said phase (PodSucceeded or PodFailed qualify). 
-func (c *Client) WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) { - client, err := c.getKubeClient() - if err != nil { - return v1.PodUnknown, err - } - to := int64(timeout) - watcher, err := client.CoreV1().Pods(c.namespace()).Watch(context.Background(), metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", name), - TimeoutSeconds: &to, - }) - if err != nil { - return v1.PodUnknown, err - } - - for event := range watcher.ResultChan() { - p, ok := event.Object.(*v1.Pod) - if !ok { - return v1.PodUnknown, fmt.Errorf("%s not a pod", name) - } - switch p.Status.Phase { - case v1.PodFailed: - return v1.PodFailed, nil - case v1.PodSucceeded: - return v1.PodSucceeded, nil - } - } - - return v1.PodUnknown, err -} diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index 267020d57..84e375346 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -21,7 +21,6 @@ import ( "io" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" @@ -34,19 +33,18 @@ import ( // delegates all its calls to `PrintingKubeClient` type FailingKubeClient struct { PrintingKubeClient - CreateError error - GetError error - WaitError error - DeleteError error - DeleteWithPropagationError error - WatchUntilReadyError error - UpdateError error - BuildError error - BuildTableError error - BuildDummy bool - BuildUnstructuredError error - WaitAndGetCompletedPodPhaseError error - WaitDuration time.Duration + CreateError error + GetError error + WaitError error + DeleteError error + DeleteWithPropagationError error + WatchUntilReadyError error + UpdateError error + BuildError error + BuildTableError error + BuildDummy bool + BuildUnstructuredError error + WaitDuration time.Duration } // Create returns the configured error if set or prints @@ -133,14 +131,6 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, return f.PrintingKubeClient.BuildTable(r, false) } -// WaitAndGetCompletedPodPhase returns the configured error if set or prints -func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) { - if f.WaitAndGetCompletedPodPhaseError != nil { - return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError - } - return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d) -} - // DeleteWithPropagationPolicy returns the configured error if set or prints func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { if f.DeleteWithPropagationError != nil { diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index cc2c84b40..0fb03c113 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -21,7 +21,6 @@ import ( "strings" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" @@ -111,11 +110,6 @@ func (p *PrintingKubeClient) BuildTable(_ io.Reader, _ bool) (kube.ResourceList, return []*resource.Info{}, nil } -// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase. -func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) { - return v1.PodSucceeded, nil -} - // DeleteWithPropagationPolicy implements KubeClient delete. // // It only prints out the content to be deleted. 
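
Callers that relied on the deleted helper can reproduce its behavior against client-go directly. Below is a minimal stand-in sketch, not code from this series: the helper name is hypothetical, and where the deleted code cast the time.Duration straight to int64 (nanoseconds) for ListOptions.TimeoutSeconds, the sketch converts to whole seconds explicitly.

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchCompletedPodPhase waits until the named pod reaches a completed phase
// (PodSucceeded or PodFailed) and returns that phase.
func watchCompletedPodPhase(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) (v1.PodPhase, error) {
	to := int64(timeout.Seconds())
	w, err := client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{
		FieldSelector:  fmt.Sprintf("metadata.name=%s", name),
		TimeoutSeconds: &to,
	})
	if err != nil {
		return v1.PodUnknown, err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		p, ok := ev.Object.(*v1.Pod)
		if !ok {
			return v1.PodUnknown, fmt.Errorf("%s not a pod", name)
		}
		switch p.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			return p.Status.Phase, nil
		}
	}
	return v1.PodUnknown, fmt.Errorf("watch on pod %s ended before a completed phase was seen", name)
}
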
diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index edc062c49..6cf33c515 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -20,7 +20,6 @@ import ( "io" "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -49,9 +48,6 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error - // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase - // and returns said phase (PodSucceeded or PodFailed qualify). - WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) // Build creates a resource list from a Reader. // // Reader must contain a YAML stream (one or more YAML documents separated diff --git a/pkg/kube/kwait.go b/pkg/kube/kwait.go index 587c41c49..1eb1c2053 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/kwait.go @@ -99,10 +99,9 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource } if rs.Status == status.UnknownStatus { errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - continue + } else { + errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } - - errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) From 2cb999d72b0051341775861fbf2eca23cec7f3aa Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:28:59 +0000 Subject: [PATCH 023/192] go fmt Signed-off-by: Austin Abro --- pkg/kube/kwait_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go index f910a4a9b..1e67bfa75 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/kwait_test.go @@ -132,7 +132,7 @@ func TestKWaitForDelete(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() + // t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -157,11 +157,11 @@ func TestKWaitForDelete(t *testing.T) { gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) - go func(){ - time.Sleep(2 * time.Second) - err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) - assert.NoError(t, err) - }() + go func() { + time.Sleep(2 * time.Second) + err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + assert.NoError(t, err) + }() } resourceList := ResourceList{} for _, obj := range objs { @@ -180,7 +180,7 @@ func TestKWaitForDelete(t *testing.T) { } -func TestKWaitJob(t *testing.T) { +func TestKWait(t *testing.T) { t.Parallel() tests := []struct { name string @@ -224,7 +224,7 @@ func TestKWaitJob(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() + // t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( From 4dd6e19b1d3b2c1d6993f61292dd77d5c2bf4105 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 14:45:18 +0000 Subject: [PATCH 
024/192] provide path for creating new legacy waiter Signed-off-by: Austin Abro --- pkg/kube/wait.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index b4cb85080..cbec8fa59 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -34,26 +34,33 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/util/wait" ) type waiter struct { - c ReadyChecker - timeout time.Duration - log func(string, ...interface{}) + c ReadyChecker + timeout time.Duration + log func(string, ...interface{}) + kubeClient *kubernetes.Clientset +} + +func (w *waiter) NewLegacyWaiter(kubeClient *kubernetes.Clientset, log func(string, ...interface{})) *waiter { + return &waiter{ + log: log, + kubeClient: kubeClient, + } } func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { + w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout return w.waitForResources(resources) } func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - // Implementation - // TODO this function doesn't make sense unless you pass a readyChecker to it - // TODO pass context instead - // checker := NewReadyChecker(cs, w.c.Log, PausedAsReady(true), CheckJobs(true)) + w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) } From cb6d48e6ae553ddd95ac838ae45fb7c0aabbfa71 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 15:11:05 +0000 Subject: [PATCH 025/192] status wait Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- pkg/kube/{kwait.go => statuswait.go} | 10 +++++----- pkg/kube/{kwait_test.go => statuswait_test.go} | 12 ++++++------ 3 files changed, 12 insertions(+), 12 deletions(-) rename pkg/kube/{kwait.go => statuswait.go} (92%) rename pkg/kube/{kwait_test.go => statuswait_test.go} (97%) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 45d842c4a..91b09eb65 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -123,7 +123,7 @@ func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, err if err != nil { return nil, err } - waiter = &kstatusWaiter{ + waiter = &statusWaiter{ sw: sw, log: nopLogger, } diff --git a/pkg/kube/kwait.go b/pkg/kube/statuswait.go similarity index 92% rename from pkg/kube/kwait.go rename to pkg/kube/statuswait.go index 1eb1c2053..d58e34cdc 100644 --- a/pkg/kube/kwait.go +++ b/pkg/kube/statuswait.go @@ -33,24 +33,24 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -type kstatusWaiter struct { +type statusWaiter struct { sw watcher.StatusWatcher log func(string, ...interface{}) } -func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { +func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() return w.wait(ctx, resourceList, false) } -func (w *kstatusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { +func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() return w.wait(ctx, resourceList, true) } -func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { +func (w *statusWaiter) waitForDelete(ctx context.Context, 
resourceList ResourceList) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} @@ -109,7 +109,7 @@ func (w *kstatusWaiter) waitForDelete(ctx context.Context, resourceList Resource return nil } -func (w *kstatusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/kwait_test.go b/pkg/kube/statuswait_test.go similarity index 97% rename from pkg/kube/kwait_test.go rename to pkg/kube/statuswait_test.go index 1e67bfa75..31211d226 100644 --- a/pkg/kube/kwait_test.go +++ b/pkg/kube/statuswait_test.go @@ -116,7 +116,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func TestKWaitForDelete(t *testing.T) { +func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { name string @@ -132,7 +132,7 @@ func TestKWaitForDelete(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // t.Parallel() + t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -141,7 +141,7 @@ func TestKWaitForDelete(t *testing.T) { batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - kwaiter := kstatusWaiter{ + kwaiter := statusWaiter{ sw: statusWatcher, log: log.Printf, } @@ -180,7 +180,7 @@ func TestKWaitForDelete(t *testing.T) { } -func TestKWait(t *testing.T) { +func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { name string @@ -224,7 +224,7 @@ func TestKWait(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // t.Parallel() + t.Parallel() c := newTestClient(t) fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( @@ -244,7 +244,7 @@ func TestKWait(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - kwaiter := kstatusWaiter{ + kwaiter := statusWaiter{ sw: statusWatcher, log: log.Printf, } From 86338215b7aff34bab669c9842c19aab771c5d6b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 5 Jan 2025 15:42:56 +0000 Subject: [PATCH 026/192] ability to create different waiters Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/kube/client.go | 43 +++++++++++++++++++++++++++++++---------- pkg/kube/client_test.go | 35 ++++++++------------------------- pkg/kube/wait.go | 7 ------- 4 files changed, 42 insertions(+), 45 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index e8e0a997a..7edb4a1ae 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, nil) + kc, err := kube.New(getter, kube.StatusWaiter) if err != nil { return err } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 91b09eb65..ce22f265a 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -88,6 +88,13 @@ type Client struct { Waiter } +type WaitStrategy int + +const ( + StatusWaiter WaitStrategy = iota + LegacyWaiter +) + 
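As a quick sketch of what this enum enables: a caller picks the strategy once, at construction time. Illustrative wiring only, not part of the patch — the manifest, timeout, and flag handling are placeholders, and kube.StatusWaiter is the constant added above (a later patch in this series renames it StatusWaiterStrategy):

package main

import (
	"log"
	"strings"
	"time"

	"helm.sh/helm/v4/pkg/kube"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

const manifest = `
apiVersion: v1
kind: Pod
metadata:
  name: demo
  namespace: default
spec:
  containers:
  - name: demo
    image: nginx
`

func main() {
	// Pick the kstatus-based waiter; kube.LegacyWaiter keeps Helm 3 behavior.
	c, err := kube.New(genericclioptions.NewConfigFlags(true), kube.StatusWaiter)
	if err != nil {
		log.Fatal(err)
	}
	resources, err := c.Build(strings.NewReader(manifest), false)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := c.Create(resources); err != nil {
		log.Fatal(err)
	}
	// Wait dispatches to whichever Waiter implementation New selected.
	if err := c.Wait(resources, 2*time.Minute); err != nil {
		log.Fatal(err)
	}
}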
func init() { // Add CRDs to the scheme. They are missing by default. if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { @@ -112,21 +119,37 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { return sw, nil } -// New creates a new Client. -func New(getter genericclioptions.RESTClientGetter, waiter Waiter) (*Client, error) { - if getter == nil { - getter = genericclioptions.NewConfigFlags(true) - } - factory := cmdutil.NewFactory(getter) - if waiter == nil { +func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { + switch strategy { + case LegacyWaiter: + kc, err := factory.KubernetesClientSet() + if err != nil { + return nil, err + } + return &waiter{kubeClient: kc, log: log}, nil + case StatusWaiter: sw, err := getStatusWatcher(factory) if err != nil { return nil, err } - waiter = &statusWaiter{ + return &statusWaiter{ sw: sw, - log: nopLogger, - } + log: log, + }, nil + default: + return nil, errors.New("unknown wait strategy") + } +} + +// New creates a new Client. +func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, error) { + if getter == nil { + getter = genericclioptions.NewConfigFlags(true) + } + factory := cmdutil.NewFactory(getter) + waiter, err := NewWaiter(ws, factory, nopLogger) + if err != nil { + return nil, err } return &Client{ Factory: factory, diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 037719219..3ab415a48 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -453,10 +453,6 @@ func TestPerform(t *testing.T) { } } -// Likely it is not possible to get this test to work with kstatus given that it seems -// kstatus is not making constant get checks on the resources and is instead waiting for events -// Potentially the test could be reworked to make the pods after five seconds -// would need this -> func TestWait(t *testing.T) { podList := newPodList("starfish", "otter", "squid") @@ -517,16 +513,11 @@ func TestWait(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -579,16 +570,11 @@ func TestWaitJob(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -643,16 +629,11 @@ func TestWaitDelete(t *testing.T) { } }), } - cs, err := c.getKubeClient() + waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) if err != nil { t.Fatal(err) } - checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) - c.Waiter = &waiter{ - c: checker, - log: c.Log, - timeout: time.Second * 30, - } + c.Waiter = waiter resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) @@ -679,7 +660,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, nil) + c, err := New(nil, StatusWaiter) if err != nil { t.Fatal(err) } @@ -692,7 +673,7 @@ func TestReal(t *testing.T) 
{ } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, nil) + c, err = New(nil, StatusWaiter) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index cbec8fa59..0ee4504cb 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -46,13 +46,6 @@ type waiter struct { kubeClient *kubernetes.Clientset } -func (w *waiter) NewLegacyWaiter(kubeClient *kubernetes.Clientset, log func(string, ...interface{})) *waiter { - return &waiter{ - log: log, - kubeClient: kubeClient, - } -} - func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout From 4c97d1276ca765bc9ba181a6a280b25b75a713dd Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 12:31:43 +0000 Subject: [PATCH 027/192] helm waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 4 ++-- pkg/kube/statuswait.go | 4 ++++ pkg/kube/statuswait_test.go | 2 +- pkg/kube/wait.go | 14 +++++++------- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index ce22f265a..fe830747d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -126,7 +126,7 @@ func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...inter if err != nil { return nil, err } - return &waiter{kubeClient: kc, log: log}, nil + return &HelmWaiter{kubeClient: kc, log: log}, nil case StatusWaiter: sw, err := getStatusWatcher(factory) if err != nil { @@ -333,7 +333,7 @@ func getResource(info *resource.Info) (runtime.Object, error) { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { - w := waiter{ + w := HelmWaiter{ log: c.Log, timeout: timeout, } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index d58e34cdc..bbc92292d 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -51,6 +51,8 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura } func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { + deadline, _ := ctx.Deadline() + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} @@ -110,6 +112,8 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { + deadline, _ := ctx.Deadline() + w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() runtimeObjs := []runtime.Object{} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 31211d226..b018691cd 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -143,7 +143,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) kwaiter := statusWaiter{ sw: statusWatcher, - log: log.Printf, + log: t.Logf, } ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 0ee4504cb..044bbbe1d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -39,20 +39,20 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) 
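For orientation before the rename below: the legacy waiter is built on apimachinery's polling helper, rechecking readiness every two seconds until the context deadline. A self-contained sketch of that primitive, where the fake five-second condition stands in for ReadyChecker.IsReady over a ResourceList:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	start := time.Now()
	// Poll every 2s (checking immediately) until the condition reports true
	// or the context deadline hits -- the same shape waitForResources and
	// the deletion wait use.
	err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
		return time.Since(start) > 5*time.Second, nil
	})
	fmt.Println("poll finished:", err) // nil here; "context deadline exceeded" on timeout
}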
-type waiter struct { +type HelmWaiter struct { c ReadyChecker timeout time.Duration log func(string, ...interface{}) kubeClient *kubernetes.Clientset } -func (w *waiter) Wait(resources ResourceList, timeout time.Duration) error { +func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) w.timeout = timeout return w.waitForResources(resources) } -func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { +func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) w.timeout = timeout return w.waitForResources(resources) @@ -60,7 +60,7 @@ func (w *waiter) WaitWithJobs(resources ResourceList, timeout time.Duration) err // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *waiter) waitForResources(created ResourceList) error { +func (w *HelmWaiter) waitForResources(created ResourceList) error { w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) ctx, cancel := context.WithTimeout(context.Background(), w.timeout) @@ -94,7 +94,7 @@ func (w *waiter) waitForResources(created ResourceList) error { }) } -func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { +func (w *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } @@ -109,12 +109,12 @@ func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { return true } -func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *waiter) waitForDeletedResources(deleted ResourceList) error { +func (w *HelmWaiter) waitForDeletedResources(deleted ResourceList) error { w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) ctx, cancel := context.WithTimeout(context.Background(), w.timeout) From b8bdcc3a2b866296c2639ef683d55a777ef66403 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 12:33:26 +0000 Subject: [PATCH 028/192] Helm waiter Signed-off-by: Austin Abro --- pkg/kube/wait.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 044bbbe1d..e74753e57 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -39,6 +39,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) +// HelmWaiter is the legacy implementation of the Waiter interface. 
This logic was used by default in Helm 3 +// Helm 4 now uses the StatusWaiter interface instead type HelmWaiter struct { c ReadyChecker timeout time.Duration From ac9012577a8fccd13371966539fb953d4ff043ea Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:06:54 +0000 Subject: [PATCH 029/192] status function Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 50 +++++++++++++------------------------ pkg/kube/statuswait_test.go | 3 +-- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bbc92292d..bec38f7c9 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -69,22 +69,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.NotFoundStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done if statusCollector.Error != nil { @@ -140,22 +125,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc( - func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} - for _, rs := range statusCollector.ResourceStatuses { - if rs == nil { - continue - } - rss = append(rss, rs) - } - desired := status.CurrentStatus - if aggregator.AggregateStatus(rss, desired) == desired { - cancel() - return - } - }), - ) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done if statusCollector.Error != nil { @@ -177,3 +147,19 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } return nil } + +func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { + return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { + rss := []*event.ResourceStatus{} + for _, rs := range statusCollector.ResourceStatuses { + if rs == nil { + continue + } + rss = append(rss, rs) + } + if aggregator.AggregateStatus(rss, desired) == desired { + cancel() + return + } + } +} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index b018691cd..822204dfe 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -19,7 +19,6 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" "errors" - "log" "testing" "time" @@ -246,7 +245,7 @@ func TestStatusWait(t *testing.T) { } kwaiter := statusWaiter{ sw: statusWatcher, - log: log.Printf, + log: t.Logf, } resourceList := ResourceList{} From 6b68a004400cab1f50cd3fa2861585e3fceb4eca Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:30:29 +0000 Subject: [PATCH 030/192] change error messages Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 6 ++-- pkg/kube/statuswait_test.go | 63 
+++++++++++++++++++++++-------------- 2 files changed, 42 insertions(+), 27 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bec38f7c9..8cd8bcfc2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -85,9 +85,9 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL continue } if rs.Status == status.UnknownStatus { - errs = append(errs, fmt.Errorf("%s: %s cannot determine if resource exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("cannot determine resource state, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } else { - errs = append(errs, fmt.Errorf("%s: %s still exists, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } } errs = append(errs, ctx.Err()) @@ -140,7 +140,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait if rs.Status == status.CurrentStatus { continue } - errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 822204dfe..ecd18e183 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -119,20 +119,29 @@ func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { name string - objYamls []string + objToCreate []string + toDelete []string expectErrs []error - waitForJobs bool }{ { - name: "wait for pod to be deleted", - objYamls: []string{podCurrent}, - expectErrs: nil, + name: "wait for pod to be deleted", + objToCreate: []string{podCurrent}, + toDelete: []string{podCurrent}, + expectErrs: nil, + }, + { + name: "error when not all objects are deleted", + objToCreate: []string{jobComplete, podCurrent}, + toDelete: []string{jobComplete}, + expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) + timeout := time.Second * 3 + timeToDeletePod := time.Second * 2 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -140,35 +149,42 @@ func TestStatusWaitForDelete(t *testing.T) { batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) - kwaiter := statusWaiter{ + statusWaiter := statusWaiter{ sw: statusWatcher, log: t.Logf, } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - objs := []runtime.Object{} - for _, podYaml := range tt.objYamls { + createdObjs := []runtime.Object{} + for _, objYaml := range tt.objToCreate { m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) + err := yaml.Unmarshal([]byte(objYaml), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) + 
createdObjs = append(createdObjs, resource) gvr := getGVR(t, fakeMapper, resource) err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) + } + for _, objYaml := range tt.toDelete { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(objYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + gvr := getGVR(t, fakeMapper, resource) go func() { - time.Sleep(2 * time.Second) + time.Sleep(timeToDeletePod) err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) assert.NoError(t, err) }() } resourceList := ResourceList{} - for _, obj := range objs { + for _, obj := range createdObjs { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) resourceList = append(resourceList, list...) } - err := kwaiter.waitForDelete(ctx, resourceList) + err := statusWaiter.waitForDelete(ctx, resourceList) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return @@ -195,7 +211,7 @@ func TestStatusWait(t *testing.T) { { name: "Job is not complete", objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("test: Job not ready, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, waitForJobs: true, }, { @@ -212,7 +228,7 @@ func TestStatusWait(t *testing.T) { { name: "one of the pods never becomes ready", objYamls: []string{podNoStatus, podCurrent}, - expectErrs: []error{errors.New("in-progress-pod: Pod not ready, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, }, { name: "paused deployment passes", @@ -231,8 +247,13 @@ func TestStatusWait(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - objs := []runtime.Object{} statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) + statusWaiter := statusWaiter{ + sw: statusWatcher, + log: t.Logf, + } + objs := []runtime.Object{} + for _, podYaml := range tt.objYamls { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) @@ -243,11 +264,6 @@ func TestStatusWait(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - kwaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, - } - resourceList := ResourceList{} for _, obj := range objs { list, err := c.Build(objBody(obj), false) @@ -257,8 +273,7 @@ func TestStatusWait(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() - - err := kwaiter.wait(ctx, resourceList, tt.waitForJobs) + err := statusWaiter.wait(ctx, resourceList, tt.waitForJobs) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From e6c6a40fe0fed670eaaaf60ada1643a0946ac3e0 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 13:58:34 +0000 Subject: [PATCH 031/192] general error message Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 8cd8bcfc2..8268598e6 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -84,11 +84,7 @@ func (w *statusWaiter) waitForDelete(ctx 
context.Context, resourceList ResourceL if rs.Status == status.NotFoundStatus { continue } - if rs.Status == status.UnknownStatus { - errs = append(errs, fmt.Errorf("cannot determine resource state, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - } else { - errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) - } + errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) } errs = append(errs, ctx.Err()) return errors.Join(errs...) From 8ce1876192b12db58993a993e5f307a1a17c3f08 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 14:12:34 +0000 Subject: [PATCH 032/192] get rid of ext interface Signed-off-by: Austin Abro --- pkg/action/hooks.go | 8 ++------ pkg/action/uninstall.go | 6 ++---- pkg/kube/client.go | 9 --------- pkg/kube/interface.go | 6 ------ pkg/kube/statuswait.go | 6 ++++++ pkg/kube/statuswait_test.go | 4 +--- pkg/kube/wait.go | 19 ++++++++----------- 7 files changed, 19 insertions(+), 39 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index ecca1d997..c32b9b3ce 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -22,7 +22,6 @@ import ( "github.com/pkg/errors" - "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/release" helmtime "helm.sh/helm/v4/pkg/time" ) @@ -138,11 +137,8 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return errors.New(joinErrors(errs)) } - //wait for resources until they are deleted to avoid conflicts - if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { - if err := kubeClient.WaitForDelete(resources, timeout); err != nil { - return err - } + if err := cfg.KubeClient.WaitForDelete(resources, timeout); err != nil { + return err } } return nil diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index dda7d6978..75d999976 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -131,10 +131,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) res.Info = kept if u.Wait { - if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok { - if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { - errs = append(errs, err) - } + if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) } } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index fe830747d..968e1b951 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -331,15 +331,6 @@ func getResource(info *resource.Info) (runtime.Object, error) { return obj, nil } -// WaitForDelete wait up to the given timeout for the specified resources to be deleted. -func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { - w := HelmWaiter{ - log: c.Log, - timeout: timeout, - } - return w.waitForDeletedResources(resources) -} - func (c *Client) namespace() string { if c.Namespace != "" { return c.Namespace diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 6cf33c515..30be37f7c 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -67,12 +67,7 @@ type Waiter interface { // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. 
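	// For example, WaitWithJobs(resources, 5*time.Minute) blocks until every
	// listed resource, Jobs included, is ready or the timeout elapses (the
	// timeout value here is illustrative).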
WaitWithJobs(resources ResourceList, timeout time.Duration) error -} -// InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers. -// -// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface. -type InterfaceExt interface { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. WaitForDelete(resources ResourceList, timeout time.Duration) error } @@ -108,6 +103,5 @@ type InterfaceResources interface { } var _ Interface = (*Client)(nil) -var _ InterfaceExt = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 8268598e6..b1c39948c 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -50,6 +50,12 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura return w.wait(ctx, resourceList, true) } +func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + return w.waitForDelete(ctx, resourceList) +} + func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { deadline, _ := ctx.Deadline() w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index ecd18e183..0084606cf 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -153,8 +153,6 @@ func TestStatusWaitForDelete(t *testing.T) { sw: statusWatcher, log: t.Logf, } - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() createdObjs := []runtime.Object{} for _, objYaml := range tt.objToCreate { m := make(map[string]interface{}) @@ -184,7 +182,7 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) resourceList = append(resourceList, list...) 
} - err := statusWaiter.waitForDelete(ctx, resourceList) + err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index e74753e57..97fa8b3e1 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -43,29 +43,26 @@ import ( // Helm 4 now uses the StatusWaiter interface instead type HelmWaiter struct { c ReadyChecker - timeout time.Duration log func(string, ...interface{}) kubeClient *kubernetes.Clientset } func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) - w.timeout = timeout - return w.waitForResources(resources) + return w.waitForResources(resources, timeout) } func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) - w.timeout = timeout - return w.waitForResources(resources) + return w.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *HelmWaiter) waitForResources(created ResourceList) error { - w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) +func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { + w.log("beginning wait for %d resources with timeout of %v", len(created), timeout) - ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() numberOfErrors := make([]int, len(created)) @@ -116,10 +113,10 @@ func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *HelmWaiter) waitForDeletedResources(deleted ResourceList) error { - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) +func (w *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) - ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) { From c26b44f65172b2d6e41e4ce8f0024c70c595ff6a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 15:21:11 +0000 Subject: [PATCH 033/192] update names Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/kube/client.go | 8 ++++---- pkg/kube/client_test.go | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 7edb4a1ae..0157ce1cc 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWaiter) + kc, err := kube.New(getter, kube.StatusWaiterStrategy) if err != nil { return err } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 968e1b951..daa484b69 100644 
--- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -91,8 +91,8 @@ type Client struct { type WaitStrategy int const ( - StatusWaiter WaitStrategy = iota - LegacyWaiter + StatusWaiterStrategy WaitStrategy = iota + LegacyWaiterStrategy ) func init() { @@ -121,13 +121,13 @@ func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { switch strategy { - case LegacyWaiter: + case LegacyWaiterStrategy: kc, err := factory.KubernetesClientSet() if err != nil { return nil, err } return &HelmWaiter{kubeClient: kc, log: log}, nil - case StatusWaiter: + case StatusWaiterStrategy: sw, err := getStatusWatcher(factory) if err != nil { return nil, err diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 3ab415a48..50fc65cef 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -513,7 +513,7 @@ func TestWait(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -570,7 +570,7 @@ func TestWaitJob(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -629,7 +629,7 @@ func TestWaitDelete(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiter, c.Factory, c.Log) + waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) if err != nil { t.Fatal(err) } @@ -660,7 +660,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWaiter) + c, err := New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) } @@ -673,7 +673,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWaiter) + c, err = New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) } From 649475265df89f5b514dcd95bcf90d4b32a215f3 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:25:49 +0000 Subject: [PATCH 034/192] implement logger Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 22 ++++++++++++++++++++++ pkg/kube/statuswait_test.go | 8 ++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index b1c39948c..bb92ae74e 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -75,6 +75,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) + go logResource(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done @@ -127,6 +128,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) + go logResource(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done @@ -165,3 +167,23 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. 
} } } + +func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + for _, id := range resources { + rs := sc.ResourceStatuses[id] + if rs.Status != desiredStatus { + log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s\n", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) + // only log one resource to not overwhelm the logs + break + } + } + } + } +} diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0084606cf..0d635ad79 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "context" "errors" + "fmt" "testing" "time" @@ -114,6 +115,9 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured require.NoError(t, err) return mapping.Resource } +func testLogger(message string, args ...interface{}) { + fmt.Printf(message, args...) +} func TestStatusWaitForDelete(t *testing.T) { t.Parallel() @@ -151,7 +155,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: t.Logf, + log: testLogger, } createdObjs := []runtime.Object{} for _, objYaml := range tt.objToCreate { @@ -248,7 +252,7 @@ func TestStatusWait(t *testing.T) { statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ sw: statusWatcher, - log: t.Logf, + log: testLogger, } objs := []runtime.Object{} From 71434c0b388a7bf8a1bdf3302779199becc3ce4b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:26:20 +0000 Subject: [PATCH 035/192] implement logger Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0d635ad79..945131a5e 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -144,8 +144,8 @@ func TestStatusWaitForDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) - timeout := time.Second * 3 - timeToDeletePod := time.Second * 2 + timeout := time.Second * 2 + timeUntilPodDelete := time.Second * 1 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -175,7 +175,7 @@ func TestStatusWaitForDelete(t *testing.T) { resource := &unstructured.Unstructured{Object: m} gvr := getGVR(t, fakeMapper, resource) go func() { - time.Sleep(timeToDeletePod) + time.Sleep(timeUntilPodDelete) err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) assert.NoError(t, err) }() From e9d98543644b7b59b17c38a0af4ca500ab7e2644 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 16:50:40 +0000 Subject: [PATCH 036/192] introduce test for status wait Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 8 ++++---- pkg/kube/statuswait_test.go | 33 +++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bb92ae74e..a4590aa42 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -75,7 +75,7 @@ func (w 
*statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResource(ctx, resources, statusCollector, status.NotFoundStatus, w.log) + go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done @@ -128,7 +128,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait } eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResource(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) + go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done @@ -168,7 +168,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. } } -func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { +func logResourceStatus(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { @@ -179,7 +179,7 @@ func logResource(ctx context.Context, resources []object.ObjMetadata, sc *collec for _, id := range resources { rs := sc.ResourceStatuses[id] if rs.Status != desiredStatus { - log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s\n", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) + log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) // only log one resource to not overwhelm the logs break } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 945131a5e..e94e13313 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -35,7 +35,11 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -115,8 +119,29 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured require.NoError(t, err) return mapping.Resource } -func testLogger(message string, args ...interface{}) { - fmt.Printf(message, args...) 
+
+func TestStatusLogger(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500)
+	defer cancel()
+	readyPod := object.ObjMetadata{
+		Name:      "readyPod",
+		GroupKind: schema.GroupKind{Kind: "Pod"},
+	}
+	notReadyPod := object.ObjMetadata{
+		Name:      "notReadyPod",
+		GroupKind: schema.GroupKind{Kind: "Pod"},
+	}
+	objs := []object.ObjMetadata{readyPod, notReadyPod}
+	resourceStatusCollector := collector.NewResourceStatusCollector(objs)
+	resourceStatusCollector.ResourceStatuses[readyPod] = &event.ResourceStatus{
+		Identifier: readyPod,
+		Status:     status.CurrentStatus,
+	}
+	expectedMessage := "waiting for resource, name: notReadyPod, kind: Pod, desired status: Current, actual status: Unknown"
+	testLogger := func(message string, args ...interface{}) {
+		assert.Equal(t, expectedMessage, fmt.Sprintf(message, args...))
+	}
+	logResourceStatus(ctx, objs, resourceStatusCollector, status.CurrentStatus, testLogger)
 }

 func TestStatusWaitForDelete(t *testing.T) {
@@ -155,7 +180,7 @@ func TestStatusWaitForDelete(t *testing.T) {
 			statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper)
 			statusWaiter := statusWaiter{
 				sw:  statusWatcher,
-				log: testLogger,
+				log: t.Logf,
 			}
 			createdObjs := []runtime.Object{}
 			for _, objYaml := range tt.objToCreate {
@@ -252,7 +277,7 @@ func TestStatusWait(t *testing.T) {
 			statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper)
 			statusWaiter := statusWaiter{
 				sw:  statusWatcher,
-				log: testLogger,
+				log: t.Logf,
 			}
 			objs := []runtime.Object{}

From 674ab0d4f6b0fc66b656ae98bab0d829eac9c5d2 Mon Sep 17 00:00:00 2001
From: Austin Abro
Date: Mon, 6 Jan 2025 16:55:30 +0000
Subject: [PATCH 037/192] t.Parallel

Signed-off-by: Austin Abro
---
 pkg/kube/statuswait_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go
index e94e13313..c3aa61a69 100644
--- a/pkg/kube/statuswait_test.go
+++ b/pkg/kube/statuswait_test.go
@@ -121,6 +121,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured
 }

 func TestStatusLogger(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500)
 	defer cancel()

From eaa6e14546ba3bd58150df6f407594330247d2f9 Mon Sep 17 00:00:00 2001
From: Austin Abro
Date: Mon, 6 Jan 2025 16:57:32 +0000
Subject: [PATCH 038/192] test cleanup

Signed-off-by: Austin Abro
---
 pkg/kube/client_test.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go
index 50fc65cef..abe74022d 100644
--- a/pkg/kube/client_test.go
+++ b/pkg/kube/client_test.go
@@ -459,7 +459,6 @@ func TestWait(t *testing.T) {
 	var created *time.Time

 	c := newTestClient(t)
-	c.Factory.(*cmdtesting.TestFactory).ClientConfigVal = cmdtesting.DefaultClientConfig()
 	c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
 		NegotiatedSerializer: unstructuredSerializer,
 		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {

From d07f546003c0113ab65214c2a0f36727fc1d3c23 Mon Sep 17 00:00:00 2001
From: Austin Abro
Date: Mon, 6 Jan 2025 17:02:50 +0000
Subject: [PATCH 039/192] get rid of rest client

Signed-off-by: Austin Abro
---
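Note: with RESTClient() removed from the Factory, a caller that still needs typed REST access can recover it from the clientset the interface keeps. A minimal sketch — the package and helper names are illustrative, not part of this patch:

package kubeutil

import (
	"helm.sh/helm/v4/pkg/kube"
	"k8s.io/client-go/rest"
)

// coreRESTClient derives a typed REST client from the Factory's clientset,
// standing in for the removed Factory.RESTClient().
func coreRESTClient(f kube.Factory) (rest.Interface, error) {
	cs, err := f.KubernetesClientSet()
	if err != nil {
		return nil, err
	}
	return cs.CoreV1().RESTClient(), nil
}
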
pkg/kube/factory.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 3b1ec1d6b..78c8323fd 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -21,7 +21,6 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -45,9 +44,6 @@ type Factory interface { // KubernetesClientSet gives you back an external clientset KubernetesClientSet() (*kubernetes.Clientset, error) - // Returns a RESTClient for accessing Kubernetes resources or an error. - RESTClient() (*restclient.RESTClient, error) - // NewBuilder returns an object that assists in loading objects from both disk and the server // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder From f9736d9022d10b0203bd1a5479f5aadc42b93b6e Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:06:02 +0000 Subject: [PATCH 040/192] renames Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 92 ++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index c3aa61a69..d853e0012 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cli-utils/pkg/testutil" ) -var podCurrent = ` +var podCurrentManifest = ` apiVersion: v1 kind: Pod metadata: @@ -56,7 +56,7 @@ status: phase: Running ` -var podNoStatus = ` +var podNoStatusManifest = ` apiVersion: v1 kind: Pod metadata: @@ -64,7 +64,7 @@ metadata: namespace: ns ` -var jobNoStatus = ` +var jobNoStatusManifest = ` apiVersion: batch/v1 kind: Job metadata: @@ -73,7 +73,7 @@ metadata: generation: 1 ` -var jobComplete = ` +var jobCompleteManifest = ` apiVersion: batch/v1 kind: Job metadata: @@ -88,7 +88,7 @@ status: status: "True" ` -var pausedDeploymentYaml = ` +var pausedDeploymentManifest = ` apiVersion: apps/v1 kind: Deployment metadata: @@ -148,22 +148,22 @@ func TestStatusLogger(t *testing.T) { func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { - name string - objToCreate []string - toDelete []string - expectErrs []error + name string + manifestsToCreate []string + manifestsToDelete []string + expectErrs []error }{ { - name: "wait for pod to be deleted", - objToCreate: []string{podCurrent}, - toDelete: []string{podCurrent}, - expectErrs: nil, + name: "wait for pod to be deleted", + manifestsToCreate: []string{podCurrentManifest}, + manifestsToDelete: []string{podCurrentManifest}, + expectErrs: nil, }, { - name: "error when not all objects are deleted", - objToCreate: []string{jobComplete, podCurrent}, - toDelete: []string{jobComplete}, - expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + name: "error when not all objects are deleted", + manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, + manifestsToDelete: []string{jobCompleteManifest}, + expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { @@ -184,9 +184,9 @@ func TestStatusWaitForDelete(t *testing.T) { log: t.Logf, } createdObjs := []runtime.Object{} - for _, objYaml := range tt.objToCreate { + for _, manifest := range tt.manifestsToCreate { m := 
make(map[string]interface{}) - err := yaml.Unmarshal([]byte(objYaml), &m) + err := yaml.Unmarshal([]byte(manifest), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} createdObjs = append(createdObjs, resource) @@ -194,9 +194,9 @@ func TestStatusWaitForDelete(t *testing.T) { err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) assert.NoError(t, err) } - for _, objYaml := range tt.toDelete { + for _, manifest := range tt.manifestsToDelete { m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(objYaml), &m) + err := yaml.Unmarshal([]byte(manifest), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} gvr := getGVR(t, fakeMapper, resource) @@ -226,42 +226,42 @@ func TestStatusWaitForDelete(t *testing.T) { func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { - name string - objYamls []string - expectErrs []error - waitForJobs bool + name string + objManifests []string + expectErrs []error + waitForJobs bool }{ { - name: "Job is complete", - objYamls: []string{jobComplete}, - expectErrs: nil, + name: "Job is complete", + objManifests: []string{jobCompleteManifest}, + expectErrs: nil, }, { - name: "Job is not complete", - objYamls: []string{jobNoStatus}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, - waitForJobs: true, + name: "Job is not complete", + objManifests: []string{jobNoStatusManifest}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + waitForJobs: true, }, { - name: "Job is not ready, but we pass wait anyway", - objYamls: []string{jobNoStatus}, - expectErrs: nil, - waitForJobs: false, + name: "Job is not ready, but we pass wait anyway", + objManifests: []string{jobNoStatusManifest}, + expectErrs: nil, + waitForJobs: false, }, { - name: "Pod is ready", - objYamls: []string{podCurrent}, - expectErrs: nil, + name: "Pod is ready", + objManifests: []string{podCurrentManifest}, + expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objYamls: []string{podNoStatus, podCurrent}, - expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "one of the pods never becomes ready", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, }, { - name: "paused deployment passes", - objYamls: []string{pausedDeploymentYaml}, - expectErrs: nil, + name: "paused deployment passes", + objManifests: []string{pausedDeploymentManifest}, + expectErrs: nil, }, } @@ -282,7 +282,7 @@ func TestStatusWait(t *testing.T) { } objs := []runtime.Object{} - for _, podYaml := range tt.objYamls { + for _, podYaml := range tt.objManifests { m := make(map[string]interface{}) err := yaml.Unmarshal([]byte(podYaml), &m) assert.NoError(t, err) From 8fe66998bf9b32c103c2eddbbd6583433dbdb470 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:13:59 +0000 Subject: [PATCH 041/192] refactor obj logic Signed-off-by: Austin Abro --- pkg/kube/client_test.go | 2 +- pkg/kube/statuswait.go | 18 +++++------------- pkg/kube/wait.go | 2 +- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 
abe74022d..f63070fe1 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -658,7 +658,7 @@ func TestWaitDelete(t *testing.T) { } func TestReal(t *testing.T) { - // t.Skip("This is a live test, comment this line to run") + t.Skip("This is a live test, comment this line to run") c, err := New(nil, StatusWaiterStrategy) if err != nil { t.Fatal(err) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index a4590aa42..a0378aaf5 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -24,7 +24,6 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" @@ -61,13 +60,9 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - runtimeObjs := []runtime.Object{} - for _, resource := range resourceList { - runtimeObjs = append(runtimeObjs, resource.Object) - } resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) + for _, resource := range resourceList { + obj, err := object.RuntimeToObjMeta(resource.Object) if err != nil { return err } @@ -104,7 +99,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() - runtimeObjs := []runtime.Object{} + resources := []object.ObjMetadata{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { case *batchv1.Job: @@ -116,16 +111,13 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait continue } } - runtimeObjs = append(runtimeObjs, resource.Object) - } - resources := []object.ObjMetadata{} - for _, runtimeObj := range runtimeObjs { - obj, err := object.RuntimeToObjMeta(runtimeObj) + obj, err := object.RuntimeToObjMeta(resource.Object) if err != nil { return err } resources = append(resources, obj) } + eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 97fa8b3e1..525373e4d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -40,7 +40,7 @@ import ( ) // HelmWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 -// Helm 4 now uses the StatusWaiter interface instead +// Helm 4 now uses the StatusWaiter implementation instead type HelmWaiter struct { c ReadyChecker log func(string, ...interface{}) From 9894d3ae78d7d2d2119c9de7f2d17454908c8fbe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 6 Jan 2025 17:19:39 +0000 Subject: [PATCH 042/192] shorten interface Signed-off-by: Austin Abro --- pkg/kube/factory.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 78c8323fd..013cd7b73 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,7 +17,7 @@ limitations under the License. 
package kube // import "helm.sh/helm/v4/pkg/kube" import ( - "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -34,7 +34,9 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. type Factory interface { - genericclioptions.RESTClientGetter + // ToRESTMapper returns a restmapper + ToRESTMapper() (meta.RESTMapper, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig From c2dc44deb99d21320f3d6f4c58777a87c6d0de5b Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 14:59:30 +0000 Subject: [PATCH 043/192] use dynamic rest mapper Signed-off-by: Austin Abro --- pkg/kube/client.go | 11 ++++++++++- pkg/kube/factory.go | 4 ++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index daa484b69..b607ea3ef 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -38,6 +38,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" @@ -107,11 +108,19 @@ func init() { } func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { + cfg, err := factory.ToRESTConfig() + if err != nil { + return nil, err + } dynamicClient, err := factory.DynamicClient() if err != nil { return nil, err } - restMapper, err := factory.ToRESTMapper() + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) if err != nil { return nil, err } diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 013cd7b73..7f21c85c6 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -21,6 +21,7 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubectl/pkg/validation" ) @@ -37,6 +38,9 @@ type Factory interface { // ToRESTMapper returns a restmapper ToRESTMapper() (meta.RESTMapper, error) + // ToRESTConfig returns restconfig + ToRESTConfig() (*rest.Config, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is ToRawKubeConfigLoader() clientcmd.ClientConfig From a859742e288fd546a3412ed0674d2c4b693e8206 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:00:26 +0000 Subject: [PATCH 044/192] remove rest mapper Signed-off-by: Austin Abro --- pkg/kube/factory.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 7f21c85c6..1d237c307 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -17,7 +17,6 @@ limitations under the License. package kube // import "helm.sh/helm/v4/pkg/kube" import ( - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -35,9 +34,6 @@ import ( // Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes // being exposed. 
type Factory interface { - // ToRESTMapper returns a restmapper - ToRESTMapper() (meta.RESTMapper, error) - // ToRESTConfig returns restconfig ToRESTConfig() (*rest.Config, error) From 816a78685304b45b15e4ae397e75a1760f8d54a0 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:01:33 +0000 Subject: [PATCH 045/192] go mod tidy Signed-off-by: Austin Abro --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3274e6b79..65372cdda 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( k8s.io/kubectl v0.32.0 oras.land/oras-go v1.2.6 sigs.k8s.io/cli-utils v0.37.2 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/yaml v1.4.0 ) @@ -185,7 +186,6 @@ require ( k8s.io/component-base v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect From e56a6e678f534fea6c7efb331fa3b4a0d9e591eb Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:03:21 +0000 Subject: [PATCH 046/192] diff Signed-off-by: Austin Abro --- pkg/kube/interface.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 30be37f7c..f8e3c2ee2 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -34,9 +34,6 @@ type Interface interface { // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) - // Update updates one or more resources or creates the resource - // if it doesn't exist. - Update(original, target ResourceList, force bool) (*Result, error) // WatchUntilReady watches the resources given and waits until it is ready. // // This method is mainly for hook implementations. It watches for a resource to @@ -48,6 +45,11 @@ type Interface interface { // error. // TODO: Is watch until ready really behavior we want over the resources actually being ready? WatchUntilReady(resources ResourceList, timeout time.Duration) error + + // Update updates one or more resources or creates the resource + // if it doesn't exist. + Update(original, target ResourceList, force bool) (*Result, error) + // Build creates a resource list from a Reader. 
// // Reader must contain a YAML stream (one or more YAML documents separated From 4e12f9d5301f83ab05b9df06c25a1d4e2c7fa2f1 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 14 Jan 2025 15:38:23 +0000 Subject: [PATCH 047/192] simplify messages Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index a0378aaf5..534aece8d 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -40,24 +40,25 @@ type statusWaiter struct { func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) return w.wait(ctx, resourceList, false) } func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) return w.wait(ctx, resourceList, true) } func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() + w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) return w.waitForDelete(ctx, resourceList) } func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { - deadline, _ := ctx.Deadline() - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(resourceList), time.Until(deadline)) cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} @@ -94,9 +95,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { - deadline, _ := ctx.Deadline() - w.log("beginning wait for %d resources with timeout of %v", len(resourceList), deadline) +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} From cf51d714e8cc27ebad0d44d19e69252ef86e5e94 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 15 Jan 2025 17:33:35 +0000 Subject: [PATCH 048/192] go fmt Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 534aece8d..7ac4706ee 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -95,7 +95,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} From 32a87dff39bd855277274149d0da11f53fa90c8b Mon Sep 17 00:00:00 2001 From: Kurnia D Win Date: Thu, 29 Aug 2024 05:18:24 +0000 Subject: [PATCH 049/192] adding support for JSON Schema 2020 Signed-off-by: Kurnia D Win --- go.mod | 1 + go.sum | 4 ++ pkg/chartutil/jsonschema.go | 39 ++++++++++++++++ pkg/chartutil/jsonschema_test.go | 80 ++++++++++++++++++++++++++++++++ 4 
files changed, 124 insertions(+) diff --git a/go.mod b/go.mod index 09b071ef5..cefb06f67 100644 --- a/go.mod +++ b/go.mod @@ -29,6 +29,7 @@ require ( github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 github.com/pkg/errors v0.9.1 github.com/rubenv/sql-migrate v1.7.1 + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 diff --git a/go.sum b/go.sum index d70e7733c..028023612 100644 --- a/go.sum +++ b/go.sum @@ -83,6 +83,8 @@ github.com/distribution/distribution/v3 v3.0.0-rc.2 h1:tTrzntanYMbd20SyvdeR83Ya1 github.com/distribution/distribution/v3 v3.0.0-rc.2/go.mod h1:H2zIRRXS20ylnv2HTuKILAWuANjuA60GB7MLOsQag7Y= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v27.1.0+incompatible h1:P0KSYmPtNbmx59wHZvG6+rjivhKDRA1BvvWM0f5DgHc= github.com/docker/cli v27.1.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= @@ -330,6 +332,8 @@ github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmi github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= diff --git a/pkg/chartutil/jsonschema.go b/pkg/chartutil/jsonschema.go index f1c8dcdf4..f165bbbac 100644 --- a/pkg/chartutil/jsonschema.go +++ b/pkg/chartutil/jsonschema.go @@ -18,10 +18,12 @@ package chartutil import ( "bytes" + "encoding/json" "fmt" "strings" "github.com/pkg/errors" + "github.com/santhosh-tekuri/jsonschema/v6" "github.com/xeipuuv/gojsonschema" "sigs.k8s.io/yaml" @@ -73,6 +75,11 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error if bytes.Equal(valuesJSON, []byte("null")) { valuesJSON = []byte("{}") } + + if schemaIs2020(schemaJSON) { + return validateUsingNewValidator(valuesJSON, schemaJSON) + } + schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) @@ -91,3 +98,35 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error return nil } + +func validateUsingNewValidator(valuesJSON, schemaJSON []byte) error { + schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON)) + if err != nil { + return err + } + values, err := jsonschema.UnmarshalJSON(bytes.NewReader(valuesJSON)) + if err != nil { + return err + } + + compiler := jsonschema.NewCompiler() + err = compiler.AddResource("file:///values.schema.json", schema) + if err != nil { + return err + } + + validator, err := 
compiler.Compile("file:///values.schema.json") + if err != nil { + return err + } + + return validator.Validate(values) +} + +func schemaIs2020(schemaJSON []byte) bool { + var partialSchema struct { + Schema string `json:"$schema"` + } + _ = json.Unmarshal(schemaJSON, &partialSchema) + return partialSchema.Schema == "https://json-schema.org/draft/2020-12/schema" +} diff --git a/pkg/chartutil/jsonschema_test.go b/pkg/chartutil/jsonschema_test.go index 098cf70db..8d18d657e 100644 --- a/pkg/chartutil/jsonschema_test.go +++ b/pkg/chartutil/jsonschema_test.go @@ -104,6 +104,21 @@ const subchartSchema = `{ } ` +const subchartSchema2020 = `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Values", + "type": "object", + "properties": { + "data": { + "type": "array", + "contains": { "type": "string" }, + "unevaluatedItems": { "type": "number" } + } + }, + "required": ["data"] +} +` + func TestValidateAgainstSchema(t *testing.T) { subchartJSON := []byte(subchartSchema) subchart := &chart.Chart{ @@ -165,3 +180,68 @@ func TestValidateAgainstSchemaNegative(t *testing.T) { t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) } } + +func TestValidateAgainstSchema2020(t *testing.T) { + subchartJSON := []byte(subchartSchema2020) + subchart := &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "subchart", + }, + Schema: subchartJSON, + } + chrt := &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "chrt", + }, + } + chrt.AddDependency(subchart) + + vals := map[string]interface{}{ + "name": "John", + "subchart": map[string]interface{}{ + "data": []any{"hello", 12}, + }, + } + + if err := ValidateAgainstSchema(chrt, vals); err != nil { + t.Errorf("Error validating Values against Schema: %s", err) + } +} + +func TestValidateAgainstSchema2020Negative(t *testing.T) { + subchartJSON := []byte(subchartSchema2020) + subchart := &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "subchart", + }, + Schema: subchartJSON, + } + chrt := &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "chrt", + }, + } + chrt.AddDependency(subchart) + + vals := map[string]interface{}{ + "name": "John", + "subchart": map[string]interface{}{ + "data": []any{12}, + }, + } + + var errString string + if err := ValidateAgainstSchema(chrt, vals); err == nil { + t.Fatalf("Expected an error, but got nil") + } else { + errString = err.Error() + } + + expectedErrString := `subchart: +jsonschema validation failed with 'file:///values.schema.json#' +- at '/data': no items match contains schema + - at '/data/0': got number, want string` + if errString != expectedErrString { + t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) + } +} From f1b642cb0d227ce6d661ccd636c7cfb6392e93fe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 27 Jan 2025 16:15:39 +0000 Subject: [PATCH 050/192] unexport newWaiter function Signed-off-by: Austin Abro --- pkg/kube/client.go | 57 ++++++++++++++++++----------------------- pkg/kube/client_test.go | 12 ++++----- 2 files changed, 31 insertions(+), 38 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b607ea3ef..e3fdb8b3b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -107,43 +107,35 @@ func init() { } } -func getStatusWatcher(factory Factory) (watcher.StatusWatcher, error) { - cfg, err := factory.ToRESTConfig() - if err != nil { - return nil, err - } - dynamicClient, err := factory.DynamicClient() - if err != nil { - return nil, err - } - httpClient, err := 
rest.HTTPClientFor(cfg) - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) - if err != nil { - return nil, err - } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - return sw, nil -} - -func NewWaiter(strategy WaitStrategy, factory Factory, log func(string, ...interface{})) (Waiter, error) { +func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { case LegacyWaiterStrategy: - kc, err := factory.KubernetesClientSet() + kc, err := c.Factory.KubernetesClientSet() if err != nil { return nil, err } - return &HelmWaiter{kubeClient: kc, log: log}, nil + return &HelmWaiter{kubeClient: kc, log: c.Log}, nil case StatusWaiterStrategy: - sw, err := getStatusWatcher(factory) + cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err } + dynamicClient, err := c.Factory.DynamicClient() + if err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + if err != nil { + return nil, err + } + sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) return &statusWaiter{ sw: sw, - log: log, + log: c.Log, }, nil default: return nil, errors.New("unknown wait strategy") @@ -156,15 +148,16 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e getter = genericclioptions.NewConfigFlags(true) } factory := cmdutil.NewFactory(getter) - waiter, err := NewWaiter(ws, factory, nopLogger) + c := &Client{ + Factory: factory, + Log: nopLogger, + } + var err error + c.Waiter, err = c.newWaiter(ws) if err != nil { return nil, err } - return &Client{ - Factory: factory, - Log: nopLogger, - Waiter: waiter, - }, nil + return c, nil } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index f63070fe1..cdf75938e 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -512,11 +512,11 @@ func TestWait(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(&podList), false) if err != nil { t.Fatal(err) @@ -569,11 +569,11 @@ func TestWaitJob(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(job), false) if err != nil { t.Fatal(err) @@ -628,11 +628,11 @@ func TestWaitDelete(t *testing.T) { } }), } - waiter, err := NewWaiter(LegacyWaiterStrategy, c.Factory, c.Log) + var err error + c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) if err != nil { t.Fatal(err) } - c.Waiter = waiter resources, err := c.Build(objBody(&pod), false) if err != nil { t.Fatal(err) From a8f53f98ee2206dababbbc0bb8f1037c529488e7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 15:22:14 +0000 Subject: [PATCH 051/192] WIP custom status reader Signed-off-by: Austin Abro --- pkg/kube/client.go | 6 +- pkg/kube/job_status_reader.go | 120 +++++++++++++++++++++++++++++ pkg/kube/job_status_reader_test.go | 79 +++++++++++++++++++ 3 files changed, 204 insertions(+), 1 deletion(-) create mode 100644 pkg/kube/job_status_reader.go create mode 100644 pkg/kube/job_status_reader_test.go diff --git a/pkg/kube/client.go 
b/pkg/kube/client.go index e3fdb8b3b..b4164a8ff 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -59,6 +59,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -133,6 +134,9 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { return nil, err } sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) + newCustomJobStatusReader := NewCustomJobStatusReader(restMapper) + customSR := statusreaders.NewStatusReader(restMapper, newCustomJobStatusReader) + sw.StatusReader = customSR return &statusWaiter{ sw: sw, log: c.Log, @@ -148,7 +152,7 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e getter = genericclioptions.NewConfigFlags(true) } factory := cmdutil.NewFactory(getter) - c := &Client{ + c := &Client{ Factory: factory, Log: nopLogger, } diff --git a/pkg/kube/job_status_reader.go b/pkg/kube/job_status_reader.go new file mode 100644 index 000000000..f6eb8d3d9 --- /dev/null +++ b/pkg/kube/job_status_reader.go @@ -0,0 +1,120 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type customJobStatusReader struct { + genericStatusReader engine.StatusReader +} + +func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader { + genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions) + return &customJobStatusReader{ + genericStatusReader: genericStatusReader, + } +} + +func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool { + return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() +} + +func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatus(ctx, reader, resource) +} + +func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource) +} + +// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go +// Modified to return Current status only when the Job has completed as opposed to when it's in progress. +func jobConditions(u *unstructured.Unstructured) (*status.Result, error) { + obj := u.UnstructuredContent() + + parallelism := status.GetIntField(obj, ".spec.parallelism", 1) + completions := status.GetIntField(obj, ".spec.completions", parallelism) + succeeded := status.GetIntField(obj, ".status.succeeded", 0) + failed := status.GetIntField(obj, ".status.failed", 0) + + // Conditions + // https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24 + objc, err := status.GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + for _, c := range objc.Status.Conditions { + switch c.Type { + case "Complete": + if c.Status == corev1.ConditionTrue { + message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions) + return &status.Result{ + Status: status.CurrentStatus, + Message: message, + Conditions: []status.Condition{}, + }, nil + } + case "Failed": + message := fmt.Sprintf("Job Failed. 
failed: %d/%d", failed, completions) + if c.Status == corev1.ConditionTrue { + return &status.Result{ + Status: status.FailedStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Reason: "JobFailed", + Message: message, + }, + }, + }, nil + } + } + } + + message := "Job in progress" + return &status.Result{ + Status: status.InProgressStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: "JobInProgress", + Message: message, + }, + }, + }, nil +} diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go new file mode 100644 index 000000000..af372c8b3 --- /dev/null +++ b/pkg/kube/job_status_reader_test.go @@ -0,0 +1,79 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +import ( + "testing" + + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + // If the incoming object is already unstructured, perform a deep copy first + // otherwise DefaultUnstructuredConverter ends up returning the inner map without + // making a copy. 
+ if _, ok := obj.(runtime.Unstructured); ok { + obj = obj.DeepCopyObject() + } + rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: rawMap}, nil +} + +func TestJobConditions(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{}, + } + + t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result) + }) + + t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { + job.Status = batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + } + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.CurrentStatus, result.Status) + }) +} From a5909993231c0826a7c5c139241d0e053ce9d03e Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 19:53:42 +0000 Subject: [PATCH 052/192] switch client Signed-off-by: Austin Abro --- pkg/kube/client.go | 11 +++-------- pkg/kube/statuswait.go | 33 +++++++++++++++++++-------------- pkg/kube/statuswait_test.go | 22 ++++++++++++---------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b4164a8ff..3753998ff 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -37,7 +37,6 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" multierror "github.com/hashicorp/go-multierror" @@ -59,7 +58,6 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. 
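A note on the wiring the next hunk relocates: commits 051 and 052 converge on building a kstatus DefaultStatusWatcher from a dynamic client and a RESTMapper, then swapping in the custom Job status reader defined above so a Job only counts as Current once its Complete condition is true. A minimal sketch of that pattern in one place, using only calls that already appear in these hunks; the function name newJobAwareStatusWatcher is invented for illustration and assumes it sits in package kube next to job_status_reader.go:

package kube

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/dynamic"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders"
	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
)

// newJobAwareStatusWatcher is a sketch, not part of this patch series. It
// mirrors the wiring in newWaiter/WaitWithJobs: a default status watcher whose
// StatusReader is replaced so Jobs report Current only on the Complete condition.
func newJobAwareStatusWatcher(dc dynamic.Interface, rm meta.RESTMapper) *watcher.DefaultStatusWatcher {
	sw := watcher.NewDefaultStatusWatcher(dc, rm)
	jobSR := NewCustomJobStatusReader(rm) // defined in job_status_reader.go above
	sw.StatusReader = statusreaders.NewStatusReader(rm, jobSR)
	return sw
}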
@@ -133,13 +131,10 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { if err != nil { return nil, err } - sw := watcher.NewDefaultStatusWatcher(dynamicClient, restMapper) - newCustomJobStatusReader := NewCustomJobStatusReader(restMapper) - customSR := statusreaders.NewStatusReader(restMapper, newCustomJobStatusReader) - sw.StatusReader = customSR return &statusWaiter{ - sw: sw, - log: c.Log, + restMapper: restMapper, + client: dynamicClient, + log: c.Log, }, nil default: return nil, errors.New("unknown wait strategy") diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 7ac4706ee..1aa424c4c 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,42 +23,51 @@ import ( "time" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/dynamic" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" ) type statusWaiter struct { - sw watcher.StatusWatcher - log func(string, ...interface{}) + client dynamic.Interface + restMapper meta.RESTMapper + log func(string, ...interface{}) } func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) - return w.wait(ctx, resourceList, false) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) - return w.wait(ctx, resourceList, true) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + newCustomJobStatusReader := NewCustomJobStatusReader(w.restMapper) + customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) + sw.StatusReader = customSR + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) - return w.waitForDelete(ctx, resourceList) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + return w.waitForDelete(ctx, resourceList, sw) } -func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList) error { +func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} @@ -69,7 +78,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } resources = append(resources, obj) } - eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) done := 
statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) @@ -95,16 +104,12 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return nil } -func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, waitForJobs bool) error { +func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error { cancelCtx, cancel := context.WithCancel(ctx) defer cancel() resources := []object.ObjMetadata{} for _, resource := range resourceList { switch value := AsVersioned(resource).(type) { - case *batchv1.Job: - if !waitForJobs { - continue - } case *appsv1.Deployment: if value.Spec.Paused { continue @@ -117,7 +122,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, wait resources = append(resources, obj) } - eventCh := w.sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index d853e0012..f3694953c 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -178,10 +177,10 @@ func TestStatusWaitForDelete(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, + restMapper: fakeMapper, + client: fakeClient, + log: t.Logf, } createdObjs := []runtime.Object{} for _, manifest := range tt.manifestsToCreate { @@ -275,10 +274,10 @@ func TestStatusWait(t *testing.T) { appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) - statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper) statusWaiter := statusWaiter{ - sw: statusWatcher, - log: t.Logf, + client: fakeClient, + restMapper: fakeMapper, + log: t.Logf, } objs := []runtime.Object{} @@ -299,9 +298,12 @@ func TestStatusWait(t *testing.T) { resourceList = append(resourceList, list...) 
} - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() - err := statusWaiter.wait(ctx, resourceList, tt.waitForJobs) + var err error + if tt.waitForJobs { + err = statusWaiter.Wait(resourceList, time.Second*3) + } else { + err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) + } if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From bbe3246f0ada5dab5cc9c02873e40810e40c33fe Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:41:14 +0000 Subject: [PATCH 053/192] tests passing Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 14 ++--- pkg/kube/statuswait_test.go | 82 ++++++++++++++++++++++++++++-- 2 files changed, 85 insertions(+), 11 deletions(-) diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index af372c8b3..60760efb9 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -53,13 +53,13 @@ func TestJobConditions(t *testing.T) { Status: batchv1.JobStatus{}, } - t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result) - }) + // t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + // us, err := toUnstructured(job) + // assert.NoError(t, err) + // result, err := jobConditions(us) + // assert.NoError(t, err) + // assert.Equal(t, status.InProgressStatus, result) + // }) t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { job.Status = batchv1.JobStatus{ diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index f3694953c..c028f8fd0 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -72,6 +72,80 @@ metadata: generation: 1 ` +var jobReadyManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: sleep-job + namespace: default + uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + resourceVersion: "568" + generation: 1 + creationTimestamp: 2025-02-06T16:34:20-05:00 + labels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + batch.kubernetes.io/job-name: sleep-job + controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + job-name: sleep-job + annotations: + kubectl.kubernetes.io/last-applied-configuration: "{\"apiVersion\":\"batch/v1\",\"kind\":\"Job\",\"metadata\":{\"annotations\":{},\"name\":\"sleep-job\",\"namespace\":\"default\"},\"spec\":{\"template\":{\"metadata\":{\"name\":\"sleep-job\"},\"spec\":{\"containers\":[{\"command\":[\"sh\",\"-c\",\"sleep 100\"],\"image\":\"busybox\",\"name\":\"sleep\"}],\"restartPolicy\":\"Never\"}}}}\n" + managedFields: + - manager: kubectl-client-side-apply + operation: Update + apiVersion: batch/v1 + time: 2025-02-06T16:34:20-05:00 + fieldsType: FieldsV1 + fieldsV1: {} + - manager: k3s + operation: Update + apiVersion: batch/v1 + time: 2025-02-06T16:34:23-05:00 + fieldsType: FieldsV1 + fieldsV1: {} + subresource: status +spec: + parallelism: 1 + completions: 1 + backoffLimit: 6 + selector: + matchLabels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + manualSelector: false + template: + metadata: + name: sleep-job + labels: + batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + batch.kubernetes.io/job-name: sleep-job + controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 + job-name: 
sleep-job + spec: + containers: + - name: sleep + image: busybox + command: + - sh + - -c + - sleep 100 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Never + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + completionMode: NonIndexed + suspend: false + podReplacementPolicy: TerminatingOrFailed +status: + startTime: 2025-02-06T16:34:20-05:00 + active: 1 + terminating: 0 + uncountedTerminatedPods: {} + ready: 1 +` + var jobCompleteManifest = ` apiVersion: batch/v1 kind: Job @@ -242,8 +316,8 @@ func TestStatusWait(t *testing.T) { waitForJobs: true, }, { - name: "Job is not ready, but we pass wait anyway", - objManifests: []string{jobNoStatusManifest}, + name: "Job is not ready but we pass wait anyway", + objManifests: []string{jobReadyManifest}, expectErrs: nil, waitForJobs: false, }, @@ -300,9 +374,9 @@ func TestStatusWait(t *testing.T) { var err error if tt.waitForJobs { - err = statusWaiter.Wait(resourceList, time.Second*3) - } else { err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) + } else { + err = statusWaiter.Wait(resourceList, time.Second*3) } if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) From 59ef43e399375b773c1c42fe51befacbbb62e0f3 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:41:43 +0000 Subject: [PATCH 054/192] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index c028f8fd0..06aa36c09 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -81,28 +81,6 @@ metadata: uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 resourceVersion: "568" generation: 1 - creationTimestamp: 2025-02-06T16:34:20-05:00 - labels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - batch.kubernetes.io/job-name: sleep-job - controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - job-name: sleep-job - annotations: - kubectl.kubernetes.io/last-applied-configuration: "{\"apiVersion\":\"batch/v1\",\"kind\":\"Job\",\"metadata\":{\"annotations\":{},\"name\":\"sleep-job\",\"namespace\":\"default\"},\"spec\":{\"template\":{\"metadata\":{\"name\":\"sleep-job\"},\"spec\":{\"containers\":[{\"command\":[\"sh\",\"-c\",\"sleep 100\"],\"image\":\"busybox\",\"name\":\"sleep\"}],\"restartPolicy\":\"Never\"}}}}\n" - managedFields: - - manager: kubectl-client-side-apply - operation: Update - apiVersion: batch/v1 - time: 2025-02-06T16:34:20-05:00 - fieldsType: FieldsV1 - fieldsV1: {} - - manager: k3s - operation: Update - apiVersion: batch/v1 - time: 2025-02-06T16:34:23-05:00 - fieldsType: FieldsV1 - fieldsV1: {} - subresource: status spec: parallelism: 1 completions: 1 From b9cbc93003d1d55399dccf13da396d6011a9b9cc Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 6 Feb 2025 21:45:07 +0000 Subject: [PATCH 055/192] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 39 ------------------------------------- 1 file changed, 39 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 06aa36c09..ba4e79a58 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -78,49 +78,10 @@ kind: Job metadata: name: sleep-job namespace: default - uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - resourceVersion: "568" generation: 1 -spec: - parallelism: 1 - 
completions: 1 - backoffLimit: 6 - selector: - matchLabels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - manualSelector: false - template: - metadata: - name: sleep-job - labels: - batch.kubernetes.io/controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - batch.kubernetes.io/job-name: sleep-job - controller-uid: 5e7d8814-36fc-486f-9e6d-5b0a09351682 - job-name: sleep-job - spec: - containers: - - name: sleep - image: busybox - command: - - sh - - -c - - sleep 100 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Never - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - completionMode: NonIndexed - suspend: false - podReplacementPolicy: TerminatingOrFailed status: startTime: 2025-02-06T16:34:20-05:00 active: 1 - terminating: 0 - uncountedTerminatedPods: {} ready: 1 ` From 0314135290d69e35c4f3c70330cc212ae0186a7c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 14:34:37 +0000 Subject: [PATCH 056/192] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 73 ++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 5 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index ba4e79a58..9e3b696d5 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -76,7 +76,7 @@ var jobReadyManifest = ` apiVersion: batch/v1 kind: Job metadata: - name: sleep-job + name: ready-not-complete namespace: default generation: 1 status: @@ -182,8 +182,8 @@ func TestStatusWaitForDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() c := newTestClient(t) - timeout := time.Second * 2 - timeUntilPodDelete := time.Second * 1 + timeout := time.Second + timeUntilPodDelete := time.Millisecond * 500 fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), @@ -232,7 +232,6 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) }) } - } func TestStatusWait(t *testing.T) { @@ -314,7 +313,7 @@ func TestStatusWait(t *testing.T) { var err error if tt.waitForJobs { err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) - } else { + } else { err = statusWaiter.Wait(resourceList, time.Second*3) } if tt.expectErrs != nil { @@ -325,3 +324,67 @@ func TestStatusWait(t *testing.T) { }) } } + +func TestWaitForJobComplete(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + expectErrs []error + }{ + { + name: "Job is complete", + objManifests: []string{jobCompleteManifest}, + }, + { + name: "Job is not ready", + objManifests: []string{jobNoStatusManifest}, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + log: t.Logf, 
+ } + objs := []runtime.Object{} + for _, podYaml := range tt.objManifests { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podYaml), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objs = append(objs, resource) + gvr := getGVR(t, fakeMapper, resource) + err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + assert.NoError(t, err) + } + resourceList := ResourceList{} + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + + err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} From cc83b7c2e6b9403e7347990d11f329abd0fd4403 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 15:01:53 +0000 Subject: [PATCH 057/192] tests passing Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 9e3b696d5..131224e8b 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -242,11 +242,6 @@ func TestStatusWait(t *testing.T) { expectErrs []error waitForJobs bool }{ - { - name: "Job is complete", - objManifests: []string{jobCompleteManifest}, - expectErrs: nil, - }, { name: "Job is not complete", objManifests: []string{jobNoStatusManifest}, @@ -254,7 +249,7 @@ func TestStatusWait(t *testing.T) { waitForJobs: true, }, { - name: "Job is not ready but we pass wait anyway", + name: "Job is ready but not complete", objManifests: []string{jobReadyManifest}, expectErrs: nil, waitForJobs: false, @@ -310,12 +305,7 @@ func TestStatusWait(t *testing.T) { resourceList = append(resourceList, list...) 
} - var err error - if tt.waitForJobs { - err = statusWaiter.WaitWithJobs(resourceList, time.Second*3) - } else { - err = statusWaiter.Wait(resourceList, time.Second*3) - } + err := statusWaiter.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) return From f49a7e134a4da7967be9f65bfa1f91a159889252 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 15:14:25 +0000 Subject: [PATCH 058/192] start watch until ready Signed-off-by: Austin Abro --- pkg/kube/client.go | 157 ----------------------------------------- pkg/kube/interface.go | 24 +++---- pkg/kube/statuswait.go | 5 ++ pkg/kube/wait.go | 157 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 174 insertions(+), 169 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 3753998ff..8dca1c51b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -18,7 +18,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -27,11 +26,9 @@ import ( "reflect" "strings" "sync" - "time" jsonpatch "github.com/evanphx/json-patch" "github.com/pkg/errors" - batch "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" @@ -39,23 +36,18 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - multierror "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - cachetools "k8s.io/client-go/tools/cache" - watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -524,52 +516,6 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa return res, nil } -func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error { - return func(info *resource.Info) error { - return c.watchUntilReady(t, info) - } -} - -// WatchUntilReady watches the resources given and waits until it is ready. -// -// This method is mainly for hook implementations. It watches for a resource to -// hit a particular milestone. The milestone depends on the Kind. -// -// For most kinds, it checks to see if the resource is marked as Added or Modified -// by the Kubernetes event stream. For some kinds, it does more: -// -// - Jobs: A job is marked "Ready" when it has successfully completed. This is -// ascertained by watching the Status fields in a job's output. -// - Pods: A pod is marked "Ready" when it has successfully completed. This is -// ascertained by watching the status.phase field in a pod's output. -// -// Handling for other kinds will be added as necessary. 
-func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): - // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 - return perform(resources, c.watchTimeout(timeout)) -} - -func perform(infos ResourceList, fn func(*resource.Info) error) error { - var result error - - if len(infos) == 0 { - return ErrNoObjectsVisited - } - - errs := make(chan error) - go batchPerform(infos, fn, errs) - - for range infos { - err := <-errs - if err != nil { - result = multierror.Append(result, err) - } - } - - return result -} - // getManagedFieldsManager returns the manager string. If one was set it will be returned. // Otherwise, one is calculated based on the name of the binary. func getManagedFieldsManager() string { @@ -721,109 +667,6 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } -func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { - kind := info.Mapping.GroupVersionKind.Kind - switch kind { - case "Job", "Pod": - default: - return nil - } - - c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) - - // Use a selector on the name of the resource. This should be unique for the - // given version and kind - selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) - if err != nil { - return err - } - lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) - - // What we watch for depends on the Kind. - // - For a Job, we watch for completion. - // - For all else, we watch until Ready. - // In the future, we might want to add some special logic for types - // like Ingress, Volume, etc. - - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() - _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { - // Make sure the incoming object is versioned as we use unstructured - // objects when we build manifests - obj := convertWithMapper(e.Object, info.Mapping) - switch e.Type { - case watch.Added, watch.Modified: - // For things like a secret or a config map, this is the best indicator - // we get. We care mostly about jobs, where what we want to see is - // the status go into a good state. For other types, like ReplicaSet - // we don't really do anything to support these as hooks. - c.Log("Add/Modify event for %s: %v", info.Name, e.Type) - switch kind { - case "Job": - return c.waitForJob(obj, info.Name) - case "Pod": - return c.waitForPodSuccess(obj, info.Name) - } - return true, nil - case watch.Deleted: - c.Log("Deleted event for %s", info.Name) - return true, nil - case watch.Error: - // Handle error and return with an error. - c.Log("Error event for %s", info.Name) - return true, errors.Errorf("failed to deploy %s", info.Name) - default: - return false, nil - } - }) - return err -} - -// waitForJob is a helper that waits for a job to complete. -// -// This operates on an event returned from a watcher. 
-func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*batch.Job) - if !ok { - return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) - } - - for _, c := range o.Status.Conditions { - if c.Type == batch.JobComplete && c.Status == "True" { - return true, nil - } else if c.Type == batch.JobFailed && c.Status == "True" { - return true, errors.Errorf("job %s failed: %s", name, c.Reason) - } - } - - c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) - return false, nil -} - -// waitForPodSuccess is a helper that waits for a pod to complete. -// -// This operates on an event returned from a watcher. -func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*v1.Pod) - if !ok { - return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) - } - - switch o.Status.Phase { - case v1.PodSucceeded: - c.Log("Pod %s succeeded", o.Name) - return true, nil - case v1.PodFailed: - return true, errors.Errorf("pod %s failed", o.Name) - case v1.PodPending: - c.Log("Pod %s pending", o.Name) - case v1.PodRunning: - c.Log("Pod %s running", o.Name) - } - - return false, nil -} - // scrubValidationError removes kubectl info from the message. func scrubValidationError(err error) error { if err == nil { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index f8e3c2ee2..0e6da1094 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -34,18 +34,6 @@ type Interface interface { // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) - // WatchUntilReady watches the resources given and waits until it is ready. - // - // This method is mainly for hook implementations. It watches for a resource to - // hit a particular milestone. The milestone depends on the Kind. - // - // For Jobs, "ready" means the Job ran to completion (exited without error). - // For Pods, "ready" means the Pod phase is marked "succeeded". - // For all other kinds, it means the kind was created or modified without - // error. - // TODO: Is watch until ready really behavior we want over the resources actually being ready? - WatchUntilReady(resources ResourceList, timeout time.Duration) error - // Update updates one or more resources or creates the resource // if it doesn't exist. Update(original, target ResourceList, force bool) (*Result, error) @@ -72,6 +60,18 @@ type Waiter interface { // WaitForDelete wait up to the given timeout for the specified resources to be deleted. WaitForDelete(resources ResourceList, timeout time.Duration) error + + // WatchUntilReady watches the resources given and waits until it is ready. + // + // This method is mainly for hook implementations. It watches for a resource to + // hit a particular milestone. The milestone depends on the Kind. + // + // For Jobs, "ready" means the Job ran to completion (exited without error). + // For Pods, "ready" means the Pod phase is marked "succeeded". + // For all other kinds, it means the kind was created or modified without + // error. + // TODO: Is watch until ready really behavior we want over the resources actually being ready? + WatchUntilReady(resources ResourceList, timeout time.Duration) error } // InterfaceDeletionPropagation is introduced to avoid breaking backwards compatibility for Interface implementers. 
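With WatchUntilReady moved onto the Waiter interface above, the statuswait.go hunk below only stubs it out with panic("todo"). For orientation, here is one plausible kstatus-backed shape, reusing w.wait from commit 052 and NewCustomJobStatusReader from commit 051. This is a hypothetical sketch, not the implementation this commit adds, and kstatus is looser than the legacy hook semantics (it also reports a Running-and-Ready Pod as Current, where the legacy waiter required phase Succeeded):

// Hypothetical sketch only; the commit below stubs this method with panic("todo").
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	defer cancel()
	w.log("beginning watch until ready for %d resources with timeout of %s", len(resourceList), timeout)
	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
	// Hook resources include Jobs that must run to completion, so reuse the
	// custom Job status reader rather than kstatus's default Job handling.
	jobSR := NewCustomJobStatusReader(w.restMapper)
	sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, jobSR)
	// Note: kstatus treats a Running pod with the Ready condition as Current,
	// whereas the legacy watchUntilReady accepted only phase Succeeded for pods.
	return w.wait(ctx, resourceList, sw)
}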
diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go
index 1aa424c4c..2e27917bc 100644
--- a/pkg/kube/statuswait.go
+++ b/pkg/kube/statuswait.go
@@ -40,6 +40,11 @@ type statusWaiter struct {
 	log        func(string, ...interface{})
 }
 
+func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
+	panic("todo")
+	return nil
+}
+
 func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
 	defer cancel()
diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go
index 525373e4d..fdb3c9087 100644
--- a/pkg/kube/wait.go
+++ b/pkg/kube/wait.go
@@ -22,19 +22,27 @@ import (
 	"net/http"
 	"time"
 
+	multierror "github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
 	appsv1 "k8s.io/api/apps/v1"
 	appsv1beta1 "k8s.io/api/apps/v1beta1"
 	appsv1beta2 "k8s.io/api/apps/v1beta2"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/client-go/kubernetes"
+	cachetools "k8s.io/client-go/tools/cache"
+	watchtools "k8s.io/client-go/tools/watch"
+	batch "k8s.io/api/batch/v1"
 
 	"k8s.io/apimachinery/pkg/util/wait"
 )
@@ -177,3 +185,152 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er
 
 	return selector, errors.Wrap(err, "invalid label selector")
 }
+
+func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error {
+	return func(info *resource.Info) error {
+		return hw.watchUntilReady(t, info)
+	}
+}
+
+// WatchUntilReady watches the given resources and waits until each is ready.
+//
+// This method is mainly for hook implementations. It watches for a resource to
+// hit a particular milestone. The milestone depends on the Kind.
+//
+// For most kinds, it checks to see if the resource is marked as Added or Modified
+// by the Kubernetes event stream. For some kinds, it does more:
+//
+// - Jobs: A job is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the Status fields in a job's output.
+// - Pods: A pod is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the status.phase field in a pod's output.
+//
+// Handling for other kinds will be added as necessary.
+func (hw *HelmWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { + // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): + // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 + return perform(resources, hw.watchTimeout(timeout)) +} + +func perform(infos ResourceList, fn func(*resource.Info) error) error { + var result error + + if len(infos) == 0 { + return ErrNoObjectsVisited + } + + errs := make(chan error) + go batchPerform(infos, fn, errs) + + for range infos { + err := <-errs + if err != nil { + result = multierror.Append(result, err) + } + } + + return result +} + +func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { + kind := info.Mapping.GroupVersionKind.Kind + switch kind { + case "Job", "Pod": + default: + return nil + } + + hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) + + // Use a selector on the name of the resource. This should be unique for the + // given version and kind + selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) + if err != nil { + return err + } + lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) + + // What we watch for depends on the Kind. + // - For a Job, we watch for completion. + // - For all else, we watch until Ready. + // In the future, we might want to add some special logic for types + // like Ingress, Volume, etc. + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) + defer cancel() + _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { + // Make sure the incoming object is versioned as we use unstructured + // objects when we build manifests + obj := convertWithMapper(e.Object, info.Mapping) + switch e.Type { + case watch.Added, watch.Modified: + // For things like a secret or a config map, this is the best indicator + // we get. We care mostly about jobs, where what we want to see is + // the status go into a good state. For other types, like ReplicaSet + // we don't really do anything to support these as hooks. + hw.log("Add/Modify event for %s: %v", info.Name, e.Type) + switch kind { + case "Job": + return hw.waitForJob(obj, info.Name) + case "Pod": + return hw.waitForPodSuccess(obj, info.Name) + } + return true, nil + case watch.Deleted: + hw.log("Deleted event for %s", info.Name) + return true, nil + case watch.Error: + // Handle error and return with an error. + hw.log("Error event for %s", info.Name) + return true, errors.Errorf("failed to deploy %s", info.Name) + default: + return false, nil + } + }) + return err +} + +// waitForJob is a helper that waits for a job to complete. +// +// This operates on an event returned from a watcher. 
+func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*batch.Job) + if !ok { + return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) + } + + for _, c := range o.Status.Conditions { + if c.Type == batch.JobComplete && c.Status == "True" { + return true, nil + } else if c.Type == batch.JobFailed && c.Status == "True" { + return true, errors.Errorf("job %s failed: %s", name, c.Reason) + } + } + + hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + return false, nil +} + +// waitForPodSuccess is a helper that waits for a pod to complete. +// +// This operates on an event returned from a watcher. +func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*v1.Pod) + if !ok { + return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) + } + + switch o.Status.Phase { + case v1.PodSucceeded: + c.log("Pod %s succeeded", o.Name) + return true, nil + case v1.PodFailed: + return true, errors.Errorf("pod %s failed", o.Name) + case v1.PodPending: + c.log("Pod %s pending", o.Name) + case v1.PodRunning: + c.log("Pod %s running", o.Name) + } + + return false, nil +} From 187e99d299817e824a5bc5e5b3e3345a87e3ee96 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Feb 2025 17:18:27 +0000 Subject: [PATCH 059/192] custom status readers look good Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 14 ++-- pkg/kube/pod_status_reader.go | 110 +++++++++++++++++++++++++++++ pkg/kube/pod_status_reader_test.go | 66 +++++++++++++++++ pkg/kube/statuswait.go | 2 +- 4 files changed, 184 insertions(+), 8 deletions(-) create mode 100644 pkg/kube/pod_status_reader.go create mode 100644 pkg/kube/pod_status_reader_test.go diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index 60760efb9..98b994030 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -53,13 +53,13 @@ func TestJobConditions(t *testing.T) { Status: batchv1.JobStatus{}, } - // t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - // us, err := toUnstructured(job) - // assert.NoError(t, err) - // result, err := jobConditions(us) - // assert.NoError(t, err) - // assert.Equal(t, status.InProgressStatus, result) - // }) + t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { + us, err := toUnstructured(job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result.Status) + }) t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { job.Status = batchv1.JobStatus{ diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go new file mode 100644 index 000000000..25e427966 --- /dev/null +++ b/pkg/kube/pod_status_reader.go @@ -0,0 +1,110 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" + "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/cli-utils/pkg/object" +) + +type customPodStatusReader struct { + genericStatusReader engine.StatusReader +} + +func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader { + genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions) + return &customJobStatusReader{ + genericStatusReader: genericStatusReader, + } +} + +func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool { + return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() +} + +func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatus(ctx, reader, resource) +} + +func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) { + return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource) +} + +func podConditions(u *unstructured.Unstructured) (*status.Result, error) { + obj := u.UnstructuredContent() + phase := status.GetStringField(obj, ".status.phase", "") + switch phase { + case string(v1.PodSucceeded): + message := fmt.Sprintf("pod %s succeeded", u.GetName()) + return &status.Result{ + Status: status.CurrentStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Message: message, + }, + }, + }, nil + case string(v1.PodFailed): + message := fmt.Sprintf("pod %s failed", u.GetName()) + return &status.Result{ + Status: status.FailedStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionStalled, + Status: corev1.ConditionTrue, + Reason: "PodFailed", + Message: message, + }, + }, + }, nil + case string(v1.PodPending): + case string(v1.PodRunning): + } + + message := "Pod in progress" + return &status.Result{ + Status: status.InProgressStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: "PodInProgress", + Message: message, + }, + }, + }, nil +} diff --git a/pkg/kube/pod_status_reader_test.go b/pkg/kube/pod_status_reader_test.go new file mode 100644 index 000000000..2604ef026 --- /dev/null +++ b/pkg/kube/pod_status_reader_test.go @@ -0,0 +1,66 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +func TestPodConditions(t *testing.T) { + t.Parallel() + + //TODO add some more tests here and parallelize + + t.Run("pod without status returns in progress", func(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{}, + } + us, err := toUnstructured(pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.InProgressStatus, result.Status) + }) + + t.Run("pod succeeded returns Current status", func(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + } + us, err := toUnstructured(pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, status.CurrentStatus, result.Status) + }) +} diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 2e27917bc..16751abba 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -41,7 +41,7 @@ type statusWaiter struct { } func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - panic("todo") + return nil } From d1cc9b39a33e335c56e68e1305d27bc036363406 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 9 Feb 2025 15:32:46 +0000 Subject: [PATCH 060/192] tests for status reader Signed-off-by: Austin Abro --- pkg/kube/job_status_reader_test.go | 93 ++++++++++++++++-------- pkg/kube/pod_status_reader.go | 8 +-- pkg/kube/pod_status_reader_test.go | 110 ++++++++++++++++++++--------- 3 files changed, 145 insertions(+), 66 deletions(-) diff --git a/pkg/kube/job_status_reader_test.go b/pkg/kube/job_status_reader_test.go index 98b994030..cd0dcedeb 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/pkg/kube/job_status_reader_test.go @@ -30,7 +30,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" ) -func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { +func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) { + t.Helper() // If the incoming object is already unstructured, perform a deep copy first // otherwise DefaultUnstructuredConverter ends up returning the inner map without // making a copy. 
@@ -45,35 +46,69 @@ func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { } func TestJobConditions(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "job", + t.Parallel() + tests := []struct { + name string + job *batchv1.Job + expectedStatus status.Status + }{ + { + name: "job without Complete condition returns InProgress status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-no-condition", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{}, + }, + expectedStatus: status.InProgressStatus, }, - Spec: batchv1.JobSpec{}, - Status: batchv1.JobStatus{}, - } - - t.Run("job without Complete condition returns InProgress status", func(t *testing.T) { - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result.Status) - }) - - t.Run("job with Complete condition as True returns Current status", func(t *testing.T) { - job.Status = batchv1.JobStatus{ - Conditions: []batchv1.JobCondition{ - { - Type: batchv1.JobComplete, - Status: corev1.ConditionTrue, + { + name: "job with Complete condition as True returns Current status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-complete", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + expectedStatus: status.CurrentStatus, + }, + { + name: "job with Failed condition as True returns Failed status", + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-failed", + }, + Spec: batchv1.JobSpec{}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + }, + }, }, }, - } - us, err := toUnstructured(job) - assert.NoError(t, err) - result, err := jobConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.CurrentStatus, result.Status) - }) + expectedStatus: status.FailedStatus, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + us, err := toUnstructured(t, tc.job) + assert.NoError(t, err) + result, err := jobConditions(us) + assert.NoError(t, err) + assert.Equal(t, tc.expectedStatus, result.Status) + }) + } } diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go index 25e427966..752f73ac1 100644 --- a/pkg/kube/pod_status_reader.go +++ b/pkg/kube/pod_status_reader.go @@ -62,8 +62,8 @@ func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader func podConditions(u *unstructured.Unstructured) (*status.Result, error) { obj := u.UnstructuredContent() phase := status.GetStringField(obj, ".status.phase", "") - switch phase { - case string(v1.PodSucceeded): + switch v1.PodPhase(phase) { + case v1.PodSucceeded: message := fmt.Sprintf("pod %s succeeded", u.GetName()) return &status.Result{ Status: status.CurrentStatus, @@ -76,7 +76,7 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case string(v1.PodFailed): + case v1.PodFailed: message := fmt.Sprintf("pod %s failed", u.GetName()) return &status.Result{ Status: status.FailedStatus, @@ -90,8 +90,6 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case string(v1.PodPending): - case string(v1.PodRunning): } message := "Pod in progress" diff --git a/pkg/kube/pod_status_reader_test.go b/pkg/kube/pod_status_reader_test.go 
index 2604ef026..bb08f041a 100644 --- a/pkg/kube/pod_status_reader_test.go +++ b/pkg/kube/pod_status_reader_test.go @@ -28,39 +28,85 @@ import ( ) func TestPodConditions(t *testing.T) { - t.Parallel() - - //TODO add some more tests here and parallelize - - t.Run("pod without status returns in progress", func(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", + tests := []struct { + name string + pod *v1.Pod + expectedStatus status.Status + }{ + { + name: "pod without status returns in progress", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{}, - } - us, err := toUnstructured(pod) - assert.NoError(t, err) - result, err := podConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.InProgressStatus, result.Status) - }) - - t.Run("pod succeeded returns Current status", func(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", + expectedStatus: status.InProgressStatus, + }, + { + name: "pod succeeded returns current status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + }, + expectedStatus: status.CurrentStatus, + }, + { + name: "pod failed returns failed status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + }, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Phase: v1.PodSucceeded, + expectedStatus: status.FailedStatus, + }, + { + name: "pod pending returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, }, - } - us, err := toUnstructured(pod) - assert.NoError(t, err) - result, err := podConditions(us) - assert.NoError(t, err) - assert.Equal(t, status.CurrentStatus, result.Status) - }) + expectedStatus: status.InProgressStatus, + }, + { + name: "pod running returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-running"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + }, + expectedStatus: status.InProgressStatus, + }, + { + name: "pod with unknown phase returns in progress status", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"}, + Spec: v1.PodSpec{}, + Status: v1.PodStatus{ + Phase: v1.PodUnknown, + }, + }, + expectedStatus: status.InProgressStatus, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + us, err := toUnstructured(t, tc.pod) + assert.NoError(t, err) + result, err := podConditions(us) + assert.NoError(t, err) + assert.Equal(t, tc.expectedStatus, result.Status) + }) + } } From 14391dea5bf98c54ca0f9d87c82a5328f4bff063 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:06:16 +0000 Subject: [PATCH 061/192] pods and jobs working Signed-off-by: Austin Abro --- pkg/kube/pod_status_reader.go | 12 ++- pkg/kube/statuswait.go | 75 +++++++++++------- pkg/kube/statuswait_test.go | 141 ++++++++++++++++++++++++++-------- 3 files changed, 159 insertions(+), 69 deletions(-) diff --git a/pkg/kube/pod_status_reader.go b/pkg/kube/pod_status_reader.go index 752f73ac1..c44af542e 100644 --- a/pkg/kube/pod_status_reader.go +++ b/pkg/kube/pod_status_reader.go @@ -22,9 +22,7 @@ import ( "context" "fmt" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - 
v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -42,13 +40,13 @@ type customPodStatusReader struct { func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader { genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions) - return &customJobStatusReader{ + return &customPodStatusReader{ genericStatusReader: genericStatusReader, } } func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool { - return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind() + return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind() } func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) { @@ -62,8 +60,8 @@ func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader func podConditions(u *unstructured.Unstructured) (*status.Result, error) { obj := u.UnstructuredContent() phase := status.GetStringField(obj, ".status.phase", "") - switch v1.PodPhase(phase) { - case v1.PodSucceeded: + switch corev1.PodPhase(phase) { + case corev1.PodSucceeded: message := fmt.Sprintf("pod %s succeeded", u.GetName()) return &status.Result{ Status: status.CurrentStatus, @@ -76,7 +74,7 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - case v1.PodFailed: + case corev1.PodFailed: message := fmt.Sprintf("pod %s failed", u.GetName()) return &status.Result{ Status: status.FailedStatus, diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 16751abba..4aff42ff2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -20,13 +20,16 @@ import ( "context" "errors" "fmt" + "sort" "time" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" "sigs.k8s.io/cli-utils/pkg/kstatus/status" @@ -40,9 +43,32 @@ type statusWaiter struct { log func(string, ...interface{}) } -func (w *statusWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { - - return nil +func alwaysReady(u *unstructured.Unstructured) (*status.Result, error) { + return &status.Result{ + Status: status.CurrentStatus, + Message: "Resource is current", + }, nil +} + +func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) + sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + jobSR := NewCustomJobStatusReader(w.restMapper) + podSR := NewCustomPodStatusReader(w.restMapper) + // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks + genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) + + sr := &statusreaders.DelegatingStatusReader{ + StatusReaders: []engine.StatusReader{ + jobSR, + podSR, + genericSR, + }, + } + sw.StatusReader = sr + return w.wait(ctx, resourceList, sw) } func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { @@ -85,8 +111,7 @@ 
func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResourceStatus(ctx, resources, statusCollector, status.NotFoundStatus, w.log) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log)) <-done if statusCollector.Error != nil { @@ -129,8 +154,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) statusCollector := collector.NewResourceStatusCollector(resources) - go logResourceStatus(cancelCtx, resources, statusCollector, status.CurrentStatus, w.log) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log)) <-done if statusCollector.Error != nil { @@ -153,38 +177,33 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w return nil } -func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { - return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { - rss := []*event.ResourceStatus{} +func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { + return func(statusCollector *collector.ResourceStatusCollector, e event.Event) { + var rss []*event.ResourceStatus + var nonDesiredResources []*event.ResourceStatus for _, rs := range statusCollector.ResourceStatuses { if rs == nil { continue } rss = append(rss, rs) + if rs.Status != desired { + nonDesiredResources = append(nonDesiredResources, rs) + } } + if aggregator.AggregateStatus(rss, desired) == desired { cancel() return } - } -} -func logResourceStatus(ctx context.Context, resources []object.ObjMetadata, sc *collector.ResourceStatusCollector, desiredStatus status.Status, log func(string, ...interface{})) { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - for _, id := range resources { - rs := sc.ResourceStatuses[id] - if rs.Status != desiredStatus { - log("waiting for resource, name: %s, kind: %s, desired status: %s, actual status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, desiredStatus, rs.Status) - // only log one resource to not overwhelm the logs - break - } - } + if len(nonDesiredResources) > 0 { + // Log only the first resource so the user knows what they're waiting for without being overwhelmed + sort.Slice(nonDesiredResources, func(i, j int) bool { + return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name + }) + first := nonDesiredResources[0] + logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s", + first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 131224e8b..df16bf7e9 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -17,9 +17,7 @@ limitations under the License. 
package kube // import "helm.sh/helm/v3/pkg/kube" import ( - "context" "errors" - "fmt" "testing" "time" @@ -35,10 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" "sigs.k8s.io/cli-utils/pkg/testutil" ) @@ -46,7 +40,7 @@ var podCurrentManifest = ` apiVersion: v1 kind: Pod metadata: - name: good-pod + name: current-pod namespace: ns status: conditions: @@ -100,11 +94,21 @@ status: status: "True" ` +var podCompleteManifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: good-pod + namespace: ns +status: + phase: Succeeded +` + var pausedDeploymentManifest = ` apiVersion: apps/v1 kind: Deployment metadata: - name: nginx + name: paused namespace: ns-1 generation: 1 spec: @@ -125,6 +129,30 @@ spec: - containerPort: 80 ` +var notReadyDeploymentManifest = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: not-ready + namespace: ns-1 + generation: 1 +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.19.6 + ports: + - containerPort: 80 +` + func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { gvk := obj.GroupVersionKind() mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) @@ -132,31 +160,6 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func TestStatusLogger(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*1500) - defer cancel() - readyPod := object.ObjMetadata{ - Name: "readyPod", - GroupKind: schema.GroupKind{Kind: "Pod"}, - } - notReadyPod := object.ObjMetadata{ - Name: "notReadyPod", - GroupKind: schema.GroupKind{Kind: "Pod"}, - } - objs := []object.ObjMetadata{readyPod, notReadyPod} - resourceStatusCollector := collector.NewResourceStatusCollector(objs) - resourceStatusCollector.ResourceStatuses[readyPod] = &event.ResourceStatus{ - Identifier: readyPod, - Status: status.CurrentStatus, - } - expectedMessage := "waiting for resource, name: notReadyPod, kind: Pod, desired status: Current, actual status: Unknown" - testLogger := func(message string, args ...interface{}) { - assert.Equal(t, expectedMessage, fmt.Sprintf(message, args...)) - } - logResourceStatus(ctx, objs, resourceStatusCollector, status.CurrentStatus, testLogger) -} - func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -175,7 +178,7 @@ func TestStatusWaitForDelete(t *testing.T) { name: "error when not all objects are deleted", manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, manifestsToDelete: []string{jobCompleteManifest}, - expectErrs: []error{errors.New("resource still exists, name: good-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, }, } for _, tt := range tests { @@ -378,3 +381,73 @@ func TestWaitForJobComplete(t *testing.T) { }) } } + +func TestWatchForReady(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + expectErrs []error + }{ + { + name: "succeeds if pod and job are complete", + 
objManifests: []string{jobCompleteManifest, podCompleteManifest},
+		},
+		{
+			name:         "succeeds even when a resource that's not a pod or job is complete",
+			objManifests: []string{notReadyDeploymentManifest},
+		},
+		{
+			name:         "Fails if job is not complete",
+			objManifests: []string{jobReadyManifest},
+			expectErrs:   []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+		},
+		{
+			name:         "Fails if pod is not complete",
+			objManifests: []string{podCurrentManifest},
+			expectErrs:   []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			c := newTestClient(t)
+			fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+			fakeMapper := testutil.NewFakeRESTMapper(
+				v1.SchemeGroupVersion.WithKind("Pod"),
+				appsv1.SchemeGroupVersion.WithKind("Deployment"),
+				batchv1.SchemeGroupVersion.WithKind("Job"),
+			)
+			statusWaiter := statusWaiter{
+				client:     fakeClient,
+				restMapper: fakeMapper,
+				log:        t.Logf,
+			}
+			objs := []runtime.Object{}
+			for _, podYaml := range tt.objManifests {
+				m := make(map[string]interface{})
+				err := yaml.Unmarshal([]byte(podYaml), &m)
+				assert.NoError(t, err)
+				resource := &unstructured.Unstructured{Object: m}
+				objs = append(objs, resource)
+				gvr := getGVR(t, fakeMapper, resource)
+				err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace())
+				assert.NoError(t, err)
+			}
+			resourceList := ResourceList{}
+			for _, obj := range objs {
+				list, err := c.Build(objBody(obj), false)
+				assert.NoError(t, err)
+				resourceList = append(resourceList, list...)
+			}
+
+			err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
+			if tt.expectErrs != nil {
+				assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+				return
+			}
+			assert.NoError(t, err)
+		})
+	}
+}

From f866409c508c4b5430f0943b95f25ffbfd931c3b Mon Sep 17 00:00:00 2001
From: Austin Abro
Date: Mon, 10 Feb 2025 15:13:18 +0000
Subject: [PATCH 062/192] move statusreaders to its own package

Signed-off-by: Austin Abro
---
 {pkg/kube => internal/statusreaders}/job_status_reader.go      | 2 +-
 .../statusreaders}/job_status_reader_test.go                   | 2 +-
 {pkg/kube => internal/statusreaders}/pod_status_reader.go      | 4 +---
 .../statusreaders}/pod_status_reader_test.go                   | 3 +--
 pkg/kube/statuswait.go                                         | 7 ++++---
 5 files changed, 8 insertions(+), 10 deletions(-)
 rename {pkg/kube => internal/statusreaders}/job_status_reader.go (99%)
 rename {pkg/kube => internal/statusreaders}/job_status_reader_test.go (99%)
 rename {pkg/kube => internal/statusreaders}/pod_status_reader.go (95%)
 rename {pkg/kube => internal/statusreaders}/pod_status_reader_test.go (95%)

diff --git a/pkg/kube/job_status_reader.go b/internal/statusreaders/job_status_reader.go
similarity index 99%
rename from pkg/kube/job_status_reader.go
rename to internal/statusreaders/job_status_reader.go
index f6eb8d3d9..d493d9e13 100644
--- a/pkg/kube/job_status_reader.go
+++ b/internal/statusreaders/job_status_reader.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
*/ -package kube +package statusreaders // This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go diff --git a/pkg/kube/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go similarity index 99% rename from pkg/kube/job_status_reader_test.go rename to internal/statusreaders/job_status_reader_test.go index cd0dcedeb..70e4ee29a 100644 --- a/pkg/kube/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube +package statusreaders // This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( diff --git a/pkg/kube/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go similarity index 95% rename from pkg/kube/pod_status_reader.go rename to internal/statusreaders/pod_status_reader.go index c44af542e..d3daf7cc3 100644 --- a/pkg/kube/pod_status_reader.go +++ b/internal/statusreaders/pod_status_reader.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube - -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +package statusreaders import ( "context" diff --git a/pkg/kube/pod_status_reader_test.go b/internal/statusreaders/pod_status_reader_test.go similarity index 95% rename from pkg/kube/pod_status_reader_test.go rename to internal/statusreaders/pod_status_reader_test.go index bb08f041a..a151f1aed 100644 --- a/pkg/kube/pod_status_reader_test.go +++ b/internal/statusreaders/pod_status_reader_test.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube +package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( "testing" diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 4aff42ff2..eaa473cd4 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,6 +23,7 @@ import ( "sort" "time" + helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -55,8 +56,8 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D defer cancel() w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) - jobSR := NewCustomJobStatusReader(w.restMapper) - podSR := NewCustomPodStatusReader(w.restMapper) + jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) + podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) @@ -84,7 +85,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura defer cancel() w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) - newCustomJobStatusReader := NewCustomJobStatusReader(w.restMapper) + newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) From 7207565e1284e2b597ffad5179d67487ab9478c1 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:31:43 +0000 Subject: [PATCH 063/192] lint Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 7 +++--- pkg/kube/wait.go | 50 +++++++++++++++++++++--------------------- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index eaa473cd4..0729d0d1b 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,7 +23,6 @@ import ( "sort" "time" - helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -36,6 +35,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" "sigs.k8s.io/cli-utils/pkg/object" + + helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) type statusWaiter struct { @@ -44,7 +45,7 @@ type statusWaiter struct { log func(string, ...interface{}) } -func alwaysReady(u *unstructured.Unstructured) (*status.Result, error) { +func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { return &status.Result{ Status: status.CurrentStatus, Message: "Resource is current", @@ -179,7 +180,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w } func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { - return func(statusCollector *collector.ResourceStatusCollector, e event.Event) { + return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus for _, rs 
:= range statusCollector.ResourceStatuses { diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index fdb3c9087..83b352201 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -27,6 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + batch "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -42,7 +43,6 @@ import ( "k8s.io/client-go/kubernetes" cachetools "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" - batch "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/util/wait" ) @@ -55,20 +55,20 @@ type HelmWaiter struct { kubeClient *kubernetes.Clientset } -func (w *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { - w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true)) - return w.waitForResources(resources, timeout) +func (hw *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { + hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) + return hw.waitForResources(resources, timeout) } -func (w *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { - w.c = NewReadyChecker(w.kubeClient, w.log, PausedAsReady(true), CheckJobs(true)) - return w.waitForResources(resources, timeout) +func (hw *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) + return hw.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { - w.log("beginning wait for %d resources with timeout of %v", len(created), timeout) +func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { + hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -81,15 +81,15 @@ func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duratio return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { waitRetries := 30 for i, v := range created { - ready, err := w.c.IsReady(ctx, v) + ready, err := hw.c.IsReady(ctx, v) - if waitRetries > 0 && w.isRetryableError(err, v) { + if waitRetries > 0 && hw.isRetryableError(err, v) { numberOfErrors[i]++ if numberOfErrors[i] > waitRetries { - w.log("Max number of retries reached") + hw.log("Max number of retries reached") return false, err } - w.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) + hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) return false, nil } numberOfErrors[i] = 0 @@ -101,28 +101,28 @@ func (w *HelmWaiter) waitForResources(created ResourceList, timeout time.Duratio }) } -func (w *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { +func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } - w.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) + hw.log("Error received when checking status of resource %s. 
Error: '%s', Resource details: '%s'", resource.Name, err, resource) if ev, ok := err.(*apierrors.StatusError); ok { statusCode := ev.Status().Code - retryable := w.isRetryableHTTPStatusCode(statusCode) - w.log("Status code received: %d. Retryable error? %t", statusCode, retryable) + retryable := hw.isRetryableHTTPStatusCode(statusCode) + hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable) return retryable } - w.log("Retryable error? %t", true) + hw.log("Retryable error? %t", true) return true } -func (w *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (hw *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (w *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { - w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) +func (hw *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { + hw.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -314,7 +314,7 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // waitForPodSuccess is a helper that waits for a pod to complete. // // This operates on an event returned from a watcher. -func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { +func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*v1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) @@ -322,14 +322,14 @@ func (c *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, e switch o.Status.Phase { case v1.PodSucceeded: - c.log("Pod %s succeeded", o.Name) + hw.log("Pod %s succeeded", o.Name) return true, nil case v1.PodFailed: return true, errors.Errorf("pod %s failed", o.Name) case v1.PodPending: - c.log("Pod %s pending", o.Name) + hw.log("Pod %s pending", o.Name) case v1.PodRunning: - c.log("Pod %s running", o.Name) + hw.log("Pod %s running", o.Name) } return false, nil From bd3b5ee5d05391a63ced7c32cba05caa62c8d968 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 10 Feb 2025 15:51:14 +0000 Subject: [PATCH 064/192] comment Signed-off-by: Austin Abro --- pkg/kube/interface.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 0e6da1094..7af8ebca6 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -70,7 +70,6 @@ type Waiter interface { // For Pods, "ready" means the Pod phase is marked "succeeded". // For all other kinds, it means the kind was created or modified without // error. - // TODO: Is watch until ready really behavior we want over the resources actually being ready? 
WatchUntilReady(resources ResourceList, timeout time.Duration) error } From 2b03c527f19f47039116143417d0e58422b3e789 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 20:38:28 +0000 Subject: [PATCH 065/192] set command line flags Signed-off-by: Austin Abro --- cmd/helm/flags.go | 44 ++++++++++++++++++++++++++++++++++++++ cmd/helm/install.go | 4 ++-- cmd/helm/rollback.go | 2 +- cmd/helm/upgrade.go | 2 +- pkg/action/action.go | 2 +- pkg/action/install.go | 14 +++++++++--- pkg/action/install_test.go | 7 +++--- pkg/action/rollback.go | 9 ++++++-- pkg/action/upgrade.go | 20 ++++++++++++----- pkg/action/upgrade_test.go | 11 +++++----- pkg/kube/client.go | 8 +++---- pkg/kube/client_test.go | 4 ++-- 12 files changed, 98 insertions(+), 29 deletions(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index 3d159babd..c2e5e295d 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -32,6 +32,7 @@ import ( "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/helmpath" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/postrender" "helm.sh/helm/v4/pkg/repo" ) @@ -51,6 +52,49 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) { f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line") } +func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { + cmd.Flags().Var( + newWaitValue(wait), + "wait", + "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Options are (true, false, watcher, and legacy)", + ) + // Sets the strategy to use the watcher strategy if `--wait` is used without an argument + cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) +} + +type waitValue kube.WaitStrategy + +func newWaitValue(ws *kube.WaitStrategy) *waitValue { + return (*waitValue)(ws) +} + +func (ws *waitValue) String() string { + if ws == nil { + return "" + } + return string(*ws) +} + +func (ws *waitValue) Set(s string) error { + switch s { + case string(kube.StatusWatcherStrategy), string(kube.LegacyWaiterStrategy): + *ws = waitValue(s) + return nil + case "true": + *ws = waitValue(kube.StatusWatcherStrategy) + return nil + case "false": + *ws = "" + return nil + default: + return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyWaiterStrategy) + } +} + +func (ws *waitValue) Type() string { + return "WaitStrategy" +} + func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) { f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used") f.BoolVar(&c.Verify, "verify", false, "verify the package before using it") diff --git a/cmd/helm/install.go b/cmd/helm/install.go index ec651140c..16545b6ae 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -190,8 +190,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. 
This is unsafe in production") - f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") + f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)") f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release") @@ -209,6 +208,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) + AddWaitFlag(cmd, &client.Wait) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { requiredArgs := 2 diff --git a/cmd/helm/rollback.go b/cmd/helm/rollback.go index a65f30a1f..83d3089e2 100644 --- a/cmd/helm/rollback.go +++ b/cmd/helm/rollback.go @@ -81,10 +81,10 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") + AddWaitFlag(cmd, &client.Wait) return cmd } diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go index 7b4267894..e5e485eae 100644 --- a/cmd/helm/upgrade.go +++ b/cmd/helm/upgrade.go @@ -278,7 +278,6 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart") f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. 
If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") @@ -295,6 +294,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { addValueOptionsFlags(f, valueOpts) bindOutputFlag(cmd, &outfmt) bindPostRenderFlag(cmd, &client.PostRenderer) + AddWaitFlag(cmd, &client.Wait) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 2 { diff --git a/pkg/action/action.go b/pkg/action/action.go index 0157ce1cc..a2d7523a5 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWaiterStrategy) + kc, err := kube.New(getter, kube.StatusWatcherStrategy) if err != nil { return err } diff --git a/pkg/action/install.go b/pkg/action/install.go index ef3f0fdc7..61b5ebd33 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -79,7 +79,7 @@ type Install struct { HideSecret bool DisableHooks bool Replace bool - Wait bool + Wait kube.WaitStrategy WaitForJobs bool Devel bool DependencyUpdate bool @@ -157,6 +157,10 @@ func (i *Install) GetRegistryClient() *registry.Client { return i.ChartPathOptions.registryClient } +func (i *Install) shouldWait() bool { + return i.Wait != "" +} + func (i *Install) installCRDs(crds []chart.CRD) error { // We do these one file at a time in the order they were read. totalItems := []*resource.Info{} @@ -289,7 +293,11 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. 
This makes it so // the user doesn't have to specify both - i.Wait = i.Wait || i.Atomic + if !i.shouldWait() { + if i.Atomic { + i.Wait = "watcher" + } + } caps, err := i.cfg.getCapabilities() if err != nil { @@ -465,7 +473,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - if i.Wait { + if i.shouldWait() { if i.WaitForJobs { err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) } else { diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 9f738f0bc..6377cfda5 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -34,6 +34,7 @@ import ( "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/chart" "helm.sh/helm/v4/pkg/chartutil" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/release" "helm.sh/helm/v4/pkg/storage/driver" @@ -407,7 +408,7 @@ func TestInstallRelease_Wait(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} goroutines := runtime.NumGoroutine() @@ -426,7 +427,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, cancel := context.WithCancel(context.Background()) @@ -449,7 +450,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = true + instAction.Wait = kube.StatusWatcherStrategy instAction.WaitForJobs = true vals := map[string]interface{}{} diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 12dee35ce..8ec134832 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "helm.sh/helm/v4/pkg/chartutil" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/release" helmtime "helm.sh/helm/v4/pkg/time" ) @@ -37,7 +38,7 @@ type Rollback struct { Version int Timeout time.Duration - Wait bool + Wait kube.WaitStrategy WaitForJobs bool DisableHooks bool DryRun bool @@ -89,6 +90,10 @@ func (r *Rollback) Run(name string) error { return nil } +func (r *Rollback) shouldWait() bool { + return !(r.Wait == "") +} + // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { @@ -223,7 +228,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } } - if r.Wait { + if r.shouldWait() { if r.WaitForJobs { if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index f3e9a33bc..8d103ab6b 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -64,8 +64,8 @@ type Upgrade struct { SkipCRDs bool // Timeout is the timeout for this operation Timeout time.Duration - // Wait determines whether the wait operation should be performed after the upgrade is requested. 
- Wait bool + // Wait determines whether the wait operation should be performed and what type of wait. + Wait kube.WaitStrategy // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. @@ -155,7 +155,11 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - u.Wait = u.Wait || u.Atomic + if !u.shouldWait() { + if u.Atomic { + u.Wait = kube.StatusWatcherStrategy + } + } if err := chartutil.ValidateReleaseName(name); err != nil { return nil, errors.Errorf("release name is invalid: %s", name) @@ -186,6 +190,10 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return res, nil } +func (u *Upgrade) shouldWait() bool { + return u.Wait != "" +} + // isDryRun returns true if Upgrade is set to run as a DryRun func (u *Upgrade) isDryRun() bool { if u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true" { @@ -443,7 +451,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele } } - if u.Wait { + if u.shouldWait() { u.cfg.Log( "waiting for release %s resources (created: %d updated: %d deleted: %d)", upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) @@ -526,7 +534,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - rollin.Wait = true + if !u.shouldWait() { + rollin.Wait = kube.StatusWatcherStrategy + } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.Recreate = u.Recreate diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index 5437490cb..93c54560a 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -24,6 +24,7 @@ import ( "time" "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/storage/driver" "github.com/stretchr/testify/assert" @@ -52,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) { rel.Info.Status = release.StatusDeployed req.NoError(upAction.cfg.Releases.Create(rel)) - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, done := context.WithCancel(context.Background()) @@ -82,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} res, err := upAction.Run(rel.Name, buildChart(), vals) @@ -104,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy upAction.WaitForJobs = true vals := map[string]interface{}{} @@ -128,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { failer.WaitError = fmt.Errorf("I timed out") failer.DeleteError = fmt.Errorf("I tried to delete nil") upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy upAction.CleanupOnFail = true vals := map[string]interface{}{} @@ -395,7 +396,7 @@ func 
TestUpgradeRelease_Interrupted_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second upAction.cfg.KubeClient = failer - upAction.Wait = true + upAction.Wait = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx := context.Background() diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 8dca1c51b..ba7794ac4 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -80,11 +80,11 @@ type Client struct { Waiter } -type WaitStrategy int +type WaitStrategy string const ( - StatusWaiterStrategy WaitStrategy = iota - LegacyWaiterStrategy + StatusWatcherStrategy WaitStrategy = "watcher" + LegacyWaiterStrategy WaitStrategy = "legacy" ) func init() { @@ -106,7 +106,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { return nil, err } return &HelmWaiter{kubeClient: kc, log: c.Log}, nil - case StatusWaiterStrategy: + case StatusWatcherStrategy: cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index cdf75938e..4c8719f98 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -659,7 +659,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWaiterStrategy) + c, err := New(nil, StatusWatcherStrategy) if err != nil { t.Fatal(err) } @@ -672,7 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWaiterStrategy) + c, err = New(nil, StatusWatcherStrategy) if err != nil { t.Fatal(err) } From f2dd2c91093eeecff6747f9c85a9757ccb6d4b80 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:10:06 +0000 Subject: [PATCH 066/192] add hook only waiter Signed-off-by: Austin Abro --- cmd/helm/flags.go | 9 ++++--- cmd/helm/install.go | 2 +- cmd/helm/uninstall.go | 2 +- cmd/helm/upgrade.go | 2 +- pkg/action/install.go | 20 +++++++------- pkg/action/rollback.go | 28 +++++++++----------- pkg/action/uninstall.go | 8 +++--- pkg/action/uninstall_test.go | 5 ++-- pkg/action/upgrade.go | 35 +++++++++---------------- pkg/kube/client.go | 51 ++++++++++++++++++++++-------------- pkg/kube/client_test.go | 6 ++--- pkg/kube/statuswait.go | 20 ++++++++++++++ 12 files changed, 103 insertions(+), 85 deletions(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index c2e5e295d..d1f0fec58 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -54,7 +54,7 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) { func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( - newWaitValue(wait), + newWaitValue(kube.HookOnlyStrategy, wait), "wait", "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. 
Options are (true, false, watcher, and legacy)", ) @@ -64,7 +64,8 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { type waitValue kube.WaitStrategy -func newWaitValue(ws *kube.WaitStrategy) *waitValue { +func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue { + *ws = defaultValue return (*waitValue)(ws) } @@ -77,7 +78,7 @@ func (ws *waitValue) String() string { func (ws *waitValue) Set(s string) error { switch s { - case string(kube.StatusWatcherStrategy), string(kube.LegacyWaiterStrategy): + case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy): *ws = waitValue(s) return nil case "true": @@ -87,7 +88,7 @@ func (ws *waitValue) Set(s string) error { *ws = "" return nil default: - return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyWaiterStrategy) + return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) } } diff --git a/cmd/helm/install.go b/cmd/helm/install.go index 16545b6ae..649c5c8b8 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -198,7 +198,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored") f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema") - f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used") + f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation") diff --git a/cmd/helm/uninstall.go b/cmd/helm/uninstall.go index 9c5e25c87..3504fd322 100644 --- a/cmd/helm/uninstall.go +++ b/cmd/helm/uninstall.go @@ -76,10 +76,10 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") - f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout") f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. 
Defaults to background.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") + AddWaitFlag(cmd, &client.Wait) return cmd } diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go index e5e485eae..092f6bdcc 100644 --- a/cmd/helm/upgrade.go +++ b/cmd/helm/upgrade.go @@ -279,7 +279,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") - f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used") + f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") diff --git a/pkg/action/install.go b/pkg/action/install.go index 61b5ebd33..a12dee11d 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -293,9 +293,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. 
This makes it so // the user doesn't have to specify both - if !i.shouldWait() { + if i.Wait == kube.HookOnlyStrategy { if i.Atomic { - i.Wait = "watcher" + i.Wait = kube.StatusWatcherStrategy } } @@ -473,15 +473,13 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - if i.shouldWait() { - if i.WaitForJobs { - err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) - } else { - err = i.cfg.KubeClient.Wait(resources, i.Timeout) - } - if err != nil { - return rel, err - } + if i.WaitForJobs { + err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) + } else { + err = i.cfg.KubeClient.Wait(resources, i.Timeout) + } + if err != nil { + return rel, err } if !i.DisableHooks { diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 8ec134832..8cb8b4ed4 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -228,21 +228,19 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } } - if r.shouldWait() { - if r.WaitForJobs { - if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { - targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) - r.cfg.recordRelease(currentRelease) - r.cfg.recordRelease(targetRelease) - return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) - } - } else { - if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { - targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) - r.cfg.recordRelease(currentRelease) - r.cfg.recordRelease(targetRelease) - return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) - } + if r.WaitForJobs { + if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } else { + if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) } } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 75d999976..0a03f2180 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -41,7 +41,7 @@ type Uninstall struct { DryRun bool IgnoreNotFound bool KeepHistory bool - Wait bool + Wait kube.WaitStrategy DeletionPropagation string Timeout time.Duration Description string @@ -130,10 +130,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } res.Info = kept - if u.Wait { - if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { - errs = append(errs, err) - } + if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) } if !u.DisableHooks { diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index eca9e6ad8..1c67cab7f 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" 
"helm.sh/helm/v4/pkg/release" ) @@ -82,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = true + unAction.Wait = kube.StatusWatcherStrategy rel := releaseStub() rel.Name = "come-fail-away" @@ -113,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = false + unAction.Wait = kube.HookOnlyStrategy unAction.DeletionPropagation = "foreground" rel := releaseStub() diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 8d103ab6b..671426a27 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -155,7 +155,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if !u.shouldWait() { + if u.Wait == kube.HookOnlyStrategy { if u.Atomic { u.Wait = kube.StatusWatcherStrategy } @@ -190,10 +190,6 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return res, nil } -func (u *Upgrade) shouldWait() bool { - return u.Wait != "" -} - // isDryRun returns true if Upgrade is set to run as a DryRun func (u *Upgrade) isDryRun() bool { if u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true" { @@ -451,22 +447,17 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele } } - if u.shouldWait() { - u.cfg.Log( - "waiting for release %s resources (created: %d updated: %d deleted: %d)", - upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) - if u.WaitForJobs { - if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { - u.cfg.recordRelease(originalRelease) - u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) - return - } - } else { - if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { - u.cfg.recordRelease(originalRelease) - u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) - return - } + if u.WaitForJobs { + if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } else { + if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return } } @@ -534,7 +525,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - if !u.shouldWait() { + if u.Wait == kube.HookOnlyStrategy { rollin.Wait = kube.StatusWatcherStrategy } rollin.WaitForJobs = u.WaitForJobs diff --git a/pkg/kube/client.go b/pkg/kube/client.go index ba7794ac4..de28c3421 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -84,7 +84,8 @@ type WaitStrategy string const ( StatusWatcherStrategy WaitStrategy = "watcher" - LegacyWaiterStrategy WaitStrategy = "legacy" + LegacyStrategy WaitStrategy = "legacy" + HookOnlyStrategy WaitStrategy = "noop" ) func init() { @@ -98,36 +99,46 @@ func init() { } } +func (c *Client) newStatusWatcher() (*statusWaiter, error) { + cfg, err := c.Factory.ToRESTConfig() + if err != nil { + return nil, err + } + dynamicClient, err := c.Factory.DynamicClient() + if err != nil { + return nil, err + } + 
httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, err + } + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + if err != nil { + return nil, err + } + return &statusWaiter{ + restMapper: restMapper, + client: dynamicClient, + log: c.Log, + }, nil +} + func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { - case LegacyWaiterStrategy: + case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() if err != nil { return nil, err } return &HelmWaiter{kubeClient: kc, log: c.Log}, nil case StatusWatcherStrategy: - cfg, err := c.Factory.ToRESTConfig() - if err != nil { - return nil, err - } - dynamicClient, err := c.Factory.DynamicClient() - if err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(cfg) - if err != nil { - return nil, err - } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + return c.newStatusWatcher() + case HookOnlyStrategy: + sw, err := c.newStatusWatcher() if err != nil { return nil, err } - return &statusWaiter{ - restMapper: restMapper, - client: dynamicClient, - log: c.Log, - }, nil + return &hookOnlyWaiter{sw: sw}, nil default: return nil, errors.New("unknown wait strategy") } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 4c8719f98..8c8f89cdb 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -513,7 +513,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -570,7 +570,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -629,7 +629,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyWaiterStrategy) + c.Waiter, err = c.newWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 0729d0d1b..4a0dcd0d2 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -209,3 +209,23 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func } } } + +type hookOnlyWaiter struct { + sw *statusWaiter +} + +func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { + return w.sw.WatchUntilReady(resourceList, timeout) +} + +func (w *hookOnlyWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { + return nil +} + +func (w *hookOnlyWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { + return nil +} + +func (w *hookOnlyWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { + return nil +} From 978d5a33181c5f102a2d70e5b1a9f756e9e3dc61 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:11:48 +0000 Subject: [PATCH 067/192] lint Signed-off-by: Austin Abro --- pkg/kube/wait.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 83b352201..a7e3a1c7e 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -27,10 +27,8 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" - batch "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -294,15 +292,15 @@ func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info // // This operates on an event returned from a watcher. func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*batch.Job) + o, ok := obj.(*batchv1.Job) if !ok { return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) } for _, c := range o.Status.Conditions { - if c.Type == batch.JobComplete && c.Status == "True" { + if c.Type == batchv1.JobComplete && c.Status == "True" { return true, nil - } else if c.Type == batch.JobFailed && c.Status == "True" { + } else if c.Type == batchv1.JobFailed && c.Status == "True" { return true, errors.Errorf("job %s failed: %s", name, c.Reason) } } @@ -315,20 +313,20 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // // This operates on an event returned from a watcher. func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { - o, ok := obj.(*v1.Pod) + o, ok := obj.(*corev1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) } switch o.Status.Phase { - case v1.PodSucceeded: + case corev1.PodSucceeded: hw.log("Pod %s succeeded", o.Name) return true, nil - case v1.PodFailed: + case corev1.PodFailed: return true, errors.Errorf("pod %s failed", o.Name) - case v1.PodPending: + case corev1.PodPending: hw.log("Pod %s pending", o.Name) - case v1.PodRunning: + case corev1.PodRunning: hw.log("Pod %s running", o.Name) } From 7fde4962a85fb58c09e5412a7114592ca27a3d6a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Sun, 16 Feb 2025 21:33:15 +0000 Subject: [PATCH 068/192] set waiter in functions Signed-off-by: Austin Abro --- pkg/action/action.go | 2 +- pkg/action/install.go | 7 +++---- pkg/action/rollback.go | 8 ++++---- pkg/action/uninstall.go | 5 +++++ pkg/action/upgrade.go | 8 ++++++++ pkg/kube/client.go | 13 +++++++++++-- pkg/kube/client_test.go | 4 ++-- pkg/kube/fake/fake.go | 4 ++++ pkg/kube/fake/printer.go | 4 ++++ pkg/kube/interface.go | 2 ++ 10 files changed, 44 insertions(+), 13 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index a2d7523a5..d067c67ea 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -371,7 +371,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter, kube.StatusWatcherStrategy) + kc, err := kube.New(getter) if err != nil { return err } diff --git a/pkg/action/install.go b/pkg/action/install.go index a12dee11d..a589aaf04 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -157,10 +157,6 @@ func (i *Install) GetRegistryClient() *registry.Client { return i.ChartPathOptions.registryClient } -func (i *Install) shouldWait() bool { - return i.Wait != "" -} - func (i *Install) installCRDs(crds []chart.CRD) error { // We do these one file at a time in the order they were read. 
totalItems := []*resource.Info{} @@ -298,6 +294,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma i.Wait = kube.StatusWatcherStrategy } } + if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } caps, err := i.cfg.getCapabilities() if err != nil { diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 8cb8b4ed4..804bdbd58 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -61,6 +61,10 @@ func (r *Rollback) Run(name string) error { return err } + if err := r.cfg.KubeClient.SetWaiter(r.Wait); err != nil { + return fmt.Errorf("failed to set kube client waiter: %w", err) + } + r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Log("preparing rollback of %s", name) @@ -90,10 +94,6 @@ func (r *Rollback) Run(name string) error { return nil } -func (r *Rollback) shouldWait() bool { - return !(r.Wait == "") -} - // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 0a03f2180..f21551bbf 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,6 +17,7 @@ limitations under the License. package action import ( + "fmt" "strings" "time" @@ -60,6 +61,10 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, err } + if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } + if u.DryRun { // In the dry run case, just see if the release exists r, err := u.cfg.releaseContent(name, 0) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 671426a27..626c1e6ad 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -160,6 +160,9 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. u.Wait = kube.StatusWatcherStrategy } } + if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { + return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + } if err := chartutil.ValidateReleaseName(name); err != nil { return nil, errors.Errorf("release name is invalid: %s", name) @@ -528,6 +531,11 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e if u.Wait == kube.HookOnlyStrategy { rollin.Wait = kube.StatusWatcherStrategy } + // TODO pretty sure this is unnecessary as the waiter is already set if atomic at the start of upgrade + werr := u.cfg.KubeClient.SetWaiter(u.Wait) + if werr != nil { + return rel, errors.Wrapf(werr, "an error occurred while creating the waiter. original upgrade error: %s", err) + } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.Recreate = u.Recreate diff --git a/pkg/kube/client.go b/pkg/kube/client.go index de28c3421..425152006 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -144,8 +144,17 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { } } +func (c *Client) SetWaiter(ws WaitStrategy) error { + var err error + c.Waiter, err = c.newWaiter(ws) + if err != nil { + return err + } + return nil +} + // New creates a new Client. 
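// A minimal caller sketch (hypothetical code, assuming the New, SetWaiter, and
// WaitStrategy shapes introduced in this series; resources is a kube.ResourceList
// built elsewhere):
//
//	c, err := kube.New(nil) // the waiter defaults to HookOnlyStrategy
//	if err != nil {
//		return err
//	}
//	if err := c.SetWaiter(kube.StatusWatcherStrategy); err != nil {
//		return err
//	}
//	// Wait, WaitWithJobs, and WaitForDelete now run through the status watcher.
//	err = c.Wait(resources, 300*time.Second)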
-func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, error) { +func New(getter genericclioptions.RESTClientGetter) (*Client, error) { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -155,7 +164,7 @@ func New(getter genericclioptions.RESTClientGetter, ws WaitStrategy) (*Client, e Log: nopLogger, } var err error - c.Waiter, err = c.newWaiter(ws) + c.Waiter, err = c.newWaiter(HookOnlyStrategy) if err != nil { return nil, err } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 8c8f89cdb..a5ad2b1eb 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -659,7 +659,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil, StatusWatcherStrategy) + c, err := New(nil) if err != nil { t.Fatal(err) } @@ -672,7 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil, StatusWatcherStrategy) + c, err = New(nil) if err != nil { t.Fatal(err) } diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index ceca3c113..d722320f8 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -139,6 +139,10 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } +func (f *FailingKubeClient) SetWaiter(ws kube.WaitStrategy) error { + return nil +} + func createDummyResourceList() kube.ResourceList { var resInfo resource.Info resInfo.Name = "dummyName" diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 0b957d725..3c0430aa1 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -121,6 +121,10 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } +func (f *PrintingKubeClient) SetWaiter(ws kube.WaitStrategy) error { + return nil +} + func bufferize(resources kube.ResourceList) io.Reader { var builder strings.Builder for _, info := range resources { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 7af8ebca6..fc74a9833 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -47,6 +47,8 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. 
IsReachable() error + // SetWaiter sets the Kube.Waiter + SetWaiter(ws WaitStrategy) error Waiter } From 5d1225549755832468972e4491991014441946f7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 17 Feb 2025 14:53:34 +0000 Subject: [PATCH 069/192] wait for delete Signed-off-by: Austin Abro --- pkg/action/uninstall_test.go | 2 +- pkg/kube/fake/fake.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 1c67cab7f..5d2b33bdf 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -100,7 +100,7 @@ func TestUninstallRelease_Wait(t *testing.T) { }` unAction.cfg.Releases.Create(rel) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("U timed out") + failer.WaitForDeleteError = fmt.Errorf("U timed out") unAction.cfg.KubeClient = failer res, err := unAction.Run(rel.Name) is.Error(err) diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index d722320f8..087fa89cb 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -36,6 +36,7 @@ type FailingKubeClient struct { CreateError error GetError error WaitError error + WaitForDeleteError error DeleteError error DeleteWithPropagationError error WatchUntilReadyError error @@ -82,8 +83,8 @@ func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Dur // WaitForDelete returns the configured error if set or prints func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { - if f.WaitError != nil { - return f.WaitError + if f.WaitForDeleteError != nil { + return f.WaitForDeleteError } return f.PrintingKubeClient.WaitForDelete(resources, d) } From ecd531657778daf3fde3777e39fbf628fa9eb4a6 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:50:04 +0000 Subject: [PATCH 070/192] lint Signed-off-by: Austin Abro --- cmd/helm/install.go | 2 +- pkg/kube/fake/fake.go | 2 +- pkg/kube/statuswait.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/helm/install.go b/cmd/helm/install.go index 649c5c8b8..4d72be966 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -190,7 +190,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production") - f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. 
It will wait for as long as --timeout") f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)") f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release") diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index 087fa89cb..c4322733a 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -140,7 +140,7 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } -func (f *FailingKubeClient) SetWaiter(ws kube.WaitStrategy) error { +func (f *FailingKubeClient) SetWaiter(_ kube.WaitStrategy) error { return nil } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 4a0dcd0d2..3c1e90a36 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -218,14 +218,14 @@ func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time return w.sw.WatchUntilReady(resourceList, timeout) } -func (w *hookOnlyWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error { return nil } -func (w *hookOnlyWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error { return nil } -func (w *hookOnlyWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { +func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error { return nil } From efde8304059b791ef48afaf602d5cc4c7a537f3d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:53:06 +0000 Subject: [PATCH 071/192] better name Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 425152006..ff062a172 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -85,7 +85,7 @@ type WaitStrategy string const ( StatusWatcherStrategy WaitStrategy = "watcher" LegacyStrategy WaitStrategy = "legacy" - HookOnlyStrategy WaitStrategy = "noop" + HookOnlyStrategy WaitStrategy = "hookOnly" ) func init() { From ea87c49d1b6ef516d95226ce7fffd610d99ca16c Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:53:47 +0000 Subject: [PATCH 072/192] print Signed-off-by: Austin Abro --- pkg/kube/fake/printer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 3c0430aa1..82649b202 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -121,7 +121,7 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (f *PrintingKubeClient) SetWaiter(ws kube.WaitStrategy) error { +func (p *PrintingKubeClient) SetWaiter(_ kube.WaitStrategy) error { return nil } From 5d31fb09d2110242dd91ff64e9b8e161f38e15d4 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 18 Feb 2025 13:55:13 +0000 Subject: [PATCH 073/192] better help text Signed-off-by: Austin Abro --- cmd/helm/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/helm/flags.go b/cmd/helm/flags.go index d1f0fec58..c73bab63f 100644 --- a/cmd/helm/flags.go +++ b/cmd/helm/flags.go @@ -56,7 +56,7 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if set, will wait until all 
resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Options are (true, false, watcher, and legacy)", + "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are true, false, watcher, and legacy", ) // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) From b79dfd09b0b9a8173ec23e5a72b3a0c444863dee Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 03:51:38 +0000 Subject: [PATCH 074/192] refactor Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 9 +++-- pkg/kube/statuswait_test.go | 70 +++++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 3c1e90a36..baf5814b1 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -187,6 +187,11 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func if rs == nil { continue } + // If a resource is already deleted before waiting has started, it will show as unknown + // this check ensures we don't wait forever for a resource that is already deleted + if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { + continue + } rss = append(rss, rs) if rs.Status != desired { nonDesiredResources = append(nonDesiredResources, rs) @@ -199,12 +204,12 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func } if len(nonDesiredResources) > 0 { - // Log only the first resource so the user knows what they're waiting for without being overwhelmed + // Log a single resource so the user knows what they're waiting for without an overwhelming amount of output sort.Slice(nonDesiredResources, func(i, j int) bool { return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s", + logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n", first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status) } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index df16bf7e9..2b10dfef1 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -160,6 +160,18 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } +func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtime.Object { + objects := []runtime.Object{} + for _, manifest := range manifests { + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(manifest), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + objects = append(objects, resource) + } + return objects +} + func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -190,7 +202,6 @@ func TestStatusWaitForDelete(t *testing.T) { fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) fakeMapper := testutil.NewFakeRESTMapper( v1.SchemeGroupVersion.WithKind("Pod"), - appsv1.SchemeGroupVersion.WithKind("Deployment"), batchv1.SchemeGroupVersion.WithKind("Job"), ) statusWaiter := statusWaiter{ @@ -198,31 +209,25 @@ func TestStatusWaitForDelete(t *testing.T) { client: fakeClient, log: t.Logf, } - createdObjs := 
[]runtime.Object{} - for _, manifest := range tt.manifestsToCreate { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(manifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - createdObjs = append(createdObjs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) + objsToCreate := getUnstructuredObjsFromManifests(t, tt.manifestsToCreate) + for _, objToCreate := range objsToCreate { + u := objToCreate.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) } - for _, manifest := range tt.manifestsToDelete { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(manifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - gvr := getGVR(t, fakeMapper, resource) + objsToDelete := getUnstructuredObjsFromManifests(t, tt.manifestsToDelete) + for _, objToDelete := range objsToDelete { + u := objToDelete.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) go func() { time.Sleep(timeUntilPodDelete) - err = fakeClient.Tracker().Delete(gvr, resource.GetNamespace(), resource.GetName()) + err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) assert.NoError(t, err) }() } resourceList := ResourceList{} - for _, obj := range createdObjs { + for _, obj := range objsToCreate { list, err := c.Build(objBody(obj), false) assert.NoError(t, err) resourceList = append(resourceList, list...) @@ -237,6 +242,35 @@ func TestStatusWaitForDelete(t *testing.T) { } } +func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { + t.Parallel() + c := newTestClient(t) + timeout := time.Second + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + statusWaiter := statusWaiter{ + restMapper: fakeMapper, + client: fakeClient, + log: t.Logf, + } + createdObjs := []runtime.Object{} + m := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(podCurrentManifest), &m) + assert.NoError(t, err) + resource := &unstructured.Unstructured{Object: m} + createdObjs = append(createdObjs, resource) + resourceList := ResourceList{} + for _, obj := range createdObjs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) 
+ } + err = statusWaiter.WaitForDelete(resourceList, timeout) + assert.NoError(t, err) +} + func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { From 75292c5e04e2e6684e6470b59e920e31a23d3492 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 04:05:12 +0000 Subject: [PATCH 075/192] refactor Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 40 +++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 2b10dfef1..d6d7f5e36 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -160,7 +160,7 @@ func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured return mapping.Resource } -func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtime.Object { +func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object { objects := []runtime.Object{} for _, manifest := range manifests { m := make(map[string]interface{}) @@ -172,6 +172,16 @@ func getUnstructuredObjsFromManifests(t *testing.T, manifests []string) []runtim return objects } +func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList { + resourceList := ResourceList{} + for _, obj := range objs { + list, err := c.Build(objBody(obj), false) + assert.NoError(t, err) + resourceList = append(resourceList, list...) + } + return resourceList +} + func TestStatusWaitForDelete(t *testing.T) { t.Parallel() tests := []struct { @@ -209,14 +219,14 @@ func TestStatusWaitForDelete(t *testing.T) { client: fakeClient, log: t.Logf, } - objsToCreate := getUnstructuredObjsFromManifests(t, tt.manifestsToCreate) + objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { u := objToCreate.(*unstructured.Unstructured) gvr := getGVR(t, fakeMapper, u) err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) } - objsToDelete := getUnstructuredObjsFromManifests(t, tt.manifestsToDelete) + objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete) for _, objToDelete := range objsToDelete { u := objToDelete.(*unstructured.Unstructured) gvr := getGVR(t, fakeMapper, u) @@ -226,12 +236,7 @@ func TestStatusWaitForDelete(t *testing.T) { assert.NoError(t, err) }() } - resourceList := ResourceList{} - for _, obj := range objsToCreate { - list, err := c.Build(objBody(obj), false) - assert.NoError(t, err) - resourceList = append(resourceList, list...) - } + resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -255,19 +260,10 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { client: fakeClient, log: t.Logf, } - createdObjs := []runtime.Object{} - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podCurrentManifest), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - createdObjs = append(createdObjs, resource) - resourceList := ResourceList{} - for _, obj := range createdObjs { - list, err := c.Build(objBody(obj), false) - assert.NoError(t, err) - resourceList = append(resourceList, list...) 
- } - err = statusWaiter.WaitForDelete(resourceList, timeout) + // Don't create the object to test that the wait for delete works when the object doesn't exist + objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + resourceList := getResourceListFromRuntimeObjs(t, c, objManifest) + err := statusWaiter.WaitForDelete(resourceList, timeout) assert.NoError(t, err) } From 4f33e5c97fe4d38e9bcb742b053d9848f84b8e63 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 04:08:51 +0000 Subject: [PATCH 076/192] test refactoring Signed-off-by: Austin Abro --- pkg/kube/statuswait_test.go | 63 ++++++++++--------------------------- 1 file changed, 16 insertions(+), 47 deletions(-) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index d6d7f5e36..0e88f1bbe 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -319,25 +319,14 @@ func TestStatusWait(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) } - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -384,24 +373,14 @@ func TestWaitForJobComplete(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) 
} - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) @@ -424,7 +403,7 @@ func TestWatchForReady(t *testing.T) { objManifests: []string{jobCompleteManifest, podCompleteManifest}, }, { - name: "succeeds even when a resource that's not a pod or job is complete", + name: "succeeds when a resource that's not a pod or job is not ready", objManifests: []string{notReadyDeploymentManifest}, }, { @@ -454,24 +433,14 @@ func TestWatchForReady(t *testing.T) { restMapper: fakeMapper, log: t.Logf, } - objs := []runtime.Object{} - for _, podYaml := range tt.objManifests { - m := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(podYaml), &m) - assert.NoError(t, err) - resource := &unstructured.Unstructured{Object: m} - objs = append(objs, resource) - gvr := getGVR(t, fakeMapper, resource) - err = fakeClient.Tracker().Create(gvr, resource, resource.GetNamespace()) - assert.NoError(t, err) - } - resourceList := ResourceList{} + objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { - list, err := c.Build(objBody(obj), false) + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) assert.NoError(t, err) - resourceList = append(resourceList, list...) } - + resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) if tt.expectErrs != nil { assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) From 5a254dae2138e830403685527129a46be74c9b8a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 14:42:14 +0000 Subject: [PATCH 077/192] cleanup Signed-off-by: Austin Abro --- pkg/kube/client.go | 186 ++++++++++----------------------------------- 1 file changed, 42 insertions(+), 144 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index d174614db..333c0ec65 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -687,150 +688,47 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } -// func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { -// kind := info.Mapping.GroupVersionKind.Kind -// switch kind { -// case "Job", "Pod": -// default: -// return nil -// } - -// c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) - -// // Use a selector on the name of the resource. This should be unique for the -// // given version and kind -// selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) -// if err != nil { -// return err -// } -// lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) - -// // What we watch for depends on the Kind. -// // - For a Job, we watch for completion. -// // - For all else, we watch until Ready. -// // In the future, we might want to add some special logic for types -// // like Ingress, Volume, etc. 
- -// ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) -// defer cancel() -// _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { -// // Make sure the incoming object is versioned as we use unstructured -// // objects when we build manifests -// obj := convertWithMapper(e.Object, info.Mapping) -// switch e.Type { -// case watch.Added, watch.Modified: -// // For things like a secret or a config map, this is the best indicator -// // we get. We care mostly about jobs, where what we want to see is -// // the status go into a good state. For other types, like ReplicaSet -// // we don't really do anything to support these as hooks. -// c.Log("Add/Modify event for %s: %v", info.Name, e.Type) -// switch kind { -// case "Job": -// return c.waitForJob(obj, info.Name) -// case "Pod": -// return c.waitForPodSuccess(obj, info.Name) -// } -// return true, nil -// case watch.Deleted: -// c.Log("Deleted event for %s", info.Name) -// return true, nil -// case watch.Error: -// // Handle error and return with an error. -// c.Log("Error event for %s", info.Name) -// return true, errors.Errorf("failed to deploy %s", info.Name) -// default: -// return false, nil -// } -// }) -// return err -// } - -// // waitForJob is a helper that waits for a job to complete. -// // -// // This operates on an event returned from a watcher. -// func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { -// o, ok := obj.(*batch.Job) -// if !ok { -// return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) -// } - -// for _, c := range o.Status.Conditions { -// if c.Type == batch.JobComplete && c.Status == "True" { -// return true, nil -// } else if c.Type == batch.JobFailed && c.Status == "True" { -// return true, errors.Errorf("job %s failed: %s", name, c.Reason) -// } -// } - -// c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) -// return false, nil -// } - -// // waitForPodSuccess is a helper that waits for a pod to complete. -// // -// // This operates on an event returned from a watcher. 
-// func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { -// o, ok := obj.(*v1.Pod) -// if !ok { -// return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) -// } - -// switch o.Status.Phase { -// case v1.PodSucceeded: -// c.Log("Pod %s succeeded", o.Name) -// return true, nil -// case v1.PodFailed: -// return true, errors.Errorf("pod %s failed", o.Name) -// case v1.PodPending: -// c.Log("Pod %s pending", o.Name) -// case v1.PodRunning: -// c.Log("Pod %s running", o.Name) -// } - -// return false, nil -// } - -// // GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions -// func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { -// podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) -// if err != nil { -// return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err) -// } -// return podList, nil -// } - -// // OutputContainerLogsForPodList is a helper that outputs logs for a list of pods -// func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error { -// for _, pod := range podList.Items { -// for _, container := range pod.Spec.Containers { -// options := &v1.PodLogOptions{ -// Container: container.Name, -// } -// request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options) -// err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name)) -// if err2 != nil { -// return err2 -// } -// } -// } -// return nil -// } - -// func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error { -// readCloser, err := request.Stream(context.Background()) -// if err != nil { -// return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName) -// } -// defer readCloser.Close() -// _, err = io.Copy(writer, readCloser) -// if err != nil { -// return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) -// } -// if err != nil { -// return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName) -// } -// return nil -// } +// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions +func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { + podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) + if err != nil { + return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err) + } + return podList, nil +} + +// OutputContainerLogsForPodList is a helper that outputs logs for a list of pods +func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error { + for _, pod := range podList.Items { + for _, container := range pod.Spec.Containers { + options := &v1.PodLogOptions{ + Container: container.Name, + } + request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options) + err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name)) + if err2 != nil { + return err2 + } + } + } + return nil +} + +func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, 
writer io.Writer) error { + readCloser, err := request.Stream(context.Background()) + if err != nil { + return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName) + } + defer readCloser.Close() + _, err = io.Copy(writer, readCloser) + if err != nil { + return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) + } + if err != nil { + return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName) + } + return nil +} // scrubValidationError removes kubectl info from the message. func scrubValidationError(err error) error { From a18589c4d8d8d7f71e26397d51e41ff967038ca2 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 14:42:52 +0000 Subject: [PATCH 078/192] fmt Signed-off-by: Austin Abro --- pkg/kube/statuswait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index baf5814b1..bc3958848 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -188,7 +188,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func continue } // If a resource is already deleted before waiting has started, it will show as unknown - // this check ensures we don't wait forever for a resource that is already deleted + // this check ensures we don't wait forever for a resource that is already deleted if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { continue } From 29c250c233c3efecc2dc7f2a8c1b9810299de5d8 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Feb 2025 16:09:30 +0000 Subject: [PATCH 079/192] add back interface log check Signed-off-by: Austin Abro --- pkg/kube/interface.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 64d954853..d6ac823f1 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -118,5 +118,6 @@ type InterfaceResources interface { } var _ Interface = (*Client)(nil) +var _ InterfaceLogs = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) From 1ad79a2bb71c94cb4c232a8527ddde1953d41b39 Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Thu, 27 Feb 2025 14:21:57 -0500 Subject: [PATCH 080/192] converting inline log to slog Signed-off-by: Robert Sirchia --- internal/sympath/walk.go | 4 ++-- pkg/chart/v2/util/dependencies.go | 12 ++++++------ pkg/engine/engine.go | 8 ++++---- pkg/engine/lookup_func.go | 10 +++++----- pkg/ignore/rules.go | 10 +++++----- pkg/plugin/installer/http_installer.go | 3 ++- pkg/plugin/installer/installer.go | 10 ---------- pkg/plugin/installer/local_installer.go | 5 +++-- pkg/plugin/installer/vcs_installer.go | 15 ++++++++------- pkg/release/util/manifest_sorter.go | 4 ++-- pkg/repo/chartrepo.go | 5 +++-- 11 files changed, 40 insertions(+), 46 deletions(-) diff --git a/internal/sympath/walk.go b/internal/sympath/walk.go index 6b221fb6c..938a99bfe 100644 --- a/internal/sympath/walk.go +++ b/internal/sympath/walk.go @@ -21,7 +21,7 @@ limitations under the License. package sympath import ( - "log" + "log/slog" "os" "path/filepath" "sort" @@ -72,7 +72,7 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { return errors.Wrapf(err, "error evaluating symlink %s", path) } //This log message is to highlight a symlink that is being used within a chart, symlinks can be used for nefarious reasons. - log.Printf("found symbolic link in path: %s resolves to %s. 
Contents of linked file included and used", path, resolved) + slog.Info("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved) if info, err = os.Lstat(resolved); err != nil { return err } diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 78ed46517..2a6912e84 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -16,7 +16,7 @@ limitations under the License. package util import ( - "log" + "log/slog" "strings" "github.com/mitchellh/copystructure" @@ -48,10 +48,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s r.Enabled = bv break } - log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) + slog.Warn("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - log.Printf("Warning: PathValue returned error %v", err) + slog.Error("Warning: PathValue returned error %v", err) } } } @@ -79,7 +79,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) { hasFalse = true } } else { - log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) + slog.Warn("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) } } } @@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." + child) if err != nil { - log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err) + slog.Error("Warning: ImportValues missing table from chart %s: %v", r.Name, err) continue } // create value map from child to be merged into parent @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - log.Printf("Warning: ImportValues missing table: %v", err) + slog.Error("Warning: ImportValues missing table: %v", err) continue } if merge { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 0d0a398be..650b56a3a 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -18,7 +18,7 @@ package engine import ( "fmt" - "log" + "log/slog" "path" "path/filepath" "regexp" @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - log.Printf("[INFO] Missing required value: %s", warn) + slog.Warn("[INFO] Missing required value: %s", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - log.Printf("[INFO] Missing required value: %s", warn) + slog.Warn("[INFO] Missing required value: %s", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - log.Printf("[INFO] Fail: %s", msg) + slog.Info("[INFO] Fail: %s", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 75e85098d..c7f7226f4 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -18,7 +18,7 @@ package engine import ( "context" - "log" + "log/slog" "strings" "github.com/pkg/errors" @@ -101,7 +101,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + slog.Error("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) } gvr := schema.GroupVersionResource{ @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - log.Printf("[ERROR] unable to get dynamic client %s", err) + slog.Error("[ERROR] unable to get dynamic client %s", err) return nil, false, err } res := intf.Resource(gvr) @@ -122,12 +122,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - log.Printf("[ERROR] unable to create discovery client %s", err) + slog.Error("[ERROR] unable to create discovery client %s", err) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + slog.Error("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index 88de407ad..e59e8dee5 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -20,7 +20,7 @@ import ( "bufio" "bytes" "io" - "log" + "log/slog" "os" "path/filepath" "strings" @@ -102,7 +102,7 @@ func 
(r *Rules) Ignore(path string, fi os.FileInfo) bool { } for _, p := range r.patterns { if p.match == nil { - log.Printf("ignore: no matcher supplied for %q", p.raw) + slog.Info("ignore: no matcher supplied for %q", p.raw) return false } @@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error { rule = strings.TrimPrefix(rule, "/") ok, err := filepath.Match(rule, n) if err != nil { - log.Printf("Failed to compile %q: %s", rule, err) + slog.Error("Failed to compile %q: %s", rule, err) return false } return ok @@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error { p.match = func(n string, _ os.FileInfo) bool { ok, err := filepath.Match(rule, n) if err != nil { - log.Printf("Failed to compile %q: %s", rule, err) + slog.Error("Failed to compile %q: %s", rule, err) return false } return ok @@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error { n = filepath.Base(n) ok, err := filepath.Match(rule, n) if err != nil { - log.Printf("Failed to compile %q: %s", rule, err) + slog.Error("Failed to compile %q: %s", rule, err) return false } return ok diff --git a/pkg/plugin/installer/http_installer.go b/pkg/plugin/installer/http_installer.go index b900fa401..7e457b0d0 100644 --- a/pkg/plugin/installer/http_installer.go +++ b/pkg/plugin/installer/http_installer.go @@ -20,6 +20,7 @@ import ( "bytes" "compress/gzip" "io" + "log/slog" "os" "path" "path/filepath" @@ -144,7 +145,7 @@ func (i *HTTPInstaller) Install() error { return err } - debug("copying %s to %s", src, i.Path()) + slog.Debug("copying %s to %s", src, i.Path()) return fs.CopyDir(src, i.Path()) } diff --git a/pkg/plugin/installer/installer.go b/pkg/plugin/installer/installer.go index 5fad58f99..1e90bcaa0 100644 --- a/pkg/plugin/installer/installer.go +++ b/pkg/plugin/installer/installer.go @@ -16,8 +16,6 @@ limitations under the License. package installer import ( - "fmt" - "log" "net/http" "os" "path/filepath" @@ -125,11 +123,3 @@ func isPlugin(dirname string) bool { _, err := os.Stat(filepath.Join(dirname, plugin.PluginFileName)) return err == nil } - -var logger = log.New(os.Stderr, "[debug] ", log.Lshortfile) - -func debug(format string, args ...interface{}) { - if Debug { - logger.Output(2, fmt.Sprintf(format, args...)) - } -} diff --git a/pkg/plugin/installer/local_installer.go b/pkg/plugin/installer/local_installer.go index a79ca7ec7..4c95134ca 100644 --- a/pkg/plugin/installer/local_installer.go +++ b/pkg/plugin/installer/local_installer.go @@ -16,6 +16,7 @@ limitations under the License. package installer // import "helm.sh/helm/v4/pkg/plugin/installer" import ( + "log/slog" "os" "path/filepath" @@ -57,12 +58,12 @@ func (i *LocalInstaller) Install() error { if !isPlugin(i.Source) { return ErrMissingMetadata } - debug("symlinking %s to %s", i.Source, i.Path()) + slog.Debug("symlinking %s to %s", i.Source, i.Path()) return os.Symlink(i.Source, i.Path()) } // Update updates a local repository func (i *LocalInstaller) Update() error { - debug("local repository is auto-updated") + slog.Debug("local repository is auto-updated") return nil } diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index 3967e46cd..41b47ed13 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -16,6 +16,7 @@ limitations under the License. 
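// With the package-level debug() helper deleted above, whether slog.Debug
// output appears is decided by the configured handler's level rather than the
// old installer.Debug flag. A minimal sketch of the caller-side setup this
// assumes (the wiring and names here are illustrative, not taken from this
// series):
//
//	opts := &slog.HandlerOptions{Level: slog.LevelDebug}
//	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, opts)))
//	slog.Debug("copying plugin", "source", src, "destination", dst)
//
// Leaving the default handler at slog.LevelInfo suppresses these Debug lines,
// which is the handler-level equivalent of Debug=false.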
package installer // import "helm.sh/helm/v4/pkg/plugin/installer" import ( + "log/slog" "os" "sort" @@ -88,13 +89,13 @@ func (i *VCSInstaller) Install() error { return ErrMissingMetadata } - debug("copying %s to %s", i.Repo.LocalPath(), i.Path()) + slog.Debug("copying %s to %s", i.Repo.LocalPath(), i.Path()) return fs.CopyDir(i.Repo.LocalPath(), i.Path()) } // Update updates a remote repository func (i *VCSInstaller) Update() error { - debug("updating %s", i.Repo.Remote()) + slog.Debug("updating %s", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -128,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if err != nil { return "", err } - debug("found refs: %s", refs) + slog.Debug("found refs: %s", refs) // Convert and filter the list to semver.Version instances semvers := getSemVers(refs) @@ -139,7 +140,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if constraint.Check(v) { // If the constraint passes get the original reference ver := v.Original() - debug("setting to %s", ver) + slog.Debug("setting to %s", ver) return ver, nil } } @@ -149,17 +150,17 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { // setVersion attempts to checkout the version func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error { - debug("setting version to %q", i.Version) + slog.Debug("setting version to %q", i.Version) return repo.UpdateVersion(ref) } // sync will clone or update a remote repo. func (i *VCSInstaller) sync(repo vcs.Repo) error { if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) { - debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) + slog.Debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) return repo.Get() } - debug("updating %s", repo.Remote()) + slog.Debug("updating %s", repo.Remote()) return repo.Update() } diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/util/manifest_sorter.go index 15eb76174..8b5247cad 100644 --- a/pkg/release/util/manifest_sorter.go +++ b/pkg/release/util/manifest_sorter.go @@ -17,7 +17,7 @@ limitations under the License. 
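// Note on the slog conversions in this patch: unlike log.Printf, slog methods
// do not interpret printf verbs. They take a message followed by alternating
// key/value pairs, so a call such as
//
//	slog.Warn("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name)
//
// prints the verbs literally and silently pairs up the trailing arguments as
// attributes (k becomes a key, r.Name its value); an odd argument out is
// reported as !BADKEY. The structured form the follow-up patches below
// converge on is:
//
//	slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)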
package util import ( - "log" + "log/slog" "path" "sort" "strconv" @@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error { } if isUnknownHook { - log.Printf("info: skipping unknown hook: %q", hookTypes) + slog.Info("info: skipping unknown hook: %q", hookTypes) continue } diff --git a/pkg/repo/chartrepo.go b/pkg/repo/chartrepo.go index 52f81be57..070069748 100644 --- a/pkg/repo/chartrepo.go +++ b/pkg/repo/chartrepo.go @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" "io" - "log" + "log/slog" "net/url" "os" "path/filepath" @@ -343,7 +343,8 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) { func (e *Entry) String() string { buf, err := json.Marshal(e) if err != nil { - log.Panic(err) + slog.Error("failed to marshal entry: %s", err) + panic(err) } return string(buf) } From c36bc25fb16577559c6573633c630f1295040202 Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Thu, 27 Feb 2025 14:48:29 -0500 Subject: [PATCH 081/192] fixing missing attributes Signed-off-by: Robert Sirchia --- pkg/chart/v2/util/dependencies.go | 4 ++-- pkg/engine/engine.go | 6 +++--- pkg/engine/lookup_func.go | 4 ++-- pkg/ignore/rules.go | 2 +- pkg/plugin/installer/vcs_installer.go | 10 +++++----- pkg/release/util/manifest_sorter.go | 2 +- pkg/repo/chartrepo.go | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 2a6912e84..8c64298c9 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -51,7 +51,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s slog.Warn("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - slog.Error("Warning: PathValue returned error %v", err) + slog.Error("Warning: PathValue returned error %v", slog.Any("err", err)) } } } @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Error("Warning: ImportValues missing table: %v", err) + slog.Error("Warning: ImportValues missing table: %v", slog.Any("err", err)) continue } if merge { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 650b56a3a..157338bbd 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("[INFO] Missing required value: %s", warn) + slog.Warn("[INFO] Missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("[INFO] Missing required value: %s", warn) + slog.Warn("[INFO] Missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - slog.Info("[INFO] Fail: %s", msg) + slog.Info("[INFO] Fail: %s", "LintMode", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index c7f7226f4..b8a0b8378 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - slog.Error("[ERROR] unable to get dynamic client %s", err) + slog.Error("[ERROR] unable to get dynamic client %s", slog.Any("err", err)) return nil, false, err } res := intf.Resource(gvr) @@ -122,7 +122,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - slog.Error("[ERROR] unable to create discovery client %s", err) + slog.Error("[ERROR] unable to create discovery client %s", slog.Any("err", err)) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index e59e8dee5..6d146e719 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -102,7 +102,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool { } for _, p := range r.patterns { if p.match == nil { - slog.Info("ignore: no matcher supplied for %q", p.raw) + slog.Info("ignore: no matcher supplied for %q", "patterns", p.raw) return false } diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index 41b47ed13..cb7f3fa09 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -95,7 +95,7 @@ func (i *VCSInstaller) Install() error { // Update updates a remote repository func (i *VCSInstaller) Update() error { - slog.Debug("updating %s", i.Repo.Remote()) + slog.Debug("updating %s", "repo", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -129,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if err != nil { return "", err } - slog.Debug("found refs: %s", refs) + slog.Debug("found refs: %s", "refs", refs) // Convert and filter the list to semver.Version instances semvers := getSemVers(refs) @@ -140,7 +140,7 @@ func (i 
*VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if constraint.Check(v) { // If the constraint passes get the original reference ver := v.Original() - slog.Debug("setting to %s", ver) + slog.Debug("setting to %s", "versions", ver) return ver, nil } } @@ -150,7 +150,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { // setVersion attempts to checkout the version func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error { - slog.Debug("setting version to %q", i.Version) + slog.Debug("setting version to %q", "versions", i.Version) return repo.UpdateVersion(ref) } @@ -160,7 +160,7 @@ func (i *VCSInstaller) sync(repo vcs.Repo) error { slog.Debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) return repo.Get() } - slog.Debug("updating %s", repo.Remote()) + slog.Debug("updating %s", "remote", repo.Remote()) return repo.Update() } diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/util/manifest_sorter.go index 8b5247cad..e1cf9171a 100644 --- a/pkg/release/util/manifest_sorter.go +++ b/pkg/release/util/manifest_sorter.go @@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error { } if isUnknownHook { - slog.Info("info: skipping unknown hook: %q", hookTypes) + slog.Info("info: skipping unknown hook: %q", "hookTypes", hookTypes) continue } diff --git a/pkg/repo/chartrepo.go b/pkg/repo/chartrepo.go index 070069748..748730f27 100644 --- a/pkg/repo/chartrepo.go +++ b/pkg/repo/chartrepo.go @@ -343,7 +343,7 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) { func (e *Entry) String() string { buf, err := json.Marshal(e) if err != nil { - slog.Error("failed to marshal entry: %s", err) + slog.Error("failed to marshal entry: %s", slog.Any("err", err)) panic(err) } return string(buf) From 8887d017915507ae3a28d6cfff4244f3fae79c5d Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Thu, 27 Feb 2025 15:47:28 -0500 Subject: [PATCH 082/192] fixing issues with my PR Signed-off-by: Robert Sirchia --- pkg/chart/v2/util/dependencies.go | 10 +++++----- pkg/engine/engine.go | 10 +++++----- pkg/engine/lookup_func.go | 10 +++++----- pkg/ignore/rules.go | 2 +- pkg/plugin/installer/vcs_installer.go | 16 ++++++++-------- pkg/release/util/manifest_sorter.go | 2 +- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 8c64298c9..387d8b297 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -48,10 +48,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s r.Enabled = bv break } - slog.Warn("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) + slog.Warn("Condition path '%s' for chart %s returned non-bool value", c, r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - slog.Error("Warning: PathValue returned error %v", slog.Any("err", err)) + slog.Error("PathValue returned error %v", slog.Any("err", err)) } } } @@ -79,7 +79,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) { hasFalse = true } } else { - slog.Warn("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) + slog.Warn("Tag '%s' for chart %s returned non-bool value", k, r.Name) } } } @@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Error("Warning: ImportValues missing table from chart %s: %v", r.Name, err) + slog.Error("ImportValues missing table from chart %s: %v", r.Name, err) continue } // create value map from child to be merged into parent @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Error("Warning: ImportValues missing table: %v", slog.Any("err", err)) + slog.Error("ImportValues missing table: %v", slog.Any("err", err)) continue } if merge { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 157338bbd..fa51f0923 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -176,12 +176,12 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool // text string. (Maybe we could use a hash appended to the name?) t, err = t.New(parent.Name()).Parse(tpl) if err != nil { - return "", errors.Wrapf(err, "cannot parse template %q", tpl) + return "", errors.Wrapf(err, "Cannot parse template %q", tpl) } var buf strings.Builder if err := t.Execute(&buf, vals); err != nil { - return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl) + return "", errors.Wrapf(err, "Error during tpl function execution for %q", tpl) } // See comment in renderWithReferences explaining the hack. @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("[INFO] Missing required value: %s", "LintMode", warn) + slog.Warn("Missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("[INFO] Missing required value: %s", "LintMode", warn) + slog.Warn("Missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - slog.Info("[INFO] Fail: %s", "LintMode", msg) + slog.Info("Fail: %s", "LintMode", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index b8a0b8378..47f7dd179 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -101,8 +101,8 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - slog.Error("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) - return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) + slog.Error("Unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + return nil, false, errors.Wrapf(err, "Unable to get apiresource from unstructured: %s", gvk.String()) } gvr := schema.GroupVersionResource{ Group: apiRes.Group, @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - slog.Error("[ERROR] unable to get dynamic client %s", slog.Any("err", err)) + slog.Error("Unable to get dynamic client %s", slog.Any("err", err)) return nil, false, err } res := 
intf.Resource(gvr) @@ -122,12 +122,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - slog.Error("[ERROR] unable to create discovery client %s", slog.Any("err", err)) + slog.Error("Unable to create discovery client %s", slog.Any("err", err)) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + slog.Error("Unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index 6d146e719..a343030ea 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -102,7 +102,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool { } for _, p := range r.patterns { if p.match == nil { - slog.Info("ignore: no matcher supplied for %q", "patterns", p.raw) + slog.Info("This will be ignored no matcher supplied for %q", "patterns", p.raw) return false } diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index cb7f3fa09..97b2f1cd4 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -89,13 +89,13 @@ func (i *VCSInstaller) Install() error { return ErrMissingMetadata } - slog.Debug("copying %s to %s", i.Repo.LocalPath(), i.Path()) + slog.Debug("Copying %s to %s", i.Repo.LocalPath(), i.Path()) return fs.CopyDir(i.Repo.LocalPath(), i.Path()) } // Update updates a remote repository func (i *VCSInstaller) Update() error { - slog.Debug("updating %s", "repo", i.Repo.Remote()) + slog.Debug("Updating %s", "repo", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -129,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if err != nil { return "", err } - slog.Debug("found refs: %s", "refs", refs) + slog.Debug("Found refs: %s", "refs", refs) // Convert and filter the list to semver.Version instances semvers := getSemVers(refs) @@ -140,27 +140,27 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if constraint.Check(v) { // If the constraint passes get the original reference ver := v.Original() - slog.Debug("setting to %s", "versions", ver) + slog.Debug("Setting to %s", "versions", ver) return ver, nil } } - return "", errors.Errorf("requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote()) + return "", errors.Errorf("Requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote()) } // setVersion attempts to checkout the version func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error { - slog.Debug("setting version to %q", "versions", i.Version) + slog.Debug("Setting version to %q", "versions", i.Version) return repo.UpdateVersion(ref) } // sync will clone or update a remote repo. 
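// The slog.Any wrappers added in this patch and plain key/value pairs are
// interchangeable ways of passing attributes; these two calls produce the
// same log record (message text here is illustrative):
//
//	slog.Error("pathValue returned error", "err", err)
//	slog.Error("pathValue returned error", slog.Any("err", err))
//
// slog.Any builds a typed slog.Attr, which is mostly useful when assembling
// attribute slices programmatically; for a single error value either form works.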
func (i *VCSInstaller) sync(repo vcs.Repo) error { if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) { - slog.Debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) + slog.Debug("Cloning %s to %s", repo.Remote(), repo.LocalPath()) return repo.Get() } - slog.Debug("updating %s", "remote", repo.Remote()) + slog.Debug("Updating %s", "remote", repo.Remote()) return repo.Update() } diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/util/manifest_sorter.go index e1cf9171a..495b0ae0d 100644 --- a/pkg/release/util/manifest_sorter.go +++ b/pkg/release/util/manifest_sorter.go @@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error { } if isUnknownHook { - slog.Info("info: skipping unknown hook: %q", "hookTypes", hookTypes) + slog.Info("Skipping unknown hook: %q", "hookTypes", hookTypes) continue } From c2e6ed8ae5bcfa3ca1932bb4e0fb6a498b23283b Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Fri, 28 Feb 2025 08:22:53 -0500 Subject: [PATCH 083/192] fixing build error Signed-off-by: Robert Sirchia --- pkg/chart/v2/util/dependencies.go | 10 +++++----- pkg/engine/engine.go | 6 +++--- pkg/engine/lookup_func.go | 10 +++++----- pkg/ignore/rules.go | 8 ++++---- pkg/plugin/installer/vcs_installer.go | 16 ++++++++-------- pkg/release/util/manifest_sorter.go | 2 +- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 387d8b297..07d2ad055 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -48,10 +48,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s r.Enabled = bv break } - slog.Warn("Condition path '%s' for chart %s returned non-bool value", c, r.Name) + slog.Warn("condition path '%s' for chart %s returned non-bool value", c, r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - slog.Error("PathValue returned error %v", slog.Any("err", err)) + slog.Error("pathValue returned error %v", slog.Any("err", err)) } } } @@ -79,7 +79,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) { hasFalse = true } } else { - slog.Warn("Tag '%s' for chart %s returned non-bool value", k, r.Name) + slog.Warn("tag '%s' for chart %s returned non-bool value", k, r.Name) } } } @@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Error("ImportValues missing table from chart %s: %v", r.Name, err) + slog.Error("importValues missing table from chart %s: %v", r.Name, err) continue } // create value map from child to be merged into parent @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Error("ImportValues missing table: %v", slog.Any("err", err)) + slog.Error("importValues missing table: %v", slog.Any("err", err)) continue } if merge { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index fa51f0923..4da458e73 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("Missing required value: %s", "LintMode", warn) + slog.Warn("missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("Missing required value: %s", "LintMode", warn) + slog.Warn("missing required value: %s", "LintMode", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - slog.Info("Fail: %s", "LintMode", msg) + slog.Info("fail: %s", "lintMode", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 47f7dd179..9043b519b 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -101,8 +101,8 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - slog.Error("Unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) - return nil, false, errors.Wrapf(err, "Unable to get apiresource from unstructured: %s", gvk.String()) + slog.Error("unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) } gvr := schema.GroupVersionResource{ Group: apiRes.Group, @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - slog.Error("Unable to get dynamic client %s", slog.Any("err", err)) + slog.Error("unable to get dynamic client %s", slog.Any("err", err)) return nil, false, err } res := intf.Resource(gvr) @@ -122,12 +122,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - slog.Error("Unable to create discovery client %s", slog.Any("err", err)) + slog.Error("unable to create discovery client %s", slog.Any("err", err)) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("Unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + slog.Error("unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index a343030ea..25a9c6715 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -102,7 +102,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool { } for _, p := range r.patterns { if p.match == nil { - slog.Info("This will be 
ignored no matcher supplied for %q", "patterns", p.raw) + slog.Info("this will be ignored no matcher supplied for %q", "patterns", p.raw) return false } @@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error { rule = strings.TrimPrefix(rule, "/") ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("Failed to compile %q: %s", rule, err) + slog.Error("failed to compile %q: %s", rule, err) return false } return ok @@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error { p.match = func(n string, _ os.FileInfo) bool { ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("Failed to compile %q: %s", rule, err) + slog.Error("failed to compile %q: %s", rule, err) return false } return ok @@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error { n = filepath.Base(n) ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("Failed to compile %q: %s", rule, err) + slog.Error("failed to compile %q: %s", rule, err) return false } return ok diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index 97b2f1cd4..cb7f3fa09 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -89,13 +89,13 @@ func (i *VCSInstaller) Install() error { return ErrMissingMetadata } - slog.Debug("Copying %s to %s", i.Repo.LocalPath(), i.Path()) + slog.Debug("copying %s to %s", i.Repo.LocalPath(), i.Path()) return fs.CopyDir(i.Repo.LocalPath(), i.Path()) } // Update updates a remote repository func (i *VCSInstaller) Update() error { - slog.Debug("Updating %s", "repo", i.Repo.Remote()) + slog.Debug("updating %s", "repo", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -129,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if err != nil { return "", err } - slog.Debug("Found refs: %s", "refs", refs) + slog.Debug("found refs: %s", "refs", refs) // Convert and filter the list to semver.Version instances semvers := getSemVers(refs) @@ -140,27 +140,27 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if constraint.Check(v) { // If the constraint passes get the original reference ver := v.Original() - slog.Debug("Setting to %s", "versions", ver) + slog.Debug("setting to %s", "versions", ver) return ver, nil } } - return "", errors.Errorf("Requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote()) + return "", errors.Errorf("requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote()) } // setVersion attempts to checkout the version func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error { - slog.Debug("Setting version to %q", "versions", i.Version) + slog.Debug("setting version to %q", "versions", i.Version) return repo.UpdateVersion(ref) } // sync will clone or update a remote repo. 
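// The message lowercasing in this patch follows standard Go style for error
// strings (staticcheck ST1005), applied here to log messages as well: they
// start lowercase and omit trailing punctuation, because callers routinely
// wrap them into larger messages.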
func (i *VCSInstaller) sync(repo vcs.Repo) error { if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) { - slog.Debug("Cloning %s to %s", repo.Remote(), repo.LocalPath()) + slog.Debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) return repo.Get() } - slog.Debug("Updating %s", "remote", repo.Remote()) + slog.Debug("updating %s", "remote", repo.Remote()) return repo.Update() } diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/util/manifest_sorter.go index 495b0ae0d..a0107c8ee 100644 --- a/pkg/release/util/manifest_sorter.go +++ b/pkg/release/util/manifest_sorter.go @@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error { } if isUnknownHook { - slog.Info("Skipping unknown hook: %q", "hookTypes", hookTypes) + slog.Info("skipping unknown hook: %q", "hookTypes", hookTypes) continue } From 848c134e0c0b8d47285a61c5c923e7c449e1a67c Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Fri, 28 Feb 2025 16:14:48 -0500 Subject: [PATCH 084/192] fixing error messages Signed-off-by: Robert Sirchia --- internal/sympath/walk.go | 2 +- pkg/chart/v2/util/dependencies.go | 10 +++++----- pkg/engine/engine.go | 6 +++--- pkg/engine/lookup_func.go | 8 ++++---- pkg/ignore/rules.go | 8 ++++---- pkg/plugin/installer/http_installer.go | 2 +- pkg/plugin/installer/local_installer.go | 2 +- pkg/plugin/installer/vcs_installer.go | 14 +++++++------- pkg/release/util/manifest_sorter.go | 2 +- pkg/repo/chartrepo.go | 2 +- 10 files changed, 28 insertions(+), 28 deletions(-) diff --git a/internal/sympath/walk.go b/internal/sympath/walk.go index 938a99bfe..0cd258d39 100644 --- a/internal/sympath/walk.go +++ b/internal/sympath/walk.go @@ -72,7 +72,7 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { return errors.Wrapf(err, "error evaluating symlink %s", path) } //This log message is to highlight a symlink that is being used within a chart, symlinks can be used for nefarious reasons. - slog.Info("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved) + slog.Info("found symbolic link in path. Contents of linked file included and used", "path", path, "resolved", resolved) if info, err = os.Lstat(resolved); err != nil { return err } diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 07d2ad055..80268b45c 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -48,10 +48,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s r.Enabled = bv break } - slog.Warn("condition path '%s' for chart %s returned non-bool value", c, r.Name) + slog.Warn("returned non-bool value", "path", c, "chart", r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - slog.Error("pathValue returned error %v", slog.Any("err", err)) + slog.Error("pathValue returned error", slog.Any("err", err)) } } } @@ -79,7 +79,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) { hasFalse = true } } else { - slog.Warn("tag '%s' for chart %s returned non-bool value", k, r.Name) + slog.Warn("returned non-bool value", "tag", k, "chart", r.Name) } } } @@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Error("importValues missing table from chart %s: %v", r.Name, err) + slog.Error("importValues missing table from chart", "chart", r.Name, "value", err) continue } // create value map from child to be merged into parent @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Error("importValues missing table: %v", slog.Any("err", err)) + slog.Error("importValues missing table", slog.Any("err", err)) continue } if merge { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 4da458e73..d47606ee6 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("missing required value: %s", "LintMode", warn) + slog.Warn("missing required value", "value", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("missing required value: %s", "LintMode", warn) + slog.Warn("missing required values", "value", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - slog.Info("fail: %s", "lintMode", msg) + slog.Info("funcMap fail", "lintMode", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 9043b519b..b36e6a7ef 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -101,7 +101,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - slog.Error("unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), "error", err) return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) } gvr := schema.GroupVersionResource{ @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - slog.Error("unable to get dynamic client %s", slog.Any("err", err)) + slog.Error("unable to get dynamic client", slog.Any("err", err)) return nil, false, err } res := intf.Resource(gvr) @@ -122,12 +122,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - slog.Error("unable to create discovery client %s", slog.Any("err", err)) + slog.Error("unable to create discovery client", slog.Any("err", err)) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + slog.Error("unable to retrieve resource list", "list", gvk.GroupVersion().String(), "error", err) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index 25a9c6715..3f672873c 
100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -102,7 +102,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool { } for _, p := range r.patterns { if p.match == nil { - slog.Info("this will be ignored no matcher supplied for %q", "patterns", p.raw) + slog.Info("this will be ignored no matcher supplied", "patterns", p.raw) return false } @@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error { rule = strings.TrimPrefix(rule, "/") ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile %q: %s", rule, err) + slog.Error("failed to compile", "rule", rule, "error", err) return false } return ok @@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error { p.match = func(n string, _ os.FileInfo) bool { ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile %q: %s", rule, err) + slog.Error("failed to compile", "rule", rule, "error", err) return false } return ok @@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error { n = filepath.Base(n) ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile %q: %s", rule, err) + slog.Error("failed to compile", "rule", rule, "error", err) return false } return ok diff --git a/pkg/plugin/installer/http_installer.go b/pkg/plugin/installer/http_installer.go index 7e457b0d0..cc45787bf 100644 --- a/pkg/plugin/installer/http_installer.go +++ b/pkg/plugin/installer/http_installer.go @@ -145,7 +145,7 @@ func (i *HTTPInstaller) Install() error { return err } - slog.Debug("copying %s to %s", src, i.Path()) + slog.Debug("copying", "source", src, "path", i.Path()) return fs.CopyDir(src, i.Path()) } diff --git a/pkg/plugin/installer/local_installer.go b/pkg/plugin/installer/local_installer.go index 4c95134ca..52636d019 100644 --- a/pkg/plugin/installer/local_installer.go +++ b/pkg/plugin/installer/local_installer.go @@ -58,7 +58,7 @@ func (i *LocalInstaller) Install() error { if !isPlugin(i.Source) { return ErrMissingMetadata } - slog.Debug("symlinking %s to %s", i.Source, i.Path()) + slog.Debug("symlinking", "source", i.Source, "path", i.Path()) return os.Symlink(i.Source, i.Path()) } diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index cb7f3fa09..049775094 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -89,13 +89,13 @@ func (i *VCSInstaller) Install() error { return ErrMissingMetadata } - slog.Debug("copying %s to %s", i.Repo.LocalPath(), i.Path()) + slog.Debug("copying files", "source", i.Repo.LocalPath(), "destination", i.Path()) return fs.CopyDir(i.Repo.LocalPath(), i.Path()) } // Update updates a remote repository func (i *VCSInstaller) Update() error { - slog.Debug("updating %s", "repo", i.Repo.Remote()) + slog.Debug("updating", "repo", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -129,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if err != nil { return "", err } - slog.Debug("found refs: %s", "refs", refs) + slog.Debug("found refs", "refs", refs) // Convert and filter the list to semver.Version instances semvers := getSemVers(refs) @@ -140,7 +140,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { if constraint.Check(v) { // If the constraint passes get the original reference ver := v.Original() - slog.Debug("setting to %s", "versions", ver) + slog.Debug("setting to version", "version", ver) return ver, nil } } @@ -150,17 +150,17 @@ func (i 
*VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { // setVersion attempts to checkout the version func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error { - slog.Debug("setting version to %q", "versions", i.Version) + slog.Debug("setting version", "version", i.Version) return repo.UpdateVersion(ref) } // sync will clone or update a remote repo. func (i *VCSInstaller) sync(repo vcs.Repo) error { if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) { - slog.Debug("cloning %s to %s", repo.Remote(), repo.LocalPath()) + slog.Debug("cloning", "source", repo.Remote(), "destination", repo.LocalPath()) return repo.Get() } - slog.Debug("updating %s", "remote", repo.Remote()) + slog.Debug("updating", "remote", repo.Remote()) return repo.Update() } diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/util/manifest_sorter.go index a0107c8ee..df3bd71d7 100644 --- a/pkg/release/util/manifest_sorter.go +++ b/pkg/release/util/manifest_sorter.go @@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error { } if isUnknownHook { - slog.Info("skipping unknown hook: %q", "hookTypes", hookTypes) + slog.Info("skipping unknown hooks", "hookTypes", hookTypes) continue } diff --git a/pkg/repo/chartrepo.go b/pkg/repo/chartrepo.go index 748730f27..766e31a61 100644 --- a/pkg/repo/chartrepo.go +++ b/pkg/repo/chartrepo.go @@ -343,7 +343,7 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) { func (e *Entry) String() string { buf, err := json.Marshal(e) if err != nil { - slog.Error("failed to marshal entry: %s", slog.Any("err", err)) + slog.Error("failed to marshal entry", slog.Any("err", err)) panic(err) } return string(buf) From 5ecca2ed143187ba4c8e3dde44c9ec5f1627ce6b Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:53:00 -0500 Subject: [PATCH 085/192] Apply suggestions from code review Co-authored-by: Scott Rigby Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- internal/statusreaders/job_status_reader.go | 5 +++-- internal/statusreaders/job_status_reader_test.go | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/internal/statusreaders/job_status_reader.go b/internal/statusreaders/job_status_reader.go index d493d9e13..e11843f6d 100644 --- a/internal/statusreaders/job_status_reader.go +++ b/internal/statusreaders/job_status_reader.go @@ -1,5 +1,8 @@ /* Copyright The Helm Authors. +This file was initially copied and modified from + https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,8 +19,6 @@ limitations under the License. package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go - import ( "context" "fmt" diff --git a/internal/statusreaders/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go index 70e4ee29a..5f07be91c 100644 --- a/internal/statusreaders/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -1,5 +1,8 @@ /* Copyright The Helm Authors. 
+This file was initially copied and modified from + https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +19,6 @@ limitations under the License. package statusreaders -// This file was copied and modified from https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go import ( "testing" From 68f72e5c3fb9c2bc6486a29a6eb5522a05ea8f81 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 3 Mar 2025 19:56:00 +0000 Subject: [PATCH 086/192] hook only strategy when wait=false Signed-off-by: Austin Abro --- pkg/cmd/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 10c7e9714..044b19e04 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -85,7 +85,7 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": - *ws = "" + *ws = waitValue(kube.HookOnlyStrategy) return nil default: return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) From 8d964588cd3b54b470510ee9663eedba25c6186b Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Tue, 4 Mar 2025 17:47:25 -0500 Subject: [PATCH 087/192] Update internal/statusreaders/job_status_reader.go Co-authored-by: Scott Rigby Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- internal/statusreaders/job_status_reader.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/statusreaders/job_status_reader.go b/internal/statusreaders/job_status_reader.go index e11843f6d..3cd9ac7ac 100644 --- a/internal/statusreaders/job_status_reader.go +++ b/internal/statusreaders/job_status_reader.go @@ -29,11 +29,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" ) type customJobStatusReader struct { From 24dc64382292cbc3ad30743f6d2c63fbfcd810d5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 22:56:11 +0000 Subject: [PATCH 088/192] restmapper Signed-off-by: Austin Abro --- go.mod | 49 ++++---- go.sum | 117 ++++++++---------- .../statusreaders/job_status_reader_test.go | 2 +- internal/statusreaders/pod_status_reader.go | 10 +- .../statusreaders/pod_status_reader_test.go | 2 +- pkg/kube/statuswait.go | 16 +-- pkg/kube/statuswait_test.go | 2 +- 7 files changed, 95 insertions(+), 103 deletions(-) diff --git a/go.mod b/go.mod index 1d318ea25..3e4c81cdc 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/evanphx/json-patch v5.9.11+incompatible + github.com/fluxcd/cli-utils v0.36.0-flux.12 github.com/foxcpp/go-mockdns v1.1.0 
github.com/gobwas/glob v0.2.3 github.com/gofrs/flock v0.12.1 @@ -46,7 +47,6 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.2 oras.land/oras-go/v2 v2.5.0 - sigs.k8s.io/cli-utils v0.37.2 sigs.k8s.io/controller-runtime v0.20.1 sigs.k8s.io/yaml v1.4.0 ) @@ -73,30 +73,30 @@ require ( github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-errors/errors v1.4.2 // indirect + github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -110,7 +110,7 @@ require ( github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect @@ -123,13 +123,13 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.35.1 // indirect + github.com/onsi/gomega v1.36.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect 
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect @@ -142,10 +142,11 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect @@ -158,31 +159,31 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect go.opentelemetry.io/otel/sdk v1.32.0 // indirect go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/grpc v1.68.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.4 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/component-base v0.32.2 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect ) diff --git a/go.sum b/go.sum index 57fbc6117..9fbf29bfc 100644 --- a/go.sum +++ b/go.sum @@ -60,7 +60,6 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= 
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= @@ -81,26 +80,28 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fluxcd/cli-utils v0.36.0-flux.12 h1:8cD6SmaKa/lGo0KCu0XWiGrXJMLMBQwSsnoP0cG+Gjw= +github.com/fluxcd/cli-utils v0.36.0-flux.12/go.mod h1:Nb/zMqsJAzjz4/HIsEc2LTqxC6eC0rV26t4hkJT/F9o= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 
v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -113,12 +114,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= @@ -141,8 +140,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -150,8 +149,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf h1:BvBLUD2hkvLI3dJTJMiopAq8/wp43AAZKTP7qdpptbU= +github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -160,8 +159,8 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= @@ -197,11 +196,8 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -214,8 +210,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -257,10 +253,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod 
h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -288,8 +284,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -323,17 +319,12 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -350,14 +341,16 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -382,16 +375,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= 
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -416,8 +409,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -431,10 +424,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -488,8 +481,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -498,8 +491,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -510,8 +503,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -542,25 +535,23 @@ k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us= k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.5.0 
h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg= -sigs.k8s.io/cli-utils v0.37.2/go.mod h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco= sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= -sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= -sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= +sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/statusreaders/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go index 5f07be91c..6e9ed5a79 100644 --- a/internal/statusreaders/job_status_reader_test.go +++ b/internal/statusreaders/job_status_reader_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" ) func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) { diff --git a/internal/statusreaders/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go index d3daf7cc3..c074c3487 100644 --- a/internal/statusreaders/pod_status_reader.go +++ b/internal/statusreaders/pod_status_reader.go @@ -25,11 +25,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" ) type customPodStatusReader struct { diff --git a/internal/statusreaders/pod_status_reader_test.go b/internal/statusreaders/pod_status_reader_test.go 
index a151f1aed..ba0d1f1bb 100644 --- a/internal/statusreaders/pod_status_reader_test.go +++ b/internal/statusreaders/pod_status_reader_test.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" ) func TestPodConditions(t *testing.T) { diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bc3958848..22242b40f 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -23,18 +23,18 @@ import ( "sort" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/kstatus/watcher" + "github.com/fluxcd/cli-utils/pkg/object" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders" - "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/cli-utils/pkg/kstatus/watcher" - "sigs.k8s.io/cli-utils/pkg/object" helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0e88f1bbe..fee325ddc 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -33,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/kubectl/pkg/scheme" - "sigs.k8s.io/cli-utils/pkg/testutil" ) var podCurrentManifest = ` From 3a296aacade3e66a68e3aac88fc3abf0ef2f81a5 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 23:15:02 +0000 Subject: [PATCH 089/192] rest mapper Signed-off-by: Austin Abro --- internal/client/client.go | 369 ++++++++++++++++++++++++++++++++++++++ pkg/kube/client.go | 5 +- 2 files changed, 372 insertions(+), 2 deletions(-) create mode 100644 internal/client/client.go diff --git a/internal/client/client.go b/internal/client/client.go new file mode 100644 index 000000000..b55ddb3f8 --- /dev/null +++ b/internal/client/client.go @@ -0,0 +1,369 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package client
+
+import (
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+	"sync"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+/*
+Adapted from controller-runtime v0.19, before Kubernetes aggregated discovery was enabled
+in controller-runtime v0.20, which broke preferred-version discovery in the RESTMapper.
+https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go
+*/
+
+// NewLazyRESTMapper returns a dynamic RESTMapper for cfg. The returned
+// RESTMapper discovers resource types on demand at runtime.
+func NewLazyRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
+	if httpClient == nil {
+		return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client")
+	}
+
+	client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient)
+	if err != nil {
+		return nil, err
+	}
+	return &mapper{
+		mapper:      restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}),
+		client:      client,
+		knownGroups: map[string]*restmapper.APIGroupResources{},
+		apiGroups:   map[string]*metav1.APIGroup{},
+	}, nil
+}
+
+// mapper is a RESTMapper that will lazily query the provided
+// client for discovery information to do REST mappings.
+type mapper struct {
+	mapper      meta.RESTMapper
+	client      discovery.DiscoveryInterface
+	knownGroups map[string]*restmapper.APIGroupResources
+	apiGroups   map[string]*metav1.APIGroup
+
+	// mutex to provide thread-safe mapper reloading.
+	mu sync.RWMutex
+}
+
+// KindFor implements Mapper.KindFor.
+func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	res, err := m.getMapper().KindFor(resource)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil {
+			return schema.GroupVersionKind{}, err
+		}
+		res, err = m.getMapper().KindFor(resource)
+	}
+
+	return res, err
+}
+
+// KindsFor implements Mapper.KindsFor.
+func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	res, err := m.getMapper().KindsFor(resource)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil {
+			return nil, err
+		}
+		res, err = m.getMapper().KindsFor(resource)
+	}
+
+	return res, err
+}
+
+// ResourceFor implements Mapper.ResourceFor.
+func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	res, err := m.getMapper().ResourceFor(input)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil {
+			return schema.GroupVersionResource{}, err
+		}
+		res, err = m.getMapper().ResourceFor(input)
+	}
+
+	return res, err
+}
+
+// ResourcesFor implements Mapper.ResourcesFor.
+func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	res, err := m.getMapper().ResourcesFor(input)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil {
+			return nil, err
+		}
+		res, err = m.getMapper().ResourcesFor(input)
+	}
+
+	return res, err
+}
+
+// RESTMapping implements Mapper.RESTMapping.
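+// Like the lookup methods above, it follows the lazy pattern: try the
+// current mapper first, and on a no-match error reload discovery data
+// for the requested group before retrying the lookup once.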
+func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	res, err := m.getMapper().RESTMapping(gk, versions...)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil {
+			return nil, err
+		}
+		res, err = m.getMapper().RESTMapping(gk, versions...)
+	}
+
+	return res, err
+}
+
+// RESTMappings implements Mapper.RESTMappings.
+func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	res, err := m.getMapper().RESTMappings(gk, versions...)
+	if meta.IsNoMatchError(err) {
+		if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil {
+			return nil, err
+		}
+		res, err = m.getMapper().RESTMappings(gk, versions...)
+	}
+
+	return res, err
+}
+
+// ResourceSingularizer implements Mapper.ResourceSingularizer.
+func (m *mapper) ResourceSingularizer(resource string) (string, error) {
+	return m.getMapper().ResourceSingularizer(resource)
+}
+
+func (m *mapper) getMapper() meta.RESTMapper {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	return m.mapper
+}
+
+// addKnownGroupAndReload reloads the mapper with updated information about the missing API group.
+// versions can be specified for partial updates, for instance for the v1beta1 version only.
+func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error {
+	// versions will here be [""] if the forwarded Version value of
+	// GroupVersionResource (in calling method) was not specified.
+	if len(versions) == 1 && versions[0] == "" {
+		versions = nil
+	}
+
+	// If no specific versions are set by the user, we will scan all available ones for the API group.
+	// This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls
+	// this data will be taken from cache.
+	if len(versions) == 0 {
+		apiGroup, err := m.findAPIGroupByName(groupName)
+		if err != nil {
+			return err
+		}
+		if apiGroup != nil {
+			for _, version := range apiGroup.Versions {
+				versions = append(versions, version.Version)
+			}
+		}
+	}
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Create or fetch group resources from cache.
+	groupResources := &restmapper.APIGroupResources{
+		Group:              metav1.APIGroup{Name: groupName},
+		VersionedResources: make(map[string][]metav1.APIResource),
+	}
+
+	// Update information for group resources about versioned resources.
+	// The number of API calls is equal to the number of versions: /apis/<group>/<version>.
+	// If we encounter a missing API version (NotFound error), we will remove the group from
+	// the m.apiGroups and m.knownGroups caches.
+	// If this happens, in the next call the group will be added back to apiGroups
+	// and only the existing versions will be loaded in knownGroups.
+	groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...)
+	if err != nil {
+		return fmt.Errorf("failed to get API group resources: %w", err)
+	}
+
+	if _, ok := m.knownGroups[groupName]; ok {
+		groupResources = m.knownGroups[groupName]
+	}
+
+	// Update information for group resources about the API group by adding new versions.
+	// Ignore the versions that are already registered.
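+	// For example, if "apps" was previously cached with only v1beta1 and
+	// v1 was just fetched, the loop below stores the v1 resource list and
+	// appends v1 to the group's version list.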
+	for groupVersion, resources := range groupVersionResources {
+		version := groupVersion.Version
+
+		groupResources.VersionedResources[version] = resources.APIResources
+		found := false
+		for _, v := range groupResources.Group.Versions {
+			if v.Version == version {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{
+				GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(),
+				Version:      version,
+			})
+		}
+	}
+
+	// Update data in the cache.
+	m.knownGroups[groupName] = groupResources
+
+	// Finally, update the group with received information and regenerate the mapper.
+	updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups))
+	for _, agr := range m.knownGroups {
+		updatedGroupResources = append(updatedGroupResources, agr)
+	}
+
+	m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources)
+	return nil
+}
+
+// findAPIGroupByName returns the API group by its name.
+func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) {
+	// Looking in the cache first.
+	{
+		m.mu.RLock()
+		group, ok := m.apiGroups[groupName]
+		m.mu.RUnlock()
+		if ok {
+			return group, nil
+		}
+	}
+
+	// Update the cache if nothing was found.
+	apiGroups, err := m.client.ServerGroups()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get server groups: %w", err)
+	}
+	if len(apiGroups.Groups) == 0 {
+		return nil, fmt.Errorf("received an empty API groups list")
+	}
+
+	m.mu.Lock()
+	for i := range apiGroups.Groups {
+		group := &apiGroups.Groups[i]
+		m.apiGroups[group.Name] = group
+	}
+	m.mu.Unlock()
+
+	// Looking in the cache again.
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	// Don't return an error here if the API group is not present.
+	// The reloaded RESTMapper will take care of returning a NoMatchError.
+	return m.apiGroups[groupName], nil
+}
+
+// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions.
+// This method might modify the cache so it needs to be called under the lock.
+func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) {
+	groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList)
+	failedGroups := make(map[schema.GroupVersion]error)
+
+	for _, version := range versions {
+		groupVersion := schema.GroupVersion{Group: groupName, Version: version}
+
+		apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String())
+		if apierrors.IsNotFound(err) {
+			// If the version is not found, we remove the group from the cache
+			// so it gets refreshed on the next call.
+			if m.isAPIGroupCached(groupVersion) {
+				delete(m.apiGroups, groupName)
+			}
+			if m.isGroupVersionCached(groupVersion) {
+				delete(m.knownGroups, groupName)
+			}
+			continue
+		} else if err != nil {
+			failedGroups[groupVersion] = err
+		}
+
+		if apiResourceList != nil {
+			// Even in case of error, some fallback might have been returned.
+			groupVersionResources[groupVersion] = apiResourceList
+		}
+	}
+
+	if len(failedGroups) > 0 {
+		err := ErrResourceDiscoveryFailed(failedGroups)
+		return nil, &err
+	}
+
+	return groupVersionResources, nil
+}
+
+// isGroupVersionCached checks if a version for a group is cached in the known groups cache.
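+// Both cache checks below are called from fetchGroupVersionResourcesLocked
+// with m.mu already held, so they take no locks themselves.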
+func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. +func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} + +// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. +// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for +// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. +type ErrResourceDiscoveryFailed map[schema.GroupVersion]error + +// Error implements the error interface. +func (e *ErrResourceDiscoveryFailed) Error() string { + subErrors := []string{} + for k, v := range *e { + subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(subErrors) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) +} + +func (e *ErrResourceDiscoveryFailed) Unwrap() []error { + subErrors := []error{} + for gv, err := range *e { + if apierrors.IsNotFound(err) { + err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} + } + subErrors = append(subErrors, err) + } + return subErrors +} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 333c0ec65..582c05c58 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -35,7 +35,6 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,6 +50,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" + + helmClient "helm.sh/helm/v4/internal/client" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -113,7 +114,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + restMapper, err := helmClient.NewLazyRESTMapper(cfg, httpClient) if err != nil { return nil, err } From ddc7baaacac3b1aeaf5f4dd4e3e029cac40282ee Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 4 Mar 2025 23:17:40 +0000 Subject: [PATCH 090/192] copyright things Signed-off-by: Austin Abro --- internal/client/client.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/client/client.go b/internal/client/client.go index b55ddb3f8..cb4ddb60e 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -1,4 +1,7 @@ /* +Copyright The Helm Authors. +This file was initially copied and modified from + https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go Copyright 2023 The Kubernetes Authors. 
 Licensed under the Apache License, Version 2.0 (the "License");

From 442200033027ce2512a5974a52993a7af4aac3d6 Mon Sep 17 00:00:00 2001
From: Robert Sirchia
Date: Wed, 5 Mar 2025 17:21:29 +0100
Subject: [PATCH 091/192] fix casing issues in logged error messages

Signed-off-by: Robert Sirchia
---
 pkg/chart/v2/util/dependencies.go | 6 +++---
 pkg/engine/engine.go              | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go
index 80268b45c..493bd31d6 100644
--- a/pkg/chart/v2/util/dependencies.go
+++ b/pkg/chart/v2/util/dependencies.go
@@ -51,7 +51,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
 				slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
 			} else if _, ok := err.(ErrNoValue); !ok {
 				// this is a real error
-				slog.Error("pathValue returned error", slog.Any("err", err))
+				slog.Error("the method PathValue returned error", slog.Any("err", err))
 			}
 		}
 	}
@@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
 				// get child table
 				vv, err := cvals.Table(r.Name + "." + child)
 				if err != nil {
-					slog.Error("importValues missing table from chart", "chart", r.Name, "value", err)
+					slog.Error("ImportValues missing table from chart", "chart", r.Name, "value", err)
 					continue
 				}
 				// create value map from child to be merged into parent
@@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
 			})
 			vm, err := cvals.Table(r.Name + "." + child)
 			if err != nil {
-				slog.Error("importValues missing table", slog.Any("err", err))
+				slog.Error("ImportValues missing table", slog.Any("err", err))
 				continue
 			}
 			if merge {
diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go
index d47606ee6..9c91fd43b 100644
--- a/pkg/engine/engine.go
+++ b/pkg/engine/engine.go
@@ -176,12 +176,12 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool
 		// text string. (Maybe we could use a hash appended to the name?)
 		t, err = t.New(parent.Name()).Parse(tpl)
 		if err != nil {
-			return "", errors.Wrapf(err, "Cannot parse template %q", tpl)
+			return "", errors.Wrapf(err, "cannot parse template %q", tpl)
 		}

 		var buf strings.Builder
 		if err := t.Execute(&buf, vals); err != nil {
-			return "", errors.Wrapf(err, "Error during tpl function execution for %q", tpl)
+			return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl)
 		}

 		// See comment in renderWithReferences explaining the hack.

From 2192c4e0d172ad2d200a8ccd496fef988335f5ca Mon Sep 17 00:00:00 2001
From: Robert Sirchia
Date: Wed, 5 Mar 2025 17:25:32 +0100
Subject: [PATCH 092/192] change error-level logs back to warnings

Signed-off-by: Robert Sirchia
---
 pkg/chart/v2/util/dependencies.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go
index 493bd31d6..9a77920b1 100644
--- a/pkg/chart/v2/util/dependencies.go
+++ b/pkg/chart/v2/util/dependencies.go
@@ -51,7 +51,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
 				slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
 			} else if _, ok := err.(ErrNoValue); !ok {
 				// this is a real error
-				slog.Error("the method PathValue returned error", slog.Any("err", err))
+				slog.Warn("the method PathValue returned error", slog.Any("err", err))
 			}
 		}
 	}
@@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
 				// get child table
 				vv, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Error("ImportValues missing table from chart", "chart", r.Name, "value", err) + slog.Warn("ImportValues missing table from chart", "chart", r.Name, "value", err) continue } // create value map from child to be merged into parent @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Error("ImportValues missing table", slog.Any("err", err)) + slog.Warn("ImportValues missing table", slog.Any("err", err)) continue } if merge { From 600947b32e6557ab6f5ebf44fb754abbb5e63d2a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:27:09 +0000 Subject: [PATCH 093/192] client->restmapper Signed-off-by: Austin Abro --- internal/{client/client.go => restmapper/restmapper.go} | 2 +- pkg/kube/client.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename internal/{client/client.go => restmapper/restmapper.go} (99%) diff --git a/internal/client/client.go b/internal/restmapper/restmapper.go similarity index 99% rename from internal/client/client.go rename to internal/restmapper/restmapper.go index cb4ddb60e..85b7c2a69 100644 --- a/internal/client/client.go +++ b/internal/restmapper/restmapper.go @@ -17,7 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package client +package restmapper import ( "fmt" diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 582c05c58..1244882aa 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -51,7 +51,7 @@ import ( "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - helmClient "helm.sh/helm/v4/internal/client" + helmRestmapper "helm.sh/helm/v4/internal/restmapper" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -114,7 +114,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := helmClient.NewLazyRESTMapper(cfg, httpClient) + restMapper, err := helmRestmapper.NewLazyRESTMapper(cfg, httpClient) if err != nil { return nil, err } From 2948279fb90bcb0d22e9f160f1f96b424ce74b7d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:29:47 +0000 Subject: [PATCH 094/192] cleanup if statement Signed-off-by: Austin Abro --- pkg/action/install.go | 6 ++---- pkg/action/upgrade.go | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index c96e1a0ff..be76a634f 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -289,10 +289,8 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if i.Wait == kube.HookOnlyStrategy { - if i.Atomic { - i.Wait = kube.StatusWatcherStrategy - } + if i.Wait == kube.HookOnlyStrategy && i.Atomic { + i.Wait = kube.StatusWatcherStrategy } if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { return nil, fmt.Errorf("failed to set kube client waiter: %w", err) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 851ac512a..ba5dfb5d1 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -155,10 +155,8 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. 
This makes it so // the user doesn't have to specify both - if u.Wait == kube.HookOnlyStrategy { - if u.Atomic { - u.Wait = kube.StatusWatcherStrategy - } + if u.Wait == kube.HookOnlyStrategy && u.Atomic { + u.Wait = kube.StatusWatcherStrategy } if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { return nil, fmt.Errorf("failed to set kube client waiter: %w", err) From e773a810eea2649d3cb52e2b140cc4492a94be26 Mon Sep 17 00:00:00 2001 From: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> Date: Fri, 7 Mar 2025 09:30:28 -0500 Subject: [PATCH 095/192] Update pkg/cmd/flags.go Co-authored-by: George Jenkins Signed-off-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- pkg/cmd/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 044b19e04..0fcad59fa 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -56,7 +56,7 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if set, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are true, false, watcher, and legacy", + "if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'", ) // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) From 0dffe83ef3299aaa3e2e17a76743ad791c40f559 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:35:44 +0000 Subject: [PATCH 096/192] warnings Signed-off-by: Austin Abro --- pkg/cmd/flags.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 0fcad59fa..ed3b83a55 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -82,13 +82,15 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(s) return nil case "true": + Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": + Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") *ws = waitValue(kube.HookOnlyStrategy) return nil default: - return fmt.Errorf("invalid wait input %q. Valid inputs are true, false, %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) + return fmt.Errorf("invalid wait input %q. 
Valid inputs are %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy) } } From 10f78c814cd1c7d0b784a9371bcf56c1609ceece Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Fri, 7 Mar 2025 14:37:04 +0000 Subject: [PATCH 097/192] legacy waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 2 +- pkg/kube/wait.go | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 1244882aa..61e681ad3 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -132,7 +132,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { if err != nil { return nil, err } - return &HelmWaiter{kubeClient: kc, log: c.Log}, nil + return &legacyWaiter{kubeClient: kc, log: c.Log}, nil case StatusWatcherStrategy: return c.newStatusWatcher() case HookOnlyStrategy: diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index a7e3a1c7e..9aeb93451 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -45,27 +45,27 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -// HelmWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 +// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 // Helm 4 now uses the StatusWaiter implementation instead -type HelmWaiter struct { +type legacyWaiter struct { c ReadyChecker log func(string, ...interface{}) kubeClient *kubernetes.Clientset } -func (hw *HelmWaiter) Wait(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error { hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true)) return hw.waitForResources(resources, timeout) } -func (hw *HelmWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error { hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true)) return hw.waitForResources(resources, timeout) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error { hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -99,7 +99,7 @@ func (hw *HelmWaiter) waitForResources(created ResourceList, timeout time.Durati }) } -func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool { +func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool { if err == nil { return false } @@ -114,12 +114,12 @@ func (hw *HelmWaiter) isRetryableError(err error, resource *resource.Info) bool return true } -func (hw *HelmWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { +func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) } // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached -func (hw *HelmWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout 
time.Duration) error { hw.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -184,7 +184,7 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er return selector, errors.Wrap(err, "invalid label selector") } -func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { +func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { return func(info *resource.Info) error { return hw.watchUntilReady(t, info) } @@ -204,7 +204,7 @@ func (hw *HelmWaiter) watchTimeout(t time.Duration) func(*resource.Info) error { // ascertained by watching the status.phase field in a pod's output. // // Handling for other kinds will be added as necessary. -func (hw *HelmWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { +func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error { // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 return perform(resources, hw.watchTimeout(timeout)) @@ -230,7 +230,7 @@ func perform(infos ResourceList, fn func(*resource.Info) error) error { return result } -func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { +func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { kind := info.Mapping.GroupVersionKind.Kind switch kind { case "Job", "Pod": @@ -291,7 +291,7 @@ func (hw *HelmWaiter) watchUntilReady(timeout time.Duration, info *resource.Info // waitForJob is a helper that waits for a job to complete. // // This operates on an event returned from a watcher. -func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { +func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*batchv1.Job) if !ok { return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) @@ -312,7 +312,7 @@ func (hw *HelmWaiter) waitForJob(obj runtime.Object, name string) (bool, error) // waitForPodSuccess is a helper that waits for a pod to complete. // // This operates on an event returned from a watcher. 
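For reference, the retry predicate isRetryableHTTPStatusCode renamed in the hunks above reduces to: retry on a missing status code (0), on 429 Too Many Requests, and on any 5xx except 501 Not Implemented. A minimal standalone sketch of that check (the isRetryable name and the sample codes are illustrative only, not part of the patch):

package main

import (
	"fmt"
	"net/http"
)

// isRetryable mirrors the waiter's predicate: retry on no response (0),
// on 429 Too Many Requests, and on any 5xx except 501 Not Implemented.
func isRetryable(code int32) bool {
	return code == 0 || code == http.StatusTooManyRequests ||
		(code >= 500 && code != http.StatusNotImplemented)
}

func main() {
	for _, c := range []int32{0, 404, 429, 500, 501, 503} {
		fmt.Printf("%d retryable=%v\n", c, isRetryable(c))
	}
	// Prints: 0 true, 404 false, 429 true, 500 true, 501 false, 503 true
}

The 501 carve-out makes sense because a server that does not implement a verb will never succeed on a retry.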
-func (hw *HelmWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { +func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { o, ok := obj.(*corev1.Pod) if !ok { return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) From 3a195763778f52fe6cd5a9e1f478f6a232b3d15d Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Mon, 10 Mar 2025 15:40:10 -0400 Subject: [PATCH 098/192] making changes as requested by matt Signed-off-by: Robert Sirchia --- pkg/chart/v2/util/dependencies.go | 4 ++-- pkg/engine/lookup_func.go | 4 ++-- pkg/plugin/installer/vcs_installer.go | 4 ++-- pkg/repo/chartrepo.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 9a77920b1..6c9da4430 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -51,7 +51,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s slog.Warn("returned non-bool value", "path", c, "chart", r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error - slog.Warn("the method PathValue returned error", slog.Any("err", err)) + slog.Warn("the method PathValue returned error", slog.Any("error", err)) } } } @@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error { }) vm, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Warn("ImportValues missing table", slog.Any("err", err)) + slog.Warn("ImportValues missing table", slog.Any("error", err)) continue } if merge { diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index b36e6a7ef..89f2707ec 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -111,7 +111,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) } intf, err := dynamic.NewForConfig(config) if err != nil { - slog.Error("unable to get dynamic client", slog.Any("err", err)) + slog.Error("unable to get dynamic client", slog.Any("error", err)) return nil, false, err } res := intf.Resource(gvr) @@ -122,7 +122,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met res := metav1.APIResource{} discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - slog.Error("unable to create discovery client", slog.Any("err", err)) + slog.Error("unable to create discovery client", slog.Any("error", err)) return res, err } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index 049775094..d1b704d6e 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -95,7 +95,7 @@ func (i *VCSInstaller) Install() error { // Update updates a remote repository func (i *VCSInstaller) Update() error { - slog.Debug("updating", "repo", i.Repo.Remote()) + slog.Debug("updating", "source", i.Repo.Remote()) if i.Repo.IsDirty() { return errors.New("plugin repo was modified") } @@ -160,7 +160,7 @@ func (i *VCSInstaller) sync(repo vcs.Repo) error { slog.Debug("cloning", "source", repo.Remote(), "destination", repo.LocalPath()) return repo.Get() } - slog.Debug("updating", "remote", repo.Remote()) + slog.Debug("updating", "source", repo.Remote(), "destination", repo.LocalPath()) return repo.Update() } diff --git a/pkg/repo/chartrepo.go b/pkg/repo/chartrepo.go index 766e31a61..3fe5383f3 100644 --- 
a/pkg/repo/chartrepo.go
+++ b/pkg/repo/chartrepo.go
@@ -343,7 +343,7 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) {
 func (e *Entry) String() string {
 	buf, err := json.Marshal(e)
 	if err != nil {
-		slog.Error("failed to marshal entry", slog.Any("err", err))
+		slog.Error("failed to marshal entry", slog.Any("error", err))
 		panic(err)
 	}
 	return string(buf)

From 788652fd276cf69e8e188a96e8d1331ab24ad9ae Mon Sep 17 00:00:00 2001
From: Laszlo Uveges
Date: Mon, 12 Sep 2022 15:50:50 +0200
Subject: [PATCH 099/192] Add dummy test case to prove that not all hooks are deleted on failure

Signed-off-by: Laszlo Uveges
---
 pkg/action/hooks_test.go | 319 +++++++++++++++++----------------------
 1 file changed, 139 insertions(+), 180 deletions(-)

diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go
index 38f25d9ab..0849574cb 100644
--- a/pkg/action/hooks_test.go
+++ b/pkg/action/hooks_test.go
@@ -1,208 +1,167 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
 package action

 import (
-	"bytes"
-	"fmt"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/kube"
+	kubefake "helm.sh/helm/v3/pkg/kube/fake"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/storage"
+	"helm.sh/helm/v3/pkg/storage/driver"
 	"io"
+	"io/ioutil"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/cli-runtime/pkg/resource"
+	"reflect"
 	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	chart "helm.sh/helm/v4/pkg/chart/v2"
-	kubefake "helm.sh/helm/v4/pkg/kube/fake"
-	release "helm.sh/helm/v4/pkg/release/v1"
+	"time"
 )

-func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string {
-	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
-	return fmt.Sprintf(`kind: Pod
-metadata:
-  name: finding-sharky,
-  annotations:
-    "helm.sh/hook": pre-install
-    "helm.sh/hook-output-log-policy": %s
-spec:
-  containers:
-  - name: sharky-test
-    image: fake-image
-    cmd: fake-command`, hookDefinitionString)
-}
+type HookFailedError struct{}

-func podManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string {
-	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
-	return fmt.Sprintf(`kind: Pod
-metadata:
-  name: finding-george
-  namespace: sneaky-namespace
-  annotations:
-    "helm.sh/hook": pre-install
-    "helm.sh/hook-output-log-policy": %s
-spec:
-  containers:
-  - name: george-test
-    image: fake-image
-    cmd: fake-command`, hookDefinitionString)
+func (e *HookFailedError) Error() string {
+	return "Hook failed!"
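As an aside on the sentinel error type introduced above: a caller that receives this error wrapped (for example, from the failing WatchUntilReady) can match it with errors.As. A minimal, self-contained sketch; the wrapping and variable names are hypothetical, not from the patch:

package main

import (
	"errors"
	"fmt"
)

// HookFailedError mirrors the sentinel type defined in the test above.
type HookFailedError struct{}

func (e *HookFailedError) Error() string { return "Hook failed!" }

func main() {
	// Simulate a failure surfaced through an error chain.
	var err error = fmt.Errorf("watch failed: %w", &HookFailedError{})

	var hookErr *HookFailedError
	if errors.As(err, &hookErr) {
		fmt.Println("matched sentinel:", hookErr) // matched sentinel: Hook failed!
	}
}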
} -func jobManifestWithOutputLog(hookDefinitions []release.HookOutputLogPolicy) string { - hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) - return fmt.Sprintf(`kind: Job -apiVersion: batch/v1 -metadata: - name: losing-religion - annotations: - "helm.sh/hook": pre-install - "helm.sh/hook-output-log-policy": %s -spec: - completions: 1 - parallelism: 1 - activeDeadlineSeconds: 30 - template: - spec: - containers: - - name: religion-container - image: religion-image - cmd: religion-command`, hookDefinitionString) +type HookFailingKubeClient struct { + kubefake.PrintingKubeClient + failOn resource.Info + deleteRecord []resource.Info } -func jobManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string { - hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) - return fmt.Sprintf(`kind: Job -apiVersion: batch/v1 -metadata: - name: losing-religion - namespace: rem-namespace - annotations: - "helm.sh/hook": pre-install - "helm.sh/hook-output-log-policy": %s -spec: - completions: 1 - parallelism: 1 - activeDeadlineSeconds: 30 - template: - spec: - containers: - - name: religion-container - image: religion-image - cmd: religion-command`, hookDefinitionString) -} +func (_ *HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) { + configMap := &v1.ConfigMap{} -func convertHooksToCommaSeparated(hookDefinitions []release.HookOutputLogPolicy) string { - var commaSeparated string - for i, policy := range hookDefinitions { - if i+1 == len(hookDefinitions) { - commaSeparated += policy.String() - } else { - commaSeparated += policy.String() + "," - } + err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap) + + if err != nil { + return kube.ResourceList{}, err } - return commaSeparated -} -func TestInstallRelease_HookOutputLogsOnFailure(t *testing.T) { - // Should output on failure with expected namespace if hook-failed is set - runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "sneaky-namespace", true) - runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "rem-namespace", true) - - // Should not output on failure with expected namespace if hook-succeed is set - runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) - runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) - runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) - runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) + return kube.ResourceList{{ + Name: configMap.Name, + Namespace: configMap.Namespace, + }}, nil } -func TestInstallRelease_HookOutputLogsOnSuccess(t *testing.T) { - // Should output on success with expected namespace if hook-succeeded is set - runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), 
"spaced", true) - runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "sneaky-namespace", true) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "rem-namespace", true) - - // Should not output on success if hook-failed is set - runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) - runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) -} +func (h *HookFailingKubeClient) WatchUntilReady(resources kube.ResourceList, duration time.Duration) error { + for _, res := range resources { + if res.Name == h.failOn.Name && res.Namespace == h.failOn.Namespace { + return &HookFailedError{} + } + } -func TestInstallRelease_HooksOutputLogsOnSuccessAndFailure(t *testing.T) { - // Should output on success with expected namespace if hook-succeeded and hook-failed is set - runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true) - - // Should output on failure if hook-succeeded and hook-failed is set - runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true) - runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) - runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true) + return h.PrintingKubeClient.WatchUntilReady(resources, duration) } -func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) { - var expectedOutput string - if shouldOutput { - expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace) +func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + for _, res := range resources { + h.deleteRecord = append(h.deleteRecord, resource.Info{ + Name: res.Name, + Namespace: 
res.Namespace, + }) } - is := assert.New(t) - instAction := installAction(t) - instAction.ReleaseName = "failed-hooks" - outBuffer := &bytes.Buffer{} - instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} - - templates := []*chart.File{ - {Name: "templates/hello", Data: []byte("hello: world")}, - {Name: "templates/hooks", Data: []byte(manifest)}, - } - vals := map[string]interface{}{} - res, err := instAction.Run(buildChartWithTemplates(templates), vals) - is.NoError(err) - is.Equal(expectedOutput, outBuffer.String()) - is.Equal(release.StatusDeployed, res.Info.Status) + return h.PrintingKubeClient.Delete(resources) } -func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) { - var expectedOutput string - if shouldOutput { - expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace) +func TestHooksCleanUp(t *testing.T) { + hookKubeClient := &HookFailingKubeClient{kubefake.PrintingKubeClient{Out: ioutil.Discard}, resource.Info{ + Name: "build-config-2", + Namespace: "test", + }, []resource.Info{}} + + configuration := &Configuration{ + Releases: storage.Init(driver.NewMemory()), + KubeClient: hookKubeClient, + Capabilities: chartutil.DefaultCapabilities, + Log: func(format string, v ...interface{}) { + t.Helper() + if *verbose { + t.Logf(format, v...) + } + }, + } + + hookEvent := release.HookPreInstall + + r := &release.Release{ + Name: "test-release", + Namespace: "test", + Hooks: []*release.Hook{ + { + Name: "hook-1", + Kind: "ConfigMap", + Path: "templates/service_account.yaml", + Manifest: `apiVersion: v1 +kind: ConfigMap +metadata: + name: build-config-1 + namespace: test +data: + foo: bar +`, + Weight: -5, + Events: []release.HookEvent{ + hookEvent, + }, + DeletePolicies: []release.HookDeletePolicy{ + release.HookBeforeHookCreation, + release.HookSucceeded, + release.HookFailed, + }, + LastRun: release.HookExecution{ + Phase: release.HookPhaseSucceeded, + }, + }, + { + Name: "hook-2", + Kind: "ConfigMap", + Path: "templates/job.yaml", + Manifest: `apiVersion: v1 +kind: ConfigMap +metadata: + name: build-config-2 + namespace: test +data: + foo: bar +`, + Weight: 0, + Events: []release.HookEvent{ + hookEvent, + }, + DeletePolicies: []release.HookDeletePolicy{ + release.HookBeforeHookCreation, + release.HookSucceeded, + release.HookFailed, + }, + LastRun: release.HookExecution{ + Phase: release.HookPhaseFailed, + }, + }, + }, } - is := assert.New(t) - instAction := installAction(t) - instAction.ReleaseName = "failed-hooks" - failingClient := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failingClient.WatchUntilReadyError = fmt.Errorf("failed watch") - instAction.cfg.KubeClient = failingClient - outBuffer := &bytes.Buffer{} - failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} - - templates := []*chart.File{ - {Name: "templates/hello", Data: []byte("hello: world")}, - {Name: "templates/hooks", Data: []byte(manifest)}, + + _ = configuration.execHook(r, hookEvent, 600) + + if !reflect.DeepEqual(hookKubeClient.deleteRecord, []resource.Info{ + { + Name: "build-config-1", + Namespace: "test", + }, + { + Name: "build-config-2", + Namespace: "test", + }, + { + Name: "build-config-2", + Namespace: "test", + }, + }) { + t.Fatalf("Got unexpected delete record") } - vals := map[string]interface{}{} - res, err := instAction.Run(buildChartWithTemplates(templates), vals) - is.Error(err) - 
is.Contains(res.Info.Description, "failed pre-install") - is.Equal(expectedOutput, outBuffer.String()) - is.Equal(release.StatusFailed, res.Info.Status) + //if err != nil { + // t.Fatalf("An expected error occured: %#v", err) + //} } From 0a28223ae57309f4bef7724e344fc9e8586506b1 Mon Sep 17 00:00:00 2001 From: Laszlo Uveges Date: Mon, 12 Sep 2022 16:47:07 +0200 Subject: [PATCH 100/192] Restructure hooks tests to be reusable Signed-off-by: Laszlo Uveges --- pkg/action/hooks_test.go | 169 ++++++++++++++++++++++----------------- 1 file changed, 95 insertions(+), 74 deletions(-) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 0849574cb..0a3df94ef 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -66,34 +66,26 @@ func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Resul } func TestHooksCleanUp(t *testing.T) { - hookKubeClient := &HookFailingKubeClient{kubefake.PrintingKubeClient{Out: ioutil.Discard}, resource.Info{ - Name: "build-config-2", - Namespace: "test", - }, []resource.Info{}} - - configuration := &Configuration{ - Releases: storage.Init(driver.NewMemory()), - KubeClient: hookKubeClient, - Capabilities: chartutil.DefaultCapabilities, - Log: func(format string, v ...interface{}) { - t.Helper() - if *verbose { - t.Logf(format, v...) - } - }, - } - hookEvent := release.HookPreInstall - r := &release.Release{ - Name: "test-release", - Namespace: "test", - Hooks: []*release.Hook{ - { - Name: "hook-1", - Kind: "ConfigMap", - Path: "templates/service_account.yaml", - Manifest: `apiVersion: v1 + testCases := []struct { + name string + inputRelease release.Release + failOn resource.Info + expectedDeleteRecord []resource.Info + expectError bool + }{ + { + "Deletion hook runs for previously successful hook on failure of a heavier weight hook", + release.Release{ + Name: "test-release", + Namespace: "test", + Hooks: []*release.Hook{ + { + Name: "hook-1", + Kind: "ConfigMap", + Path: "templates/service_account.yaml", + Manifest: `apiVersion: v1 kind: ConfigMap metadata: name: build-config-1 @@ -101,24 +93,24 @@ metadata: data: foo: bar `, - Weight: -5, - Events: []release.HookEvent{ - hookEvent, - }, - DeletePolicies: []release.HookDeletePolicy{ - release.HookBeforeHookCreation, - release.HookSucceeded, - release.HookFailed, - }, - LastRun: release.HookExecution{ - Phase: release.HookPhaseSucceeded, - }, - }, - { - Name: "hook-2", - Kind: "ConfigMap", - Path: "templates/job.yaml", - Manifest: `apiVersion: v1 + Weight: -5, + Events: []release.HookEvent{ + hookEvent, + }, + DeletePolicies: []release.HookDeletePolicy{ + release.HookBeforeHookCreation, + release.HookSucceeded, + release.HookFailed, + }, + LastRun: release.HookExecution{ + Phase: release.HookPhaseSucceeded, + }, + }, + { + Name: "hook-2", + Kind: "ConfigMap", + Path: "templates/job.yaml", + Manifest: `apiVersion: v1 kind: ConfigMap metadata: name: build-config-2 @@ -126,42 +118,71 @@ metadata: data: foo: bar `, - Weight: 0, - Events: []release.HookEvent{ - hookEvent, + Weight: 0, + Events: []release.HookEvent{ + hookEvent, + }, + DeletePolicies: []release.HookDeletePolicy{ + release.HookBeforeHookCreation, + release.HookSucceeded, + release.HookFailed, + }, + LastRun: release.HookExecution{ + Phase: release.HookPhaseFailed, + }, + }, + }, + }, resource.Info{ + Name: "build-config-2", + Namespace: "test", + }, []resource.Info{ + { + Name: "build-config-1", + Namespace: "test", }, - DeletePolicies: []release.HookDeletePolicy{ - release.HookBeforeHookCreation, - 
release.HookSucceeded,
-				release.HookFailed,
+			{
+				Name:      "build-config-2",
+				Namespace: "test",
 			},
-			LastRun: release.HookExecution{
-				Phase: release.HookPhaseFailed,
+			{
+				Name:      "build-config-2",
+				Namespace: "test",
 			},
-		},
+		}, true,
 	},
 }

-	_ = configuration.execHook(r, hookEvent, 600)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			kubeClient := &HookFailingKubeClient{
+				kubefake.PrintingKubeClient{Out: ioutil.Discard}, tc.failOn, []resource.Info{},
+			}

-	if !reflect.DeepEqual(hookKubeClient.deleteRecord, []resource.Info{
-		{
-			Name:      "build-config-1",
-			Namespace: "test",
-		},
-		{
-			Name:      "build-config-2",
-			Namespace: "test",
-		},
-		{
-			Name:      "build-config-2",
-			Namespace: "test",
-		},
-	}) {
-		t.Fatalf("Got unexpected delete record")
-	}
+			configuration := &Configuration{
+				Releases:     storage.Init(driver.NewMemory()),
+				KubeClient:   kubeClient,
+				Capabilities: chartutil.DefaultCapabilities,
+				Log: func(format string, v ...interface{}) {
+					t.Helper()
+					if *verbose {
+						t.Logf(format, v...)
+					}
+				},
+			}
+
+			err := configuration.execHook(&tc.inputRelease, hookEvent, 600)

-	//if err != nil {
-	//	t.Fatalf("An expected error occured: %#v", err)
-	//}
+			if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
+				t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", tc.expectedDeleteRecord, kubeClient.deleteRecord)
+			}
+
+			if err != nil && !tc.expectError {
+				t.Fatalf("Got an unexpected error.")
+			}
+
+			if err == nil && tc.expectError {
+				t.Fatalf("Expected an error but did not get it.")
+			}
+		})
+	}
 }

From 2eea520ba47af957e23247c3175d558515309c1a Mon Sep 17 00:00:00 2001
From: Laszlo Uveges
Date: Mon, 12 Sep 2022 16:52:14 +0200
Subject: [PATCH 101/192] Delete previously successful hooks when a later hook fails

Signed-off-by: Laszlo Uveges
---
 pkg/action/hooks.go      | 110 ++++++++-------------------------
 pkg/action/hooks_test.go |   8 +++
 2 files changed, 31 insertions(+), 87 deletions(-)

diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 230e9ec81..4fbe28bbe 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -17,21 +17,13 @@ package action

 import (
 	"bytes"
-	"fmt"
-	"log"
-	"slices"
 	"sort"
 	"time"

-	"helm.sh/helm/v4/pkg/kube"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	"github.com/pkg/errors"
-	"gopkg.in/yaml.v3"

-	release "helm.sh/helm/v4/pkg/release/v1"
-	helmtime "helm.sh/helm/v4/pkg/time"
+	"helm.sh/helm/v3/pkg/release"
+	helmtime "helm.sh/helm/v3/pkg/time"
 )

 // execHook executes all of the hooks for the given hook event.
@@ -49,9 +41,9 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 	// hooks are pre-ordered by kind, so keep order stable
 	sort.Stable(hookByWeight(executingHooks))

-	for _, h := range executingHooks {
+	for i, h := range executingHooks {
 		// Set default delete policy to before-hook-creation
-		if len(h.DeletePolicies) == 0 {
+		if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 {
 			// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
 			// resources.
For all other resource types update in place if a // resource with the same name already exists and is owned by the @@ -59,7 +51,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} } - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil { return err } @@ -94,33 +86,27 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Mark hook as succeeded or failed if err != nil { h.LastRun.Phase = release.HookPhaseFailed - // If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side - if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil { - // We log the error here as we want to propagate the hook failure upwards to the release object. - log.Printf("error outputting logs for hook failure: %v", errOutputting) - } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { - // We log the error here as we want to propagate the hook failure upwards to the release object. - log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) + if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil { + return err } + + // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hook + // should be deleted under succeeded condition. + if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded); err != nil { + return err + } + return err } h.LastRun.Phase = release.HookPhaseSucceeded } // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook - for i := len(executingHooks) - 1; i >= 0; i-- { - h := executingHooks[i] - if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { - // We log here as we still want to attempt hook resource deletion even if output logging fails. - log.Printf("error outputting logs for hook failure: %v", err) - } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { - return err - } + // under succeeded condition. If so, then clear the corresponding resource object in each hook + if err := cfg.deleteHooksByPolicy(executingHooks, release.HookSucceeded); err != nil { + return err } return nil @@ -139,7 +125,7 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error { // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. 
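For context on the policy checks in this hunk: a hook opts into cleanup through the helm.sh/hook-delete-policy annotation, whose values correspond to the release.HookDeletePolicy constants used here. A minimal standalone sketch of the membership test, with simplified stand-in types (not the real release package):

package main

import (
	"fmt"
	"slices"
)

// HookDeletePolicy is a simplified stand-in for release.HookDeletePolicy.
type HookDeletePolicy string

// Values of the helm.sh/hook-delete-policy annotation.
const (
	HookSucceeded          HookDeletePolicy = "hook-succeeded"
	HookFailed             HookDeletePolicy = "hook-failed"
	HookBeforeHookCreation HookDeletePolicy = "before-hook-creation"
)

// Hook is a simplified stand-in for release.Hook.
type Hook struct {
	Name           string
	DeletePolicies []HookDeletePolicy
}

func main() {
	h := Hook{Name: "db-init", DeletePolicies: []HookDeletePolicy{HookBeforeHookCreation, HookSucceeded}}
	fmt.Println(slices.Contains(h.DeletePolicies, HookSucceeded)) // true
	fmt.Println(slices.Contains(h.DeletePolicies, HookFailed))    // false
}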
if h.Kind == "CustomResourceDefinition" { @@ -154,13 +140,6 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo if len(errs) > 0 { return errors.New(joinErrors(errs)) } - - // wait for resources until they are deleted to avoid conflicts - if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { - if err := kubeClient.WaitForDelete(resources, timeout); err != nil { - return err - } - } } return nil } @@ -176,56 +155,13 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool return false } -// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to -func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error { - if !hookHasOutputLogPolicy(h, policy) { - return nil - } - namespace, err := cfg.deriveNamespace(h, releaseNamespace) - if err != nil { - return err - } - switch h.Kind { - case "Job": - return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)}) - case "Pod": - return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)}) - default: - return nil - } -} - -func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error { - // TODO Helm 4: Remove this check when GetPodList and OutputContainerLogsForPodList are moved from InterfaceLogs to Interface - if kubeClient, ok := cfg.KubeClient.(kube.InterfaceLogs); ok { - podList, err := kubeClient.GetPodList(namespace, listOptions) - if err != nil { +// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to +func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy) error { + for _, h := range hooks { + if err := cfg.deleteHookByPolicy(h, policy); err != nil { return err } - err = kubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc) - return err } - return nil -} - -func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) { - tmp := struct { - Metadata struct { - Namespace string - } - }{} - err := yaml.Unmarshal([]byte(h.Manifest), &tmp) - if err != nil { - return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path) - } - if tmp.Metadata.Namespace == "" { - return namespace, nil - } - return tmp.Metadata.Namespace, nil -} -// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies -// supported by helm. 
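The deriveNamespace helper removed in this hunk (it is restored later in the series) prefers a namespace embedded in the hook manifest and falls back to the release namespace otherwise. A minimal standalone sketch of that lookup, assuming only gopkg.in/yaml.v3; the sample manifests are illustrative:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// deriveNamespace returns metadata.namespace from a manifest when set,
// otherwise the given fallback (the release namespace).
func deriveNamespace(manifest, fallback string) (string, error) {
	tmp := struct {
		Metadata struct {
			Namespace string
		}
	}{}
	if err := yaml.Unmarshal([]byte(manifest), &tmp); err != nil {
		return "", err
	}
	if tmp.Metadata.Namespace == "" {
		return fallback, nil
	}
	return tmp.Metadata.Namespace, nil
}

func main() {
	ns, _ := deriveNamespace("kind: Pod\nmetadata:\n  name: p\n", "default")
	fmt.Println(ns) // default
	ns, _ = deriveNamespace("kind: Pod\nmetadata:\n  namespace: hooks-ns\n", "default")
	fmt.Println(ns) // hooks-ns
}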
-func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool { - return slices.Contains(h.OutputLogPolicies, policy) + return nil } diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 0a3df94ef..bb9f39c1e 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -137,17 +137,25 @@ data: Namespace: "test", }, []resource.Info{ { + // This should be in the record for `before-hook-creation` Name: "build-config-1", Namespace: "test", }, { + // This should be in the record for `before-hook-creation` Name: "build-config-2", Namespace: "test", }, { + // This should be in the record for cleaning up (the failure first) Name: "build-config-2", Namespace: "test", }, + { + // This should be in the record for cleaning up (then the previously successful) + Name: "build-config-1", + Namespace: "test", + }, }, true, }, } From 940966d9c82125f095cb4e1c725902551686822e Mon Sep 17 00:00:00 2001 From: Laszlo Uveges Date: Mon, 12 Sep 2022 17:19:25 +0200 Subject: [PATCH 102/192] Fix formatting issues Signed-off-by: Laszlo Uveges --- pkg/action/hooks_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index bb9f39c1e..75a97d031 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -29,7 +29,7 @@ type HookFailingKubeClient struct { deleteRecord []resource.Info } -func (_ *HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) { +func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) { configMap := &v1.ConfigMap{} err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap) From d03981b82c84f1682d4f6f8dbeb1dcb9a1025df8 Mon Sep 17 00:00:00 2001 From: Laszlo Uveges Date: Mon, 12 Sep 2022 17:34:30 +0200 Subject: [PATCH 103/192] Fix go imports Signed-off-by: Laszlo Uveges --- pkg/action/hooks_test.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 75a97d031..49c53ceac 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -1,20 +1,22 @@ package action import ( + "io" + "io/ioutil" + "reflect" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/cli-runtime/pkg/resource" + "helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/kube" kubefake "helm.sh/helm/v3/pkg/kube/fake" "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" - "io" - "io/ioutil" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/cli-runtime/pkg/resource" - "reflect" - "testing" - "time" ) type HookFailedError struct{} From acca1b04eb800823678f0386a7d85ecabd161af1 Mon Sep 17 00:00:00 2001 From: Laszlo Uveges Date: Mon, 12 Sep 2022 17:40:58 +0200 Subject: [PATCH 104/192] Add missing license header Signed-off-by: Laszlo Uveges --- pkg/action/hooks_test.go | 60 +++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 49c53ceac..9c279bc1a 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -1,3 +1,19 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package action import ( @@ -135,30 +151,30 @@ data: }, }, }, resource.Info{ + Name: "build-config-2", + Namespace: "test", + }, []resource.Info{ + { + // This should be in the record for `before-hook-creation` + Name: "build-config-1", + Namespace: "test", + }, + { + // This should be in the record for `before-hook-creation` Name: "build-config-2", Namespace: "test", - }, []resource.Info{ - { - // This should be in the record for `before-hook-creation` - Name: "build-config-1", - Namespace: "test", - }, - { - // This should be in the record for `before-hook-creation` - Name: "build-config-2", - Namespace: "test", - }, - { - // This should be in the record for cleaning up (the failure first) - Name: "build-config-2", - Namespace: "test", - }, - { - // This should be in the record for cleaning up (then the previously successful) - Name: "build-config-1", - Namespace: "test", - }, - }, true, + }, + { + // This should be in the record for cleaning up (the failure first) + Name: "build-config-2", + Namespace: "test", + }, + { + // This should be in the record for cleaning up (then the previously successful) + Name: "build-config-1", + Namespace: "test", + }, + }, true, }, } From 63b615316325ced44b55ceb7a01f9185c6b8600d Mon Sep 17 00:00:00 2001 From: Laszlo Uveges Date: Mon, 12 Sep 2022 17:45:43 +0200 Subject: [PATCH 105/192] More formatting Signed-off-by: Laszlo Uveges --- pkg/action/hooks_test.go | 44 ++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 9c279bc1a..3ef1c17cb 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -151,30 +151,30 @@ data: }, }, }, resource.Info{ - Name: "build-config-2", - Namespace: "test", - }, []resource.Info{ - { - // This should be in the record for `before-hook-creation` - Name: "build-config-1", - Namespace: "test", - }, - { - // This should be in the record for `before-hook-creation` - Name: "build-config-2", - Namespace: "test", - }, - { - // This should be in the record for cleaning up (the failure first) Name: "build-config-2", Namespace: "test", - }, - { - // This should be in the record for cleaning up (then the previously successful) - Name: "build-config-1", - Namespace: "test", - }, - }, true, + }, []resource.Info{ + { + // This should be in the record for `before-hook-creation` + Name: "build-config-1", + Namespace: "test", + }, + { + // This should be in the record for `before-hook-creation` + Name: "build-config-2", + Namespace: "test", + }, + { + // This should be in the record for cleaning up (the failure first) + Name: "build-config-2", + Namespace: "test", + }, + { + // This should be in the record for cleaning up (then the previously successful) + Name: "build-config-1", + Namespace: "test", + }, + }, true, }, } From aa9e4bb42dfc99b4d5144bbcf6f2cf9b2395ba2b Mon Sep 17 00:00:00 2001 From: Gerard Nguyen Date: Sat, 15 Mar 2025 09:11:45 +1100 Subject: [PATCH 106/192] rebase Signed-off-by: Gerard Nguyen --- pkg/action/hooks.go | 116 +++++++++++++++++++---- pkg/action/hooks_test.go | 197 
+++++++++++++++++++++++++++++++++++++-- 2 files changed, 288 insertions(+), 25 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 4fbe28bbe..d9c0eb085 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -17,13 +17,21 @@ package action import ( "bytes" + "fmt" + "log" + "slices" "sort" "time" + "helm.sh/helm/v4/pkg/kube" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/pkg/errors" + "gopkg.in/yaml.v3" - "helm.sh/helm/v3/pkg/release" - helmtime "helm.sh/helm/v3/pkg/time" + release "helm.sh/helm/v4/pkg/release/v1" + helmtime "helm.sh/helm/v4/pkg/time" ) // execHook executes all of the hooks for the given hook event. @@ -43,7 +51,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, for i, h := range executingHooks { // Set default delete policy to before-hook-creation - if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 { + if len(h.DeletePolicies) == 0 { // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion // resources. For all other resource types update in place if a // resource with the same name already exists and is owned by the @@ -51,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} } - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { return err } @@ -86,15 +94,21 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Mark hook as succeeded or failed if err != nil { h.LastRun.Phase = release.HookPhaseFailed + // If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side + if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error outputting logs for hook failure: %v", errOutputting) + } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil { - return err + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) } - // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hook + // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks // should be deleted under succeeded condition. - if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded); err != nil { + if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, timeout); err != nil { return err } @@ -104,9 +118,16 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // under succeeded condition. 
If so, then clear the corresponding resource object in each hook - if err := cfg.deleteHooksByPolicy(executingHooks, release.HookSucceeded); err != nil { - return err + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook + for i := len(executingHooks) - 1; i >= 0; i-- { + h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. + log.Printf("error outputting logs for hook failure: %v", err) + } + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { + return err + } } return nil @@ -125,7 +146,7 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. if h.Kind == "CustomResourceDefinition" { @@ -140,7 +161,25 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo if len(errs) > 0 { return errors.New(joinErrors(errs)) } + + // wait for resources until they are deleted to avoid conflicts + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(resources, timeout); err != nil { + return err + } + } + } + return nil +} + +// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to +func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { + for _, h := range hooks { + if err := cfg.deleteHookByPolicy(h, policy, timeout); err != nil { + return err + } } + return nil } @@ -155,13 +194,56 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool return false } -// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to -func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy) error { - for _, h := range hooks { - if err := cfg.deleteHookByPolicy(h, policy); err != nil { +// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to +func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error { + if !hookHasOutputLogPolicy(h, policy) { + return nil + } + namespace, err := cfg.deriveNamespace(h, releaseNamespace) + if err != nil { + return err + } + switch h.Kind { + case "Job": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)}) + case "Pod": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)}) + default: + return nil + } +} + +func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error { + // TODO Helm 4: Remove this check when GetPodList and OutputContainerLogsForPodList are moved from InterfaceLogs to Interface + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceLogs); ok { + podList, err := kubeClient.GetPodList(namespace, listOptions) + if err != nil { return err } + err = 
kubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc) + return err } - return nil } + +func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) { + tmp := struct { + Metadata struct { + Namespace string + } + }{} + err := yaml.Unmarshal([]byte(h.Manifest), &tmp) + if err != nil { + return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path) + } + if tmp.Metadata.Namespace == "" { + return namespace, nil + } + return tmp.Metadata.Namespace, nil +} + +// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies +// supported by helm. +func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool { + return slices.Contains(h.OutputLogPolicies, policy) +} diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 3ef1c17cb..68379add8 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -17,24 +17,205 @@ limitations under the License. package action import ( + "bytes" + "fmt" "io" - "io/ioutil" "reflect" "testing" "time" + "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/resource" - "helm.sh/helm/v3/pkg/chartutil" - "helm.sh/helm/v3/pkg/kube" - kubefake "helm.sh/helm/v3/pkg/kube/fake" - "helm.sh/helm/v3/pkg/release" - "helm.sh/helm/v3/pkg/storage" - "helm.sh/helm/v3/pkg/storage/driver" + chart "helm.sh/helm/v4/pkg/chart/v2" + chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/kube" + kubefake "helm.sh/helm/v4/pkg/kube/fake" + release "helm.sh/helm/v4/pkg/release/v1" + "helm.sh/helm/v4/pkg/storage" + "helm.sh/helm/v4/pkg/storage/driver" ) +func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string { + hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) + return fmt.Sprintf(`kind: Pod +metadata: + name: finding-sharky, + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-output-log-policy": %s +spec: + containers: + - name: sharky-test + image: fake-image + cmd: fake-command`, hookDefinitionString) +} + +func podManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string { + hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) + return fmt.Sprintf(`kind: Pod +metadata: + name: finding-george + namespace: sneaky-namespace + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-output-log-policy": %s +spec: + containers: + - name: george-test + image: fake-image + cmd: fake-command`, hookDefinitionString) +} + +func jobManifestWithOutputLog(hookDefinitions []release.HookOutputLogPolicy) string { + hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) + return fmt.Sprintf(`kind: Job +apiVersion: batch/v1 +metadata: + name: losing-religion + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-output-log-policy": %s +spec: + completions: 1 + parallelism: 1 + activeDeadlineSeconds: 30 + template: + spec: + containers: + - name: religion-container + image: religion-image + cmd: religion-command`, hookDefinitionString) +} + +func jobManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string { + hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions) + return fmt.Sprintf(`kind: Job +apiVersion: batch/v1 +metadata: + name: losing-religion + namespace: rem-namespace + annotations: + "helm.sh/hook": pre-install + 
"helm.sh/hook-output-log-policy": %s +spec: + completions: 1 + parallelism: 1 + activeDeadlineSeconds: 30 + template: + spec: + containers: + - name: religion-container + image: religion-image + cmd: religion-command`, hookDefinitionString) +} + +func convertHooksToCommaSeparated(hookDefinitions []release.HookOutputLogPolicy) string { + var commaSeparated string + for i, policy := range hookDefinitions { + if i+1 == len(hookDefinitions) { + commaSeparated += policy.String() + } else { + commaSeparated += policy.String() + "," + } + } + return commaSeparated +} + +func TestInstallRelease_HookOutputLogsOnFailure(t *testing.T) { + // Should output on failure with expected namespace if hook-failed is set + runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "sneaky-namespace", true) + runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "rem-namespace", true) + + // Should not output on failure with expected namespace if hook-succeed is set + runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) + runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) + runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) + runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false) +} + +func TestInstallRelease_HookOutputLogsOnSuccess(t *testing.T) { + // Should output on success with expected namespace if hook-succeeded is set + runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true) + runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "sneaky-namespace", true) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "rem-namespace", true) + + // Should not output on success if hook-failed is set + runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) + runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false) +} + +func TestInstallRelease_HooksOutputLogsOnSuccessAndFailure(t *testing.T) { + // Should output on success with expected namespace if hook-succeeded and hook-failed is set + runInstallForHooksWithSuccess(t, 
podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true) + + // Should output on failure if hook-succeeded and hook-failed is set + runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true) + runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true) + runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true) +} + +func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) { + var expectedOutput string + if shouldOutput { + expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace) + } + is := assert.New(t) + instAction := installAction(t) + instAction.ReleaseName = "failed-hooks" + outBuffer := &bytes.Buffer{} + instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} + + templates := []*chart.File{ + {Name: "templates/hello", Data: []byte("hello: world")}, + {Name: "templates/hooks", Data: []byte(manifest)}, + } + vals := map[string]interface{}{} + + res, err := instAction.Run(buildChartWithTemplates(templates), vals) + is.NoError(err) + is.Equal(expectedOutput, outBuffer.String()) + is.Equal(release.StatusDeployed, res.Info.Status) +} + +func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) { + var expectedOutput string + if shouldOutput { + expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace) + } + is := assert.New(t) + instAction := installAction(t) + instAction.ReleaseName = "failed-hooks" + failingClient := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + failingClient.WatchUntilReadyError = fmt.Errorf("failed watch") + instAction.cfg.KubeClient = failingClient + outBuffer := &bytes.Buffer{} + failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} + + templates := []*chart.File{ + {Name: "templates/hello", Data: []byte("hello: world")}, + {Name: "templates/hooks", Data: []byte(manifest)}, + } + vals := map[string]interface{}{} + + res, err := instAction.Run(buildChartWithTemplates(templates), vals) + is.Error(err) + is.Contains(res.Info.Description, "failed pre-install") + is.Equal(expectedOutput, outBuffer.String()) + is.Equal(release.StatusFailed, res.Info.Status) +} + type HookFailedError struct{} func (e *HookFailedError) Error() string { @@ -181,7 +362,7 @@ data: for _, tc := range 
testCases { t.Run(tc.name, func(t *testing.T) { kubeClient := &HookFailingKubeClient{ - kubefake.PrintingKubeClient{Out: ioutil.Discard}, tc.failOn, []resource.Info{}, + kubefake.PrintingKubeClient{Out: io.Discard}, tc.failOn, []resource.Info{}, } configuration := &Configuration{ From b533189cb371d676db949217ebb8c18160b92306 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 00:19:41 +0000 Subject: [PATCH 107/192] build(deps): bump github.com/containerd/containerd from 1.7.26 to 1.7.27 Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.7.26 to 1.7.27. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v1.7.26...v1.7.27) --- updated-dependencies: - dependency-name: github.com/containerd/containerd dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 789cc723f..723cfc769 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/Masterminds/squirrel v1.5.4 github.com/Masterminds/vcs v1.13.3 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 - github.com/containerd/containerd v1.7.26 + github.com/containerd/containerd v1.7.27 github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/evanphx/json-patch v5.9.11+incompatible diff --git a/go.sum b/go.sum index fec96dac6..b0e35d8b9 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/containerd/containerd v1.7.26 h1:3cs8K2RHlMQaPifLqgRyI4VBkoldNdEw62cb7qQga7k= -github.com/containerd/containerd v1.7.26/go.mod h1:m4JU0E+h0ebbo9yXD7Hyt+sWnc8tChm7MudCjj4jRvQ= +github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= +github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -423,8 +423,6 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= From d5d75ad0c7c16b8b630492559dd97c857cca7bb8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 21:41:09 +0000 Subject: [PATCH 108/192] build(deps): bump golangci/golangci-lint-action from 6.5.1 to 6.5.2 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.5.1 to 6.5.2. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/4696ba8babb6127d732c3c6dde519db15edab9ea...55c2c1448f86e01eaae002a5a3a9624417608d84) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index d7eafdd72..0d11cd531 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -21,6 +21,6 @@ jobs: go-version: '1.23' check-latest: true - name: golangci-lint - uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea #pin@6.5.1 + uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 #pin@6.5.2 with: version: v1.62 From 835ff78f482c12703c61720e293a3ad82d652d03 Mon Sep 17 00:00:00 2001 From: Tom Wieczorek Date: Wed, 19 Mar 2025 14:50:16 +0100 Subject: [PATCH 109/192] Remove ClientOptResolver from OCI Client This option was kept to avoid compile-time incompatibilities in Helm v3 when upgrading to ORAS v2. Let's remove it for Helm v4. This allows Helm to drop the containerd dependency entirely. Signed-off-by: Tom Wieczorek --- go.mod | 4 ---- go.sum | 8 -------- pkg/registry/client.go | 9 --------- pkg/registry/client_test.go | 33 --------------------------------- 4 files changed, 54 deletions(-) delete mode 100644 pkg/registry/client_test.go diff --git a/go.mod b/go.mod index 723cfc769..0589b84e9 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/Masterminds/squirrel v1.5.4 github.com/Masterminds/vcs v1.13.3 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 - github.com/containerd/containerd v1.7.27 github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/evanphx/json-patch v5.9.11+incompatible @@ -60,9 +59,6 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/containerd/errdefs v0.3.0 // indirect - github.com/containerd/log v0.1.0 // indirect - github.com/containerd/platforms v0.2.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/go.sum b/go.sum index b0e35d8b9..20dd5c0b9 100644 --- a/go.sum +++ b/go.sum @@ -48,14 +48,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= -github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= 
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= -github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= -github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= diff --git a/pkg/registry/client.go b/pkg/registry/client.go index ecc7a0d04..2078ecd75 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -31,7 +31,6 @@ import ( "sync" "github.com/Masterminds/semver/v3" - "github.com/containerd/containerd/remotes" "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -56,8 +55,6 @@ storing semantic versions, Helm adopts the convention of changing plus (+) to an underscore (_) in chart version tags when pushing to a registry and back to a plus (+) when pulling from a registry.` -var errDeprecatedRemote = errors.New("providing github.com/containerd/containerd/remotes.Resolver via ClientOptResolver is no longer suported") - type ( // RemoteClient shadows the ORAS remote.Client interface // (hiding the ORAS type from Helm client visibility) @@ -231,12 +228,6 @@ func ClientOptPlainHTTP() ClientOption { } } -func ClientOptResolver(_ remotes.Resolver) ClientOption { - return func(c *Client) { - c.err = errDeprecatedRemote - } -} - type ( // LoginOption allows specifying various settings on login LoginOption func(*loginOperation) diff --git a/pkg/registry/client_test.go b/pkg/registry/client_test.go deleted file mode 100644 index 4c5a78849..000000000 --- a/pkg/registry/client_test.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package registry - -import ( - "testing" - - "github.com/containerd/containerd/remotes" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewClientResolverNotSupported(t *testing.T) { - var r remotes.Resolver - - client, err := NewClient(ClientOptResolver(r)) - require.Equal(t, err, errDeprecatedRemote) - assert.Nil(t, client) -} From a45cf1bab970430d599aa1d735615ef5264e9f38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 21:51:55 +0000 Subject: [PATCH 110/192] build(deps): bump actions/upload-artifact from 4.6.1 to 4.6.2 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.1 to 4.6.2. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1...ea165f8d65b6e75b540449e92b4886f43607fa02) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f89fcd98c..a8c2e8a15 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -55,7 +55,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif From f95410f66c42759325dec33ca162056a762affbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 21:51:59 +0000 Subject: [PATCH 111/192] build(deps): bump actions/setup-go from 5.3.0 to 5.4.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.3.0 to 5.4.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/f111f3307d8850f501ac008e886eec1fd1932a34...0aaccfd150d50ccaeb58ebd88d36e91967a5f35b) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/build-test.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/govulncheck.yml | 2 +- .github/workflows/release.yml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 2ccea3d0e..b654bf4d6 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -20,7 +20,7 @@ jobs: - name: Checkout source code uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0 with: go-version: '1.23' check-latest: true diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 0d11cd531..6fbbd2c53 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -16,7 +16,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0 with: go-version: '1.23' check-latest: true diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index f8572f2d6..b376c7b8e 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0 with: go-version: '1.23' check-latest: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c5e7c6840..63e5c0e26 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,7 +25,7 @@ jobs: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0 with: go-version: '1.23' @@ -81,7 +81,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0 with: go-version: '1.23' check-latest: true From fc476f7235a6135acbf902aacb42af54d8edccad Mon Sep 17 00:00:00 2001 From: linghuying <1599935829@qq.com> Date: Thu, 20 Mar 2025 22:18:28 +0800 Subject: [PATCH 112/192] chore: make function comment match function name Signed-off-by: linghuying <1599935829@qq.com> --- pkg/registry/client.go | 2 +- pkg/storage/driver/mock_test.go | 2 +- pkg/storage/storage_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index ecc7a0d04..fadffac5b 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -771,7 +771,7 @@ func PushOptStrictMode(strictMode bool) PushOption { } } -// PushOptCreationDate returns a function that sets the creation time +// PushOptCreationTime returns a function that sets the creation time func PushOptCreationTime(creationTime string) PushOption { return func(operation *pushOperation) { operation.creationTime = creationTime diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index 199da6505..53919b45d 
100644
--- a/pkg/storage/driver/mock_test.go
+++ b/pkg/storage/driver/mock_test.go
@@ -166,7 +166,7 @@ func (mock *MockConfigMapsInterface) Delete(_ context.Context, name string, _ me
 	return nil
 }
 
-// newTestFixture initializes a MockSecretsInterface.
+// newTestFixtureSecrets initializes a MockSecretsInterface.
 // Secrets are created for each release provided.
 func newTestFixtureSecrets(t *testing.T, releases ...*rspb.Release) *Secrets {
 	var mock MockSecretsInterface
diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go
index 056b7f5f5..1dadc9c93 100644
--- a/pkg/storage/storage_test.go
+++ b/pkg/storage/storage_test.go
@@ -476,7 +476,7 @@ func TestStorageLast(t *testing.T) {
 	}
 }
 
-// TestUpgradeInitiallyFailedRelease tests a case when there are no deployed release yet, but history limit has been
+// TestUpgradeInitiallyFailedReleaseWithHistoryLimit tests a case when there are no deployed release yet, but history limit has been
 // reached: the has-no-deployed-releases error should not occur in such case.
 func TestUpgradeInitiallyFailedReleaseWithHistoryLimit(t *testing.T) {
 	storage := Init(driver.NewMemory())

From 0e4d185370b6e1e8cf186dcb8946bb44ca410e24 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 21 Mar 2025 09:54:44 +0100
Subject: [PATCH 113/192] Inform about time spent waiting for resources to be ready in slog format

Signed-off-by: Benoit Tigeot
---
 pkg/kube/wait.go | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go
index 7eb931496..8844b0876 100644
--- a/pkg/kube/wait.go
+++ b/pkg/kube/wait.go
@@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"time"
 
@@ -101,12 +102,13 @@ func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
 
 // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
 func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
-	w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout)
+	slog.Info("beginning wait for resources to be deleted", "count", len(deleted), "timeout", w.timeout)
+	startTime := time.Now()
 
 	ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
 	defer cancel()
 
-	return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
+	err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
 		for _, v := range deleted {
 			err := v.Get()
 			if err == nil || !apierrors.IsNotFound(err) {
@@ -115,6 +117,15 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
 		}
 		return true, nil
 	})
+
+	elapsed := time.Since(startTime).Round(time.Second)
+	if err != nil {
+		slog.Debug("wait for resources failed", "elapsed", elapsed, "error", err)
+	} else {
+		slog.Debug("wait for resources succeeded", "elapsed", elapsed)
+	}
+
+	return err
 }
 
 // SelectorsForObject returns the pod label selector for a given object

From e3e84b6dfe462cbd45e5252796f0b3b7f431dc6e Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 21 Mar 2025 10:08:05 +0100
Subject: [PATCH 114/192] "beginning wait" is intended to be displayed as a debug log

Signed-off-by: Benoit Tigeot
---
 pkg/kube/wait.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go
index 8844b0876..de53a67f1 100644
--- a/pkg/kube/wait.go
+++ b/pkg/kube/wait.go
@@ -102,7 +102,7 @@ func (w *waiter) 
isRetryableHTTPStatusCode(httpStatusCode int32) bool { // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached func (w *waiter) waitForDeletedResources(deleted ResourceList) error { - slog.Info("beginning wait for resources to be deleted", "count", len(deleted), "timeout", w.timeout) + slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", w.timeout) startTime := time.Now() ctx, cancel := context.WithTimeout(context.Background(), w.timeout) From 94cb21c7c48ccefafbd8ad04defe5846bcfb2751 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Fri, 21 Mar 2025 10:33:45 +0100 Subject: [PATCH 115/192] Follow convention for error with slog.Any() Signed-off-by: Benoit Tigeot --- pkg/kube/wait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index de53a67f1..6a709b22d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -120,7 +120,7 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error { elapsed := time.Since(startTime).Round(time.Second) if err != nil { - slog.Debug("wait for resources failed", "elapsed", elapsed, "error", err) + slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err)) } else { slog.Debug("wait for resources succeeded", "elapsed", elapsed) } From e4e602e13c3363b8c479607cd932e6a4efd9c38f Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Fri, 21 Mar 2025 08:06:01 -0400 Subject: [PATCH 116/192] Error when failed repo update. In Helm v3 we did not change exit codes for existing commands to maintain compat. A flag was introduced so a failure would result in a non-0 exit code. A note was left to make this the default in Helm v4. That's what this change does. Closes #10016 Signed-off-by: Matt Farina --- pkg/cmd/repo_update.go | 21 ++++++------------ pkg/cmd/repo_update_test.go | 43 +++++-------------------------------- 2 files changed, 12 insertions(+), 52 deletions(-) diff --git a/pkg/cmd/repo_update.go b/pkg/cmd/repo_update.go index 25071377b..6590d9872 100644 --- a/pkg/cmd/repo_update.go +++ b/pkg/cmd/repo_update.go @@ -42,11 +42,10 @@ To update all the repositories, use 'helm repo update'. var errNoRepositories = errors.New("no repositories found. You must add one before updating") type repoUpdateOptions struct { - update func([]*repo.ChartRepository, io.Writer, bool) error - repoFile string - repoCache string - names []string - failOnRepoUpdateFail bool + update func([]*repo.ChartRepository, io.Writer) error + repoFile string + repoCache string + names []string } func newRepoUpdateCmd(out io.Writer) *cobra.Command { @@ -69,12 +68,6 @@ func newRepoUpdateCmd(out io.Writer) *cobra.Command { }, } - f := cmd.Flags() - - // Adding this flag for Helm 3 as stop gap functionality for https://github.com/helm/helm/issues/10016. - // This should be deprecated in Helm 4 by update to the behaviour of `helm repo update` command. 
- f.BoolVar(&o.failOnRepoUpdateFail, "fail-on-repo-update-fail", false, "update fails if any of the repository updates fail") - return cmd } @@ -112,10 +105,10 @@ func (o *repoUpdateOptions) run(out io.Writer) error { } } - return o.update(repos, out, o.failOnRepoUpdateFail) + return o.update(repos, out) } -func updateCharts(repos []*repo.ChartRepository, out io.Writer, failOnRepoUpdateFail bool) error { +func updateCharts(repos []*repo.ChartRepository, out io.Writer) error { fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...") var wg sync.WaitGroup var repoFailList []string @@ -133,7 +126,7 @@ func updateCharts(repos []*repo.ChartRepository, out io.Writer, failOnRepoUpdate } wg.Wait() - if len(repoFailList) > 0 && failOnRepoUpdateFail { + if len(repoFailList) > 0 { return fmt.Errorf("Failed to update the following repositories: %s", repoFailList) } diff --git a/pkg/cmd/repo_update_test.go b/pkg/cmd/repo_update_test.go index 5b27a6dfb..6fc4c8f4b 100644 --- a/pkg/cmd/repo_update_test.go +++ b/pkg/cmd/repo_update_test.go @@ -34,7 +34,7 @@ func TestUpdateCmd(t *testing.T) { var out bytes.Buffer // Instead of using the HTTP updater, we provide our own for this test. // The TestUpdateCharts test verifies the HTTP behavior independently. - updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error { + updater := func(repos []*repo.ChartRepository, out io.Writer) error { for _, re := range repos { fmt.Fprintln(out, re.Config.Name) } @@ -59,7 +59,7 @@ func TestUpdateCmdMultiple(t *testing.T) { var out bytes.Buffer // Instead of using the HTTP updater, we provide our own for this test. // The TestUpdateCharts test verifies the HTTP behavior independently. - updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error { + updater := func(repos []*repo.ChartRepository, out io.Writer) error { for _, re := range repos { fmt.Fprintln(out, re.Config.Name) } @@ -85,7 +85,7 @@ func TestUpdateCmdInvalid(t *testing.T) { var out bytes.Buffer // Instead of using the HTTP updater, we provide our own for this test. // The TestUpdateCharts test verifies the HTTP behavior independently. 
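+	// Note the narrowed updater signature below: with Helm v4's `repo update`
+	// behavior, a failed repository update always returns a non-nil error, so
+	// the old fail-on-repo-update-fail bool parameter is gone.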
-	updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
+	updater := func(repos []*repo.ChartRepository, out io.Writer) error {
 		for _, re := range repos {
 			fmt.Fprintln(out, re.Config.Name)
 		}
@@ -145,7 +145,7 @@ func TestUpdateCharts(t *testing.T) {
 	}
 
 	b := bytes.NewBuffer(nil)
-	updateCharts([]*repo.ChartRepository{r}, b, false)
+	updateCharts([]*repo.ChartRepository{r}, b)
 
 	got := b.String()
 	if strings.Contains(got, "Unable to get an update") {
@@ -161,39 +161,6 @@ func TestRepoUpdateFileCompletion(t *testing.T) {
 	checkFileCompletion(t, "repo update repo1", false)
 }
 
-func TestUpdateChartsFail(t *testing.T) {
-	defer resetEnv()()
-	ensure.HelmHome(t)
-
-	ts := repotest.NewTempServer(
-		t,
-		repotest.WithChartSourceGlob("testdata/testserver/*.*"),
-	)
-	defer ts.Stop()
-
-	var invalidURL = ts.URL() + "55"
-	r, err := repo.NewChartRepository(&repo.Entry{
-		Name: "charts",
-		URL:  invalidURL,
-	}, getter.All(settings))
-	if err != nil {
-		t.Error(err)
-	}
-
-	b := bytes.NewBuffer(nil)
-	if err := updateCharts([]*repo.ChartRepository{r}, b, false); err != nil {
-		t.Error("Repo update should not return error if update of repository fails")
-	}
-
-	got := b.String()
-	if !strings.Contains(got, "Unable to get an update") {
-		t.Errorf("Repo should have failed update but instead got: %q", got)
-	}
-	if !strings.Contains(got, "Update Complete.") {
-		t.Error("Update was not successful")
-	}
-}
-
 func TestUpdateChartsFailWithError(t *testing.T) {
 	defer resetEnv()()
 	ensure.HelmHome(t)
@@ -214,7 +181,7 @@ func TestUpdateChartsFailWithError(t *testing.T) {
 	}
 
 	b := bytes.NewBuffer(nil)
-	err = updateCharts([]*repo.ChartRepository{r}, b, true)
+	err = updateCharts([]*repo.ChartRepository{r}, b)
 	if err == nil {
 		t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set")
 		return

From c5991028e01588a4f5b86cc31d794e15ffde8f64 Mon Sep 17 00:00:00 2001
From: Robert Sirchia
Date: Fri, 21 Mar 2025 16:12:53 -0400
Subject: [PATCH 117/192] Fix structured logging keys from Matt's changes

Signed-off-by: Robert Sirchia
---
 pkg/chart/v2/util/dependencies.go | 2 +-
 pkg/engine/engine.go              | 6 +++---
 pkg/engine/lookup_func.go         | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go
index 6c9da4430..72a08b2a9 100644
--- a/pkg/chart/v2/util/dependencies.go
+++ b/pkg/chart/v2/util/dependencies.go
@@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
 			// get child table
 			vv, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Warn("ImportValues missing table from chart", "chart", r.Name, "value", err) + slog.Warn("ImportValues missing table from chart", "chart", r.Name, "error", err) continue } // create value map from child to be merged into parent diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 9c91fd43b..7235b026a 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("missing required value", "value", warn) + slog.Warn("missing required value", "message", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) { if val == "" { if e.LintMode { // Don't fail on missing required values when linting - slog.Warn("missing required values", "value", warn) + slog.Warn("missing required values", "message", warn) return "", nil } return val, errors.New(warnWrap(warn)) @@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["fail"] = func(msg string) (string, error) { if e.LintMode { // Don't fail when linting - slog.Info("funcMap fail", "lintMode", msg) + slog.Info("funcMap fail", "message", msg) return "", nil } return "", errors.New(warnWrap(msg)) diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 89f2707ec..b7460850a 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -127,7 +127,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("unable to retrieve resource list", "list", gvk.GroupVersion().String(), "error", err) + slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), "error", err) return res, err } for _, resource := range resList.APIResources { From 4f4c858f9c8f2e55871d80e877241abb9fa69b21 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Sun, 23 Mar 2025 15:38:59 +0100 Subject: [PATCH 118/192] Ignore unused parameter Signed-off-by: Benoit Tigeot --- pkg/kube/wait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 6a709b22d..71c6add53 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -108,7 +108,7 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error { ctx, cancel := context.WithTimeout(context.Background(), w.timeout) defer cancel() - err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) { for _, v := range deleted { err := v.Get() if err == nil || !apierrors.IsNotFound(err) { From 3ca2558f0455ba7e50f260e2ef26745140b0277c Mon Sep 17 00:00:00 2001 From: Wahab Ali Date: Wed, 7 Jun 2023 19:00:11 -0400 Subject: [PATCH 119/192] Do not explicitly set SNI in HTTPGetter Signed-off-by: Wahab Ali --- pkg/getter/httpgetter.go | 7 -- pkg/getter/httpgetter_test.go | 118 +++++++++++++++++++++++++++++++++- testdata/localhost-crt.pem | 73 +++++++++++++++++++++ testdata/openssl.conf | 4 ++ 4 files changed, 192 insertions(+), 10 deletions(-) create mode 100644 testdata/localhost-crt.pem diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go index 37d80cda7..a945dec2b 100644 --- a/pkg/getter/httpgetter.go +++ b/pkg/getter/httpgetter.go @@ 
-26,7 +26,6 @@ import ( "github.com/pkg/errors" "helm.sh/helm/v4/internal/tlsutil" - "helm.sh/helm/v4/internal/urlutil" "helm.sh/helm/v4/internal/version" ) @@ -137,12 +136,6 @@ func (g *HTTPGetter) httpClient() (*http.Client, error) { return nil, errors.Wrap(err, "can't create TLS config for client") } - sni, err := urlutil.ExtractHostname(g.opts.url) - if err != nil { - return nil, err - } - tlsConf.ServerName = sni - g.transport.TLSClientConfig = tlsConf } diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go index 24e670f6e..dc60b9982 100644 --- a/pkg/getter/httpgetter_test.go +++ b/pkg/getter/httpgetter_test.go @@ -358,6 +358,121 @@ func TestDownloadTLS(t *testing.T) { } } +func TestDownloadTLSWithRedirect(t *testing.T) { + cd := "../../testdata" + srv2Resp := "hello" + insecureSkipTLSverify := false + + // Server 2 that will actually fulfil the request. + ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "localhost-crt.pem"), filepath.Join(cd, "key.pem") + tlsConf, err := tlsutil.NewClientTLS(pub, priv, ca, insecureSkipTLSverify) + if err != nil { + t.Fatal(errors.Wrap(err, "can't create TLS config for client")) + } + + tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "text/plain") + rw.Write([]byte(srv2Resp)) + })) + + tlsSrv2.TLS = tlsConf + tlsSrv2.StartTLS() + defer tlsSrv2.Close() + + // Server 1 responds with a redirect to Server 2. + ca, pub, priv = filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem") + tlsConf, err = tlsutil.NewClientTLS(pub, priv, ca, insecureSkipTLSverify) + if err != nil { + t.Fatal(errors.Wrap(err, "can't create TLS config for client")) + } + + tlsSrv1 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + u, _ := url.ParseRequestURI(tlsSrv2.URL) + + // Make the request using the hostname 'localhost' (to which 'localhost-crt.pem' is issued) + // to verify that a successful TLS connection is made even if the client doesn't specify + // the hostname (SNI) in `tls.Config.ServerName`. By default the hostname is derived from the + // request URL for every request (including redirects). Setting `tls.Config.ServerName` on the + // client just overrides the remote endpoint's hostname. + // See https://github.com/golang/go/blob/3979fb9/src/net/http/transport.go#L1505-L1513. 
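+			// By contrast, pinning SNI up front (roughly what the removed
+			// `tlsConf.ServerName = sni` line in HTTPGetter used to do) would reuse
+			// the original host's name on the redirected connection as well, and
+			// verification against Server 2's "localhost" certificate would fail.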
+ u.Host = fmt.Sprintf("localhost:%s", u.Port()) + + http.Redirect(rw, r, u.String(), http.StatusTemporaryRedirect) + })) + + tlsSrv1.TLS = tlsConf + tlsSrv1.StartTLS() + defer tlsSrv1.Close() + + u, _ := url.ParseRequestURI(tlsSrv1.URL) + + t.Run("Test with TLS", func(t *testing.T) { + g, err := NewHTTPGetter( + WithURL(u.String()), + WithTLSClientConfig(pub, priv, ca), + ) + if err != nil { + t.Fatal(err) + } + + buf, err := g.Get(u.String()) + if err != nil { + t.Error(err) + } + + b, err := io.ReadAll(buf) + if err != nil { + t.Error(err) + } + + if string(b) != srv2Resp { + t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b)) + } + }) + + t.Run("Test with TLS config being passed along in .Get (see #6635)", func(t *testing.T) { + g, err := NewHTTPGetter() + if err != nil { + t.Fatal(err) + } + + buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig(pub, priv, ca)) + if err != nil { + t.Error(err) + } + + b, err := io.ReadAll(buf) + if err != nil { + t.Error(err) + } + + if string(b) != srv2Resp { + t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b)) + } + }) + + t.Run("Test with only the CA file (see also #6635)", func(t *testing.T) { + g, err := NewHTTPGetter() + if err != nil { + t.Fatal(err) + } + + buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig("", "", ca)) + if err != nil { + t.Error(err) + } + + b, err := io.ReadAll(buf) + if err != nil { + t.Error(err) + } + + if string(b) != srv2Resp { + t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b)) + } + }) +} + func TestDownloadInsecureSkipTLSVerify(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) defer ts.Close() @@ -450,9 +565,6 @@ func TestHttpClientInsecureSkipVerify(t *testing.T) { if len(transport.TLSClientConfig.Certificates) <= 0 { t.Fatal("transport.TLSClientConfig.Certificates is not present") } - if transport.TLSClientConfig.ServerName == "" { - t.Fatal("TLSClientConfig.ServerName is blank") - } } func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expectedValue bool) *http.Transport { diff --git a/testdata/localhost-crt.pem b/testdata/localhost-crt.pem new file mode 100644 index 000000000..70fa0a429 --- /dev/null +++ b/testdata/localhost-crt.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7f:5e:fa:21:fa:ee:e4:6a:be:9b:c2:80:bf:ed:42:f3:2d:47:f5:d2 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=CO, L=Boulder, O=Helm, CN=helm.sh + Validity + Not Before: Nov 6 21:59:18 2023 GMT + Not After : Nov 3 21:59:18 2033 GMT + Subject: C=CA, ST=ON, L=Kitchener, O=Helm, CN=localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:c8:89:55:0d:0b:f1:da:e6:c0:70:7d:d3:27:cd: + b8:a8:81:8b:7c:a4:89:e5:d1:b1:78:01:1d:df:44: + 88:0b:fc:d6:81:35:3d:d1:3b:5e:8f:bb:93:b3:7e: + 28:db:ed:ff:a0:13:3a:70:a3:fe:94:6b:0b:fe:fb: + 63:00:b0:cb:dc:81:cd:80:dc:d0:2f:bf:b2:4f:9a: + 81:d4:22:dc:97:c8:8f:27:86:59:91:fa:92:05:75: + c4:cc:6b:f5:a9:6b:74:1e:f5:db:a9:f8:bf:8c:a2: + 25:fd:a0:cc:79:f4:25:57:74:a9:23:9b:e2:b7:22: + 7a:14:7a:3d:ea:f1:7e:32:6b:57:6c:2e:c6:4f:75: + 54:f9:6b:54:d2:ca:eb:54:1c:af:39:15:9b:d0:7c: + 0f:f8:55:51:04:ea:da:fa:7b:8b:63:0f:ac:39:b1: + f6:4b:8e:4e:f6:ea:e9:7b:e6:ba:5e:5a:8e:91:ef: + dc:b1:7d:52:3f:73:83:52:46:83:48:49:ff:f2:2d: + ca:54:f2:36:bb:49:cc:59:99:c0:9e:cf:8e:78:55: + 
6c:ed:7d:7e:83:b8:59:2c:7d:f8:1a:81:f0:7d:f5: + 27:f2:db:ae:d4:31:54:38:fe:47:b2:ee:16:20:0f: + f1:db:2d:28:bf:6f:38:eb:11:bb:9a:d4:b2:5a:3a: + 4a:7f + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost + Signature Algorithm: sha256WithRSAEncryption + 47:47:fe:29:ca:94:28:75:59:ba:ab:67:ab:c6:a6:0b:0a:f2: + 0f:26:d9:1d:35:db:68:a5:d8:f5:1f:d1:87:e7:a7:74:fd:c0: + 22:aa:c8:ec:6c:d3:ac:8a:0b:ed:59:3a:a0:12:77:7c:53:74: + fd:30:59:34:8f:a4:ef:5b:98:3f:ff:cf:89:87:ed:d3:7f:41: + 2f:b1:9a:12:71:bb:fe:3a:cf:77:16:32:bc:83:90:cc:52:2f: + 3b:f4:ae:db:b1:bb:f0:dd:30:d4:03:17:5e:47:b7:06:86:7a: + 16:b1:72:2f:80:5d:d4:c0:f9:6c:91:df:5a:c5:15:86:66:68: + c8:90:8e:f1:a2:bb:40:0f:ef:26:1b:02:c4:42:de:8c:69:ec: + ad:27:d0:bc:da:7c:76:33:86:de:b7:c4:04:64:e6:f6:dc:44: + 89:7b:b8:2f:c7:28:7a:4c:a6:01:ad:a5:17:64:3a:23:da:aa: + db:ce:3f:86:e9:92:dc:0d:c4:5a:b4:52:a8:8a:ee:3d:62:7d: + b1:c8:fa:ef:96:2b:ab:f1:e1:6d:6f:7d:1e:ce:bc:7a:d0:92: + 02:1b:c8:55:36:77:bf:d4:42:d3:fc:57:ca:b7:cc:95:be:ce: + f8:6e:b2:28:ca:4d:9a:00:7d:78:c8:56:04:2e:b3:ac:03:fa: + 05:d8:42:bd +-----BEGIN CERTIFICATE----- +MIIDRDCCAiygAwIBAgIUf176Ifru5Gq+m8KAv+1C8y1H9dIwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAwDgYDVQQHDAdCb3VsZGVy +MQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNoMB4XDTIzMTEwNjIxNTkx +OFoXDTMzMTEwMzIxNTkxOFowUTELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMRIw +EAYDVQQHDAlLaXRjaGVuZXIxDTALBgNVBAoMBEhlbG0xEjAQBgNVBAMMCWxvY2Fs +aG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMiJVQ0L8drmwHB9 +0yfNuKiBi3ykieXRsXgBHd9EiAv81oE1PdE7Xo+7k7N+KNvt/6ATOnCj/pRrC/77 +YwCwy9yBzYDc0C+/sk+agdQi3JfIjyeGWZH6kgV1xMxr9alrdB7126n4v4yiJf2g +zHn0JVd0qSOb4rciehR6PerxfjJrV2wuxk91VPlrVNLK61QcrzkVm9B8D/hVUQTq +2vp7i2MPrDmx9kuOTvbq6Xvmul5ajpHv3LF9Uj9zg1JGg0hJ//ItylTyNrtJzFmZ +wJ7PjnhVbO19foO4WSx9+BqB8H31J/LbrtQxVDj+R7LuFiAP8dstKL9vOOsRu5rU +slo6Sn8CAwEAAaMYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEB +CwUAA4IBAQBHR/4pypQodVm6q2erxqYLCvIPJtkdNdtopdj1H9GH56d0/cAiqsjs +bNOsigvtWTqgEnd8U3T9MFk0j6TvW5g//8+Jh+3Tf0EvsZoScbv+Os93FjK8g5DM +Ui879K7bsbvw3TDUAxdeR7cGhnoWsXIvgF3UwPlskd9axRWGZmjIkI7xortAD+8m +GwLEQt6MaeytJ9C82nx2M4bet8QEZOb23ESJe7gvxyh6TKYBraUXZDoj2qrbzj+G +6ZLcDcRatFKoiu49Yn2xyPrvliur8eFtb30ezrx60JICG8hVNne/1ELT/FfKt8yV +vs74brIoyk2aAH14yFYELrOsA/oF2EK9 +-----END CERTIFICATE----- diff --git a/testdata/openssl.conf b/testdata/openssl.conf index 9b27e445b..be5ff04b7 100644 --- a/testdata/openssl.conf +++ b/testdata/openssl.conf @@ -40,3 +40,7 @@ subjectAltName = @alternate_names [alternate_names] DNS.1 = helm.sh IP.1 = 127.0.0.1 + +# # Used to generate localhost-crt.pem +# [alternate_names] +# DNS.1 = localhost From e7895245f08cccb9bf0716f986c6cc1baf3fff28 Mon Sep 17 00:00:00 2001 From: Wahab Ali Date: Thu, 6 Feb 2025 19:32:08 +0500 Subject: [PATCH 120/192] Fix lint errors Signed-off-by: Wahab Ali --- pkg/getter/httpgetter_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go index dc60b9982..02e0735b5 100644 --- a/pkg/getter/httpgetter_test.go +++ b/pkg/getter/httpgetter_test.go @@ -370,7 +370,7 @@ func TestDownloadTLSWithRedirect(t *testing.T) { t.Fatal(errors.Wrap(err, "can't create TLS config for client")) } - tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { rw.Header().Set("Content-Type", "text/plain") rw.Write([]byte(srv2Resp)) })) From 
ec31aab851abf6a2377769a2db4ba1d610e152a5 Mon Sep 17 00:00:00 2001 From: Wahab Ali Date: Mon, 24 Mar 2025 10:51:02 -0400 Subject: [PATCH 121/192] Use NewTLSConfig instead of the outdated NewClientTLS func Signed-off-by: Wahab Ali --- pkg/getter/httpgetter_test.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go index 02e0735b5..27752a257 100644 --- a/pkg/getter/httpgetter_test.go +++ b/pkg/getter/httpgetter_test.go @@ -365,7 +365,12 @@ func TestDownloadTLSWithRedirect(t *testing.T) { // Server 2 that will actually fulfil the request. ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "localhost-crt.pem"), filepath.Join(cd, "key.pem") - tlsConf, err := tlsutil.NewClientTLS(pub, priv, ca, insecureSkipTLSverify) + tlsConf, err := tlsutil.NewTLSConfig( + tlsutil.WithCAFile(ca), + tlsutil.WithCertKeyPairFiles(pub, priv), + tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify), + ) + if err != nil { t.Fatal(errors.Wrap(err, "can't create TLS config for client")) } @@ -381,7 +386,12 @@ func TestDownloadTLSWithRedirect(t *testing.T) { // Server 1 responds with a redirect to Server 2. ca, pub, priv = filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem") - tlsConf, err = tlsutil.NewClientTLS(pub, priv, ca, insecureSkipTLSverify) + tlsConf, err = tlsutil.NewTLSConfig( + tlsutil.WithCAFile(ca), + tlsutil.WithCertKeyPairFiles(pub, priv), + tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify), + ) + if err != nil { t.Fatal(errors.Wrap(err, "can't create TLS config for client")) } From 386523bdbc6f5e5f289ade7d9d4cf4c935354450 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 13:55:39 +0000 Subject: [PATCH 122/192] update to get waiter instead of set Signed-off-by: Austin Abro --- pkg/action/action.go | 5 +-- pkg/action/hooks.go | 22 ++++++++---- pkg/action/install.go | 28 ++++++++++------ pkg/action/install_test.go | 6 ++-- pkg/action/release_testing.go | 3 +- pkg/action/rollback.go | 19 +++++------ pkg/action/uninstall.go | 14 ++++---- pkg/action/uninstall_test.go | 4 +-- pkg/action/upgrade.go | 35 +++++++++---------- pkg/action/upgrade_test.go | 10 +++--- pkg/cmd/install.go | 2 +- pkg/cmd/rollback.go | 2 +- pkg/cmd/uninstall.go | 2 +- pkg/cmd/upgrade.go | 4 +-- pkg/kube/client.go | 13 +++----- pkg/kube/client_test.go | 16 +++------ pkg/kube/fake/fake.go | 63 ++++++++++++++++++++++------------- pkg/kube/fake/printer.go | 28 ++++++++++------ pkg/kube/interface.go | 6 ++-- 19 files changed, 151 insertions(+), 131 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 1ca6a4dfa..ea2dc0dd7 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -375,10 +375,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { - kc, err := kube.New(getter) - if err != nil { - return err - } + kc := kube.New(getter) kc.Log = log lazyClient := &lazyClient{ diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 6637891c5..9d0bb390b 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -35,7 +35,7 @@ import ( ) // execHook executes all of the hooks for the given hook event. 
-func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error { +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration) error { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -59,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} } - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { return err } @@ -87,8 +87,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path) } + waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + if err != nil { + return errors.Wrapf(err, "unable to get waiter") + } // Watch hook resources until they have completed - err = cfg.KubeClient.WatchUntilReady(resources, timeout) + err = waiter.WatchUntilReady(resources, timeout) // Note the time of success/failure h.LastRun.CompletedAt = helmtime.Now() // Mark hook as succeeded or failed @@ -101,7 +105,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { // We log the error here as we want to propagate the hook failure upwards to the release object. log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) } @@ -118,7 +122,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // We log here as we still want to attempt hook resource deletion even if output logging fails. log.Printf("error outputting logs for hook failure: %v", err) } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { return err } } @@ -139,7 +143,7 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. 
if h.Kind == "CustomResourceDefinition" { @@ -155,7 +159,11 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return errors.New(joinErrors(errs)) } - if err := cfg.KubeClient.WaitForDelete(resources, timeout); err != nil { + waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + if err != nil { + return err + } + if err := waiter.WaitForDelete(resources, timeout); err != nil { return err } } diff --git a/pkg/action/install.go b/pkg/action/install.go index be76a634f..735b8ac17 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -79,7 +79,7 @@ type Install struct { HideSecret bool DisableHooks bool Replace bool - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy WaitForJobs bool Devel bool DependencyUpdate bool @@ -180,8 +180,12 @@ func (i *Install) installCRDs(crds []chart.CRD) error { totalItems = append(totalItems, res...) } if len(totalItems) > 0 { + waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + if err != nil { + return errors.Wrapf(err, "unable to get waiter") + } // Give time for the CRD to be recognized. - if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { + if err := waiter.Wait(totalItems, 60*time.Second); err != nil { return err } @@ -289,11 +293,8 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if i.Wait == kube.HookOnlyStrategy && i.Atomic { - i.Wait = kube.StatusWatcherStrategy - } - if err := i.cfg.KubeClient.SetWaiter(i.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + if i.WaitStrategy == kube.HookOnlyStrategy && i.Atomic { + i.WaitStrategy = kube.StatusWatcherStrategy } caps, err := i.cfg.getCapabilities() @@ -453,7 +454,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout); err != nil { return rel, fmt.Errorf("failed pre-install: %s", err) } } @@ -470,17 +471,22 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } + waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + if err != nil { + return rel, fmt.Errorf("failed to get waiter: %w", err) + } + if i.WaitForJobs { - err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) + err = waiter.WaitWithJobs(resources, i.Timeout) } else { - err = i.cfg.KubeClient.Wait(resources, i.Timeout) + err = waiter.Wait(resources, i.Timeout) } if err != nil { return rel, err } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout); err != nil { return rel, fmt.Errorf("failed post-install: %s", err) } } diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 331a2f71b..aafda86c2 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -412,7 +412,7 @@ func TestInstallRelease_Wait(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} goroutines := 
runtime.NumGoroutine() @@ -431,7 +431,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, cancel := context.WithCancel(context.Background()) @@ -454,7 +454,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Wait = kube.StatusWatcherStrategy + instAction.WaitStrategy = kube.StatusWatcherStrategy instAction.WaitForJobs = true vals := map[string]interface{}{} diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index c6374523e..7edc3ed34 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -28,6 +28,7 @@ import ( v1 "k8s.io/api/core/v1" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/kube" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -96,7 +97,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) { rel.Hooks = executingHooks } - if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil { + if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout); err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) r.cfg.Releases.Update(rel) return rel, err diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index a96a706e3..870f1e635 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -38,7 +38,7 @@ type Rollback struct { Version int Timeout time.Duration - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy WaitForJobs bool DisableHooks bool DryRun bool @@ -61,10 +61,6 @@ func (r *Rollback) Run(name string) error { return err } - if err := r.cfg.KubeClient.SetWaiter(r.Wait); err != nil { - return fmt.Errorf("failed to set kube client waiter: %w", err) - } - r.cfg.Releases.MaxHistory = r.MaxHistory r.cfg.Log("preparing rollback of %s", name) @@ -181,7 +177,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // pre-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout); err != nil { return targetRelease, err } } else { @@ -227,16 +223,19 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas r.cfg.Log(err.Error()) } } - + waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + if err != nil { + return nil, errors.Wrap(err, "unable to get waiter") + } if r.WaitForJobs { - if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + if err := waiter.WaitWithJobs(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) } } else { - if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + if err := waiter.Wait(target, r.Timeout); err != nil { targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) @@ -246,7 +245,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout); err != nil { return targetRelease, err } } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 503be0da5..eeff997d3 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,7 +17,6 @@ limitations under the License. package action import ( - "fmt" "strings" "time" @@ -42,7 +41,7 @@ type Uninstall struct { DryRun bool IgnoreNotFound bool KeepHistory bool - Wait kube.WaitStrategy + WaitStrategy kube.WaitStrategy DeletionPropagation string Timeout time.Duration Description string @@ -61,8 +60,9 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, err } - if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + if err != nil { + return nil, err } if u.DryRun { @@ -111,7 +111,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) res := &release.UninstallReleaseResponse{Release: rel} if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout); err != nil { return res, err } } else { @@ -135,12 +135,12 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } res.Info = kept - if err := u.cfg.KubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil { errs = append(errs, err) } if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout); err != nil { errs = append(errs, err) } } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 5597abcdf..a83e4bc75 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -83,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = kube.StatusWatcherStrategy + unAction.WaitStrategy = kube.StatusWatcherStrategy rel := releaseStub() rel.Name = "come-fail-away" @@ -114,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { unAction := uninstallAction(t) unAction.DisableHooks = true unAction.DryRun = false - unAction.Wait = kube.HookOnlyStrategy + unAction.WaitStrategy = kube.HookOnlyStrategy unAction.DeletionPropagation = "foreground" rel := releaseStub() diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index ba5dfb5d1..e3b775a25 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -64,8 +64,8 @@ type Upgrade struct { SkipCRDs bool // Timeout is the timeout for this operation Timeout time.Duration - // Wait determines whether the wait operation should be performed and what type of wait. 
- Wait kube.WaitStrategy + // WaitStrategy determines what type of waiting should be done + WaitStrategy kube.WaitStrategy // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. @@ -155,11 +155,8 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Make sure if Atomic is set, that wait is set as well. This makes it so // the user doesn't have to specify both - if u.Wait == kube.HookOnlyStrategy && u.Atomic { - u.Wait = kube.StatusWatcherStrategy - } - if err := u.cfg.KubeClient.SetWaiter(u.Wait); err != nil { - return nil, fmt.Errorf("failed to set kube client waiter: %w", err) + if u.WaitStrategy == kube.HookOnlyStrategy && u.Atomic { + u.WaitStrategy = kube.StatusWatcherStrategy } if err := chartutil.ValidateReleaseName(name); err != nil { @@ -423,7 +420,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // pre-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) return } @@ -447,15 +444,20 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele u.cfg.Log(err.Error()) } } - + waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + if err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } if u.WaitForJobs { - if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + if err := waiter.WaitWithJobs(target, u.Timeout); err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) return } } else { - if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + if err := waiter.Wait(target, u.Timeout); err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) return @@ -464,7 +466,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // post-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) return } @@ -526,13 +528,8 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version - if u.Wait == kube.HookOnlyStrategy { - rollin.Wait = kube.StatusWatcherStrategy - } - // TODO pretty sure this is unnecessary as the waiter is already set if atomic at the start of upgrade - werr := u.cfg.KubeClient.SetWaiter(u.Wait) - if werr != nil { - return rel, errors.Wrapf(herr, "an error occurred while creating the waiter. 
original upgrade error: %s", err) + if u.WaitStrategy == kube.HookOnlyStrategy { + rollin.WaitStrategy = kube.StatusWatcherStrategy } rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index a36b7a3de..19869f6d6 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -53,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) { rel.Info.Status = release.StatusDeployed req.NoError(upAction.cfg.Releases.Create(rel)) - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx, done := context.WithCancel(context.Background()) @@ -83,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} res, err := upAction.Run(rel.Name, buildChart(), vals) @@ -105,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.WaitForJobs = true vals := map[string]interface{}{} @@ -129,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { failer.WaitError = fmt.Errorf("I timed out") failer.DeleteError = fmt.Errorf("I tried to delete nil") upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.CleanupOnFail = true vals := map[string]interface{}{} @@ -396,7 +396,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second upAction.cfg.KubeClient = failer - upAction.Wait = kube.StatusWatcherStrategy + upAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} ctx := context.Background() diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 04055fde9..051612bb8 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -211,7 +211,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { requiredArgs := 2 diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go index 01a32b184..1823432dc 100644 --- a/pkg/cmd/rollback.go +++ b/pkg/cmd/rollback.go @@ -84,7 +84,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. 
It will wait for as long as --timeout") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) return cmd } diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go index 3a86cc598..4680c324a 100644 --- a/pkg/cmd/uninstall.go +++ b/pkg/cmd/uninstall.go @@ -79,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) return cmd } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 74d12ac40..afbbde435 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -136,7 +136,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { instClient.DisableHooks = client.DisableHooks instClient.SkipCRDs = client.SkipCRDs instClient.Timeout = client.Timeout - instClient.Wait = client.Wait + instClient.WaitStrategy = client.WaitStrategy instClient.WaitForJobs = client.WaitForJobs instClient.Devel = client.Devel instClient.Namespace = client.Namespace @@ -294,7 +294,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { addValueOptionsFlags(f, valueOpts) bindOutputFlag(cmd, &outfmt) bindPostRenderFlag(cmd, &client.PostRenderer) - AddWaitFlag(cmd, &client.Wait) + AddWaitFlag(cmd, &client.WaitStrategy) err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 2 { diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 61e681ad3..032f79850 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -125,7 +125,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { }, nil } -func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { +func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { switch strategy { case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() @@ -148,7 +148,7 @@ func (c *Client) newWaiter(strategy WaitStrategy) (Waiter, error) { func (c *Client) SetWaiter(ws WaitStrategy) error { var err error - c.Waiter, err = c.newWaiter(ws) + c.Waiter, err = c.GetWaiter(ws) if err != nil { return err } @@ -156,7 +156,7 @@ func (c *Client) SetWaiter(ws WaitStrategy) error { } // New creates a new Client. 
-func New(getter genericclioptions.RESTClientGetter) (*Client, error) { +func New(getter genericclioptions.RESTClientGetter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } @@ -165,12 +165,7 @@ func New(getter genericclioptions.RESTClientGetter) (*Client, error) { Factory: factory, Log: nopLogger, } - var err error - c.Waiter, err = c.newWaiter(HookOnlyStrategy) - if err != nil { - return nil, err - } - return c, nil + return c } var nopLogger = func(_ string, _ ...interface{}) {} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 527f28a72..8ae1df238 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -516,7 +516,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -573,7 +573,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -632,7 +632,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.newWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiter(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -662,10 +662,7 @@ func TestWaitDelete(t *testing.T) { func TestReal(t *testing.T) { t.Skip("This is a live test, comment this line to run") - c, err := New(nil) - if err != nil { - t.Fatal(err) - } + c := New(nil) resources, err := c.Build(strings.NewReader(guestbookManifest), false) if err != nil { t.Fatal(err) @@ -675,10 +672,7 @@ func TestReal(t *testing.T) { } testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest - c, err = New(nil) - if err != nil { - t.Fatal(err) - } + c = New(nil) resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false) if err != nil { t.Fatal(err) diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index c4322733a..f868afa1a 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -35,19 +35,29 @@ type FailingKubeClient struct { PrintingKubeClient CreateError error GetError error - WaitError error - WaitForDeleteError error DeleteError error DeleteWithPropagationError error - WatchUntilReadyError error UpdateError error BuildError error BuildTableError error BuildDummy bool BuildUnstructuredError error + WaitError error + WaitForDeleteError error + WatchUntilReadyError error WaitDuration time.Duration } +// FailingKubeWaiter implements kube.Waiter for testing purposes. +// It also has additional errors you can set to fail different functions, otherwise it delegates all its calls to `PrintingKubeWaiter` +type FailingKubeWaiter struct { + *PrintingKubeWaiter + waitError error + waitForDeleteError error + watchUntilReadyError error + waitDuration time.Duration +} + // Create returns the configured error if set or prints func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { if f.CreateError != nil { @@ -65,28 +75,28 @@ func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[ } // Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints. 
-func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error { - time.Sleep(f.WaitDuration) - if f.WaitError != nil { - return f.WaitError +func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error { + time.Sleep(f.waitDuration) + if f.waitError != nil { + return f.waitError } - return f.PrintingKubeClient.Wait(resources, d) + return f.PrintingKubeWaiter.Wait(resources, d) } // WaitWithJobs returns the configured error if set or prints -func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { - if f.WaitError != nil { - return f.WaitError +func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { + if f.waitError != nil { + return f.waitError } - return f.PrintingKubeClient.WaitWithJobs(resources, d) + return f.PrintingKubeWaiter.WaitWithJobs(resources, d) } // WaitForDelete returns the configured error if set or prints -func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { - if f.WaitForDeleteError != nil { - return f.WaitForDeleteError +func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error { + if f.waitForDeleteError != nil { + return f.waitForDeleteError } - return f.PrintingKubeClient.WaitForDelete(resources, d) + return f.PrintingKubeWaiter.WaitForDelete(resources, d) } // Delete returns the configured error if set or prints @@ -98,11 +108,11 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [ } // WatchUntilReady returns the configured error if set or prints -func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { - if f.WatchUntilReadyError != nil { - return f.WatchUntilReadyError +func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { + if f.watchUntilReadyError != nil { + return f.watchUntilReadyError } - return f.PrintingKubeClient.WatchUntilReady(resources, d) + return f.PrintingKubeWaiter.WatchUntilReady(resources, d) } // Update returns the configured error if set or prints @@ -140,8 +150,16 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) } -func (f *FailingKubeClient) SetWaiter(_ kube.WaitStrategy) error { - return nil +func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { + waiter, _ := f.PrintingKubeClient.GetWaiter(ws) + printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) + return &FailingKubeWaiter{ + PrintingKubeWaiter: printingKubeWaiter, + waitError: f.WaitError, + waitForDeleteError: f.WaitForDeleteError, + watchUntilReadyError: f.WatchUntilReadyError, + waitDuration: f.WaitDuration, + }, nil } func createDummyResourceList() kube.ResourceList { @@ -151,5 +169,4 @@ func createDummyResourceList() kube.ResourceList { var resourceList kube.ResourceList resourceList.Append(&resInfo) return resourceList - } diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index fa25a04b3..f6659a904 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -37,6 +37,12 @@ type PrintingKubeClient struct { LogOutput io.Writer } +// PrintingKubeWaiter implements kube.Waiter, but simply prints the reader to the given output +type PrintingKubeWaiter struct { + Out io.Writer + LogOutput io.Writer +} + // IsReachable checks if the cluster is reachable func (p *PrintingKubeClient) IsReachable() error { return 
nil @@ -59,17 +65,23 @@ func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[strin return make(map[string][]runtime.Object), nil } -func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } -func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } -func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error { +func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// WatchUntilReady implements KubeClient WatchUntilReady. +func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { _, err := io.Copy(p.Out, bufferize(resources)) return err } @@ -85,12 +97,6 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, return &kube.Result{Deleted: resources}, nil } -// WatchUntilReady implements KubeClient WatchUntilReady. -func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { - _, err := io.Copy(p.Out, bufferize(resources)) - return err -} - // Update implements KubeClient Update. func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(modified)) @@ -140,8 +146,8 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (p *PrintingKubeClient) SetWaiter(_ kube.WaitStrategy) error { - return nil +func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { + return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } func bufferize(resources kube.ResourceList) io.Reader { diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index d6ac823f1..fb42fed06 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -48,9 +48,9 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - // Set Waiter sets the Kube.Waiter - SetWaiter(ws WaitStrategy) error - Waiter + + // Get Waiter gets the Kube.Waiter + GetWaiter(ws WaitStrategy) (Waiter, error) } // Waiter defines methods related to waiting for resource states. 
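Note on the interface change above: callers no longer mutate the client with SetWaiter; they ask the client for a Waiter per operation and wait on it directly. A minimal sketch of the resulting call pattern, using only the names introduced in this patch (the helper function and the five-minute timeout are illustrative assumptions, not part of the change):

package example

import (
	"fmt"
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// waitForReady is a hypothetical helper: pick a wait strategy, obtain a
// Waiter from the client, then block until the resources are ready or
// the timeout elapses.
func waitForReady(client kube.Interface, resources kube.ResourceList) error {
	waiter, err := client.GetWaiter(kube.StatusWatcherStrategy)
	if err != nil {
		return fmt.Errorf("failed to get waiter: %w", err)
	}
	return waiter.Wait(resources, 5*time.Minute)
}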
From 8efd428e5da26d035eb7a095e348c9cbbfae9f26 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 14:10:31 +0000 Subject: [PATCH 123/192] switch back to k8s rest mapper Signed-off-by: Austin Abro --- go.mod | 2 +- go.sum | 4 +- internal/restmapper/restmapper.go | 372 ------------------------------ pkg/kube/client.go | 5 +- 4 files changed, 5 insertions(+), 378 deletions(-) delete mode 100644 internal/restmapper/restmapper.go diff --git a/go.mod b/go.mod index c0f172c1e..bfc55057a 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.3 oras.land/oras-go/v2 v2.5.0 + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) @@ -177,7 +178,6 @@ require ( k8s.io/component-base v0.32.3 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect - sigs.k8s.io/controller-runtime v0.20.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect diff --git a/go.sum b/go.sum index 620678cbf..1153931d8 100644 --- a/go.sum +++ b/go.sum @@ -535,8 +535,8 @@ k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJ k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= -sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= diff --git a/internal/restmapper/restmapper.go b/internal/restmapper/restmapper.go deleted file mode 100644 index 85b7c2a69..000000000 --- a/internal/restmapper/restmapper.go +++ /dev/null @@ -1,372 +0,0 @@ -/* -Copyright The Helm Authors. -This file was initially copied and modified from - https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package restmapper - -import ( - "fmt" - "net/http" - "sort" - "strings" - "sync" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -/* -Adapted from controller-runtime v0.19 before The Kubernetes Aggregated Discovery was enabled -in controller-runtime v0.20 which broke the preferred version discovery in the RESTMapper. -https://github.com/kubernetes-sigs/controller-runtime/blob/e818ce450d3d358600848dcfa1b585de64e7c865/pkg/client/apiutil/restmapper.go -*/ - -// NewLazyRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. -func NewLazyRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { - if httpClient == nil { - return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") - } - - client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) - if err != nil { - return nil, err - } - return &mapper{ - mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: client, - knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: map[string]*metav1.APIGroup{}, - }, nil -} - -// mapper is a RESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -type mapper struct { - mapper meta.RESTMapper - client discovery.DiscoveryInterface - knownGroups map[string]*restmapper.APIGroupResources - apiGroups map[string]*metav1.APIGroup - - // mutex to provide thread-safe mapper reloading. - mu sync.RWMutex -} - -// KindFor implements Mapper.KindFor. -func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.getMapper().KindFor(resource) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return schema.GroupVersionKind{}, err - } - res, err = m.getMapper().KindFor(resource) - } - - return res, err -} - -// KindsFor implements Mapper.KindsFor. -func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.getMapper().KindsFor(resource) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return nil, err - } - res, err = m.getMapper().KindsFor(resource) - } - - return res, err -} - -// ResourceFor implements Mapper.ResourceFor. -func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.getMapper().ResourceFor(input) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return schema.GroupVersionResource{}, err - } - res, err = m.getMapper().ResourceFor(input) - } - - return res, err -} - -// ResourcesFor implements Mapper.ResourcesFor. -func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.getMapper().ResourcesFor(input) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return nil, err - } - res, err = m.getMapper().ResourcesFor(input) - } - - return res, err -} - -// RESTMapping implements Mapper.RESTMapping. 
-func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.getMapper().RESTMapping(gk, versions...) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return nil, err - } - res, err = m.getMapper().RESTMapping(gk, versions...) - } - - return res, err -} - -// RESTMappings implements Mapper.RESTMappings. -func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.getMapper().RESTMappings(gk, versions...) - if meta.IsNoMatchError(err) { - if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return nil, err - } - res, err = m.getMapper().RESTMappings(gk, versions...) - } - - return res, err -} - -// ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *mapper) ResourceSingularizer(resource string) (string, error) { - return m.getMapper().ResourceSingularizer(resource) -} - -func (m *mapper) getMapper() meta.RESTMapper { - m.mu.RLock() - defer m.mu.RUnlock() - return m.mapper -} - -// addKnownGroupAndReload reloads the mapper with updated information about missing API group. -// versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { - // versions will here be [""] if the forwarded Version value of - // GroupVersionResource (in calling method) was not specified. - if len(versions) == 1 && versions[0] == "" { - versions = nil - } - - // If no specific versions are set by user, we will scan all available ones for the API group. - // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls - // this data will be taken from cache. - if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByName(groupName) - if err != nil { - return err - } - if apiGroup != nil { - for _, version := range apiGroup.Versions { - versions = append(versions, version.Version) - } - } - } - - m.mu.Lock() - defer m.mu.Unlock() - - // Create or fetch group resources from cache. - groupResources := &restmapper.APIGroupResources{ - Group: metav1.APIGroup{Name: groupName}, - VersionedResources: make(map[string][]metav1.APIResource), - } - - // Update information for group resources about versioned resources. - // The number of API calls is equal to the number of versions: /apis//. - // If we encounter a missing API version (NotFound error), we will remove the group from - // the m.apiGroups and m.knownGroups caches. - // If this happens, in the next call the group will be added back to apiGroups - // and only the existing versions will be loaded in knownGroups. - groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) - if err != nil { - return fmt.Errorf("failed to get API group resources: %w", err) - } - - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } - - // Update information for group resources about the API group by adding new versions. - // Ignore the versions that are already registered. 
- for groupVersion, resources := range groupVersionResources { - version := groupVersion.Version - - groupResources.VersionedResources[version] = resources.APIResources - found := false - for _, v := range groupResources.Group.Versions { - if v.Version == version { - found = true - break - } - } - - if !found { - groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{ - GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(), - Version: version, - }) - } - } - - // Update data in the cache. - m.knownGroups[groupName] = groupResources - - // Finally, update the group with received information and regenerate the mapper. - updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) - for _, agr := range m.knownGroups { - updatedGroupResources = append(updatedGroupResources, agr) - } - - m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil -} - -// findAPIGroupByNameLocked returns API group by its name. -func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { - // Looking in the cache first. - { - m.mu.RLock() - group, ok := m.apiGroups[groupName] - m.mu.RUnlock() - if ok { - return group, nil - } - } - - // Update the cache if nothing was found. - apiGroups, err := m.client.ServerGroups() - if err != nil { - return nil, fmt.Errorf("failed to get server groups: %w", err) - } - if len(apiGroups.Groups) == 0 { - return nil, fmt.Errorf("received an empty API groups list") - } - - m.mu.Lock() - for i := range apiGroups.Groups { - group := &apiGroups.Groups[i] - m.apiGroups[group.Name] = group - } - m.mu.Unlock() - - // Looking in the cache again. - m.mu.RLock() - defer m.mu.RUnlock() - - // Don't return an error here if the API group is not present. - // The reloaded RESTMapper will take care of returning a NoMatchError. - return m.apiGroups[groupName], nil -} - -// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. -// This method might modify the cache so it needs to be called under the lock. -func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { - groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) - failedGroups := make(map[schema.GroupVersion]error) - - for _, version := range versions { - groupVersion := schema.GroupVersion{Group: groupName, Version: version} - - apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if apierrors.IsNotFound(err) { - // If the version is not found, we remove the group from the cache - // so it gets refreshed on the next call. - if m.isAPIGroupCached(groupVersion) { - delete(m.apiGroups, groupName) - } - if m.isGroupVersionCached(groupVersion) { - delete(m.knownGroups, groupName) - } - continue - } else if err != nil { - failedGroups[groupVersion] = err - } - - if apiResourceList != nil { - // even in case of error, some fallback might have been returned. - groupVersionResources[groupVersion] = apiResourceList - } - } - - if len(failedGroups) > 0 { - err := ErrResourceDiscoveryFailed(failedGroups) - return nil, &err - } - - return groupVersionResources, nil -} - -// isGroupVersionCached checks if a version for a group is cached in the known groups cache. 
-func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { - if cachedGroup, ok := m.knownGroups[gv.Group]; ok { - _, cached := cachedGroup.VersionedResources[gv.Version] - return cached - } - - return false -} - -// isAPIGroupCached checks if a version for a group is cached in the api groups cache. -func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { - cachedGroup, ok := m.apiGroups[gv.Group] - if !ok { - return false - } - - for _, version := range cachedGroup.Versions { - if version.Version == gv.Version { - return true - } - } - - return false -} - -// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. -// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for -// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. -type ErrResourceDiscoveryFailed map[schema.GroupVersion]error - -// Error implements the error interface. -func (e *ErrResourceDiscoveryFailed) Error() string { - subErrors := []string{} - for k, v := range *e { - subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) - } - sort.Strings(subErrors) - return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) -} - -func (e *ErrResourceDiscoveryFailed) Unwrap() []error { - subErrors := []error{} - for gv, err := range *e { - if apierrors.IsNotFound(err) { - err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} - } - subErrors = append(subErrors, err) - } - return subErrors -} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 032f79850..a62b83b3e 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -35,6 +35,7 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,8 +51,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - - helmRestmapper "helm.sh/helm/v4/internal/restmapper" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -114,7 +113,7 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - restMapper, err := helmRestmapper.NewLazyRESTMapper(cfg, httpClient) + restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) if err != nil { return nil, err } From 21ee7212429ed8354f3093af40272c3c730520b7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Tue, 25 Mar 2025 14:15:27 +0000 Subject: [PATCH 124/192] go fmt Signed-off-by: Austin Abro --- pkg/kube/interface.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index fb42fed06..f68367dcd 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -48,7 +48,7 @@ type Interface interface { Build(reader io.Reader, validate bool) (ResourceList, error) // IsReachable checks whether the client is able to connect to the cluster. 
IsReachable() error - + // Get Waiter gets the Kube.Waiter GetWaiter(ws WaitStrategy) (Waiter, error) } From 000c098a41b15147652256e9b863ebf30dcd17d9 Mon Sep 17 00:00:00 2001 From: Suleiman Dibirov Date: Sat, 5 Apr 2025 17:47:25 +0300 Subject: [PATCH 125/192] fix(concurrency): add mutex to protect repoFailList and out in updateCharts Signed-off-by: Suleiman Dibirov --- pkg/cmd/repo_update.go | 16 +++++++++++++--- pkg/cmd/repo_update_test.go | 11 +++++++++-- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/pkg/cmd/repo_update.go b/pkg/cmd/repo_update.go index 6590d9872..12de2bdaa 100644 --- a/pkg/cmd/repo_update.go +++ b/pkg/cmd/repo_update.go @@ -111,20 +111,30 @@ func (o *repoUpdateOptions) run(out io.Writer) error { func updateCharts(repos []*repo.ChartRepository, out io.Writer) error { fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...") var wg sync.WaitGroup - var repoFailList []string + failRepoURLChan := make(chan string, len(repos)) + for _, re := range repos { wg.Add(1) go func(re *repo.ChartRepository) { defer wg.Done() if _, err := re.DownloadIndexFile(); err != nil { fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err) - repoFailList = append(repoFailList, re.Config.URL) + failRepoURLChan <- re.Config.URL } else { fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name) } }(re) } - wg.Wait() + + go func() { + wg.Wait() + close(failRepoURLChan) + }() + + var repoFailList []string + for url := range failRepoURLChan { + repoFailList = append(repoFailList, url) + } if len(repoFailList) > 0 { return fmt.Errorf("Failed to update the following repositories: %s", diff --git a/pkg/cmd/repo_update_test.go b/pkg/cmd/repo_update_test.go index 6fc4c8f4b..aa8f52beb 100644 --- a/pkg/cmd/repo_update_test.go +++ b/pkg/cmd/repo_update_test.go @@ -172,7 +172,14 @@ func TestUpdateChartsFailWithError(t *testing.T) { defer ts.Stop() var invalidURL = ts.URL() + "55" - r, err := repo.NewChartRepository(&repo.Entry{ + r1, err := repo.NewChartRepository(&repo.Entry{ + Name: "charts", + URL: invalidURL, + }, getter.All(settings)) + if err != nil { + t.Error(err) + } + r2, err := repo.NewChartRepository(&repo.Entry{ Name: "charts", URL: invalidURL, }, getter.All(settings)) @@ -181,7 +188,7 @@ func TestUpdateChartsFailWithError(t *testing.T) { } b := bytes.NewBuffer(nil) - err = updateCharts([]*repo.ChartRepository{r}, b) + err = updateCharts([]*repo.ChartRepository{r1, r2}, b) if err == nil { t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set") return From be2b84685af03aeb34b5b7d84960315aec71c270 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 31 Mar 2025 09:21:33 -0700 Subject: [PATCH 126/192] cleanup: Remove Helm v2 template lint rules Signed-off-by: George Jenkins --- pkg/lint/rules/template.go | 26 +------------------------- pkg/lint/rules/template_test.go | 20 -------------------- 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/pkg/lint/rules/template.go b/pkg/lint/rules/template.go index 287968340..4d421f5bf 100644 --- a/pkg/lint/rules/template.go +++ b/pkg/lint/rules/template.go @@ -24,7 +24,6 @@ import ( "os" "path" "path/filepath" - "regexp" "strings" "github.com/pkg/errors" @@ -39,11 +38,6 @@ import ( "helm.sh/helm/v4/pkg/lint/support" ) -var ( - crdHookSearch = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`) - releaseTimeSearch = 
regexp.MustCompile(`\.Release\.Time`) ) // Templates lints the templates in the Linter. func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) { TemplatesWithKubeVersion(linter, values, namespace, nil) } @@ -119,14 +113,10 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string - Metadata.Namespace is not set */ for _, template := range chart.Templates { - fileName, data := template.Name, template.Data + fileName := template.Name fpath = fileName linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName)) - // These are v3 specific checks to make sure and warn people if their - // chart is not compatible with v3 - linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data)) - linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data)) // We only apply the following lint rules to yaml files if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" { @@ -291,20 +281,6 @@ func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc { } } -func validateNoCRDHooks(manifest []byte) error { - if crdHookSearch.Match(manifest) { - return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart") - } - return nil -} - -func validateNoReleaseTime(manifest []byte) error { - if releaseTimeSearch.Match(manifest) { - return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates") - } - return nil -} - // validateMatchSelector ensures that template specs have a selector declared. // See https://github.com/helm/helm/issues/1990 func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error { diff --git a/pkg/lint/rules/template_test.go b/pkg/lint/rules/template_test.go index 7205ace6d..bd503368d 100644 --- a/pkg/lint/rules/template_test.go +++ b/pkg/lint/rules/template_test.go @@ -85,26 +85,6 @@ func TestTemplateIntegrationHappyPath(t *testing.T) { } } -func TestV3Fail(t *testing.T) { - linter := support.Linter{ChartDir: "./testdata/v3-fail"} - Templates(&linter, values, namespace, strict) - res := linter.Messages - - if len(res) != 3 { - t.Fatalf("Expected 3 errors, got %d, %v", len(res), res) - } - - if !strings.Contains(res[0].Err.Error(), ".Release.Time has been removed in v3") { - t.Errorf("Unexpected error: %s", res[0].Err) - } - if !strings.Contains(res[1].Err.Error(), "manifest is a crd-install hook") { - t.Errorf("Unexpected error: %s", res[1].Err) - } - if !strings.Contains(res[2].Err.Error(), "manifest is a crd-install hook") { - t.Errorf("Unexpected error: %s", res[2].Err) - } -} - func TestMultiTemplateFail(t *testing.T) { linter := support.Linter{ChartDir: "./testdata/multi-template-fail"} Templates(&linter, values, namespace, strict) From f4631bf3d8e7fe64395e4f5faec505539e73fb9e Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 12:32:35 +0100 Subject: [PATCH 127/192] Migrate kube package to slog As part of Helm v4, we want to migrate logs to slog.
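For illustration, a minimal sketch of wiring an slog-backed logger into the kube client under the new interface; the text handler and stderr destination are assumptions, since any *slog.Logger satisfies kube.Logger by providing Debug(msg string, args ...any):

package example

import (
	"log/slog"
	"os"

	"helm.sh/helm/v4/pkg/kube"
)

func newClientWithDebugLogging() *kube.Client {
	// kube.New no longer returns an error after the earlier waiter refactor.
	c := kube.New(nil)
	// *slog.Logger implements the kube.Logger interface added in this patch.
	c.Log = slog.New(slog.NewTextHandler(os.Stderr, nil))
	return c
}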
Signed-off-by: Benoit Tigeot --- pkg/action/action.go | 2 +- pkg/kube/client.go | 36 ++++++++++++++---------------- pkg/kube/logger.go | 30 +++++++++++++++++++++++++ pkg/kube/ready.go | 50 ++++++++++++++++++++++-------------------- pkg/kube/ready_test.go | 18 +++++++-------- 5 files changed, 83 insertions(+), 53 deletions(-) create mode 100644 pkg/kube/logger.go diff --git a/pkg/action/action.go b/pkg/action/action.go index ea2dc0dd7..4f100f833 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -376,7 +376,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { kc := kube.New(getter) - kc.Log = log + kc.Log = log // TODO: Switch to slog compatible logger lazyClient := &lazyClient{ namespace: namespace, diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a62b83b3e..44baa4ba0 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -73,7 +73,7 @@ type Client struct { // needs. The smaller surface area of the interface means there is a lower // chance of it changing. Factory Factory - Log func(string, ...interface{}) + Log Logger // Namespace allows to bypass the kubeconfig file for the choice of the namespace Namespace string @@ -167,8 +167,6 @@ func New(getter genericclioptions.RESTClientGetter) *Client { return c } -var nopLogger = func(_ string, _ ...interface{}) {} - // getKubeClient get or create a new KubernetesClientSet func (c *Client) getKubeClient() (kubernetes.Interface, error) { var err error @@ -198,7 +196,7 @@ func (c *Client) IsReachable() error { // Create creates Kubernetes resources specified in the resource list. func (c *Client) Create(resources ResourceList) (*Result, error) { - c.Log("creating %d resource(s)", len(resources)) + c.Log.Debug("creating resource(s)", "resources", resources) if err := perform(resources, createResource); err != nil { return nil, err } @@ -250,7 +248,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) if err != nil { - c.Log("Warning: get the relation pod is failed, err:%s", err.Error()) + c.Log.Debug("failed to get related pods", "error", err) } } } @@ -268,7 +266,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run if info == nil { return objs, nil } - c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name) + c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name) selector, ok, _ := getSelectorFromObject(info.Object) if !ok { return objs, nil @@ -410,7 +408,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err updateErrors := []string{} res := &Result{} - c.Log("checking %d resources for changes", len(target)) + c.Log.Debug("checking resources for changes", "original", original, "target", target) err := target.Visit(func(info *resource.Info, err error) error { if err != nil { return err @@ -431,7 +429,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } kind := info.Mapping.GroupVersionKind.Kind - c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace) + c.Log.Debug("created a new resource", "kind", kind, "name", info.Name, "namespace", info.Namespace) return nil } @@ -442,7 +440,7 @@ func 
(c *Client) Update(original, target ResourceList, force bool) (*Result, err } if err := updateResource(c, info, originalInfo.Object, force); err != nil { - c.Log("error updating the resource %q:\n\t %v", info.Name, err) + c.Log.Debug("error updating the resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "error", err) updateErrors = append(updateErrors, err.Error()) } // Because we check for errors later, append the info regardless @@ -459,22 +457,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } for _, info := range original.Difference(target) { - c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace) + c.Log.Debug("deleting resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace) if err := info.Get(); err != nil { - c.Log("Unable to get obj %q, err: %s", info.Name, err) + c.Log.Debug("unable to get object", "name", info.Name, "error", err) continue } annotations, err := metadataAccessor.Annotations(info.Object) if err != nil { - c.Log("Unable to get annotations on %q, err: %s", info.Name, err) + c.Log.Debug("unable to get annotations", "name", info.Name, "error", err) } if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { - c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy) + c.Log.Debug("skipping delete due to annotation", "name", info.Name, "annotation", ResourcePolicyAnno, "value", KeepPolicy) continue } if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { - c.Log("Failed to delete %q, err: %s", info.ObjectName(), err) + c.Log.Debug("failed to delete resource", "name", info.Name, "error", err) continue } res.Deleted = append(res.Deleted, info) @@ -503,11 +501,11 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa res := &Result{} mtx := sync.Mutex{} err := perform(resources, func(info *resource.Info) error { - c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind) + c.Log.Debug("starting delete resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace) err := deleteResource(info, propagation) if err == nil || apierrors.IsNotFound(err) { if err != nil { - c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err) + c.Log.Debug("ignoring delete failure", "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) } mtx.Lock() defer mtx.Unlock() @@ -655,7 +653,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, if err != nil { return errors.Wrap(err, "failed to replace object") } - c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind) + c.Log.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind) } else { patch, patchType, err := createPatch(target, currentObj) if err != nil { @@ -663,7 +661,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, } if patch == nil || string(patch) == "{}" { - c.Log("Looks like there are no changes for %s %q", kind, target.Name) + c.Log.Debug("no changes detected", "kind", kind, "name", target.Name) // This needs to happen to make sure that Helm has the latest info from the API // Otherwise there will be no labels and other functions that use labels will 
panic if err := target.Get(); err != nil { @@ -672,7 +670,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } // send patch to server - c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace) + c.Log.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) if err != nil { return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind) diff --git a/pkg/kube/logger.go b/pkg/kube/logger.go new file mode 100644 index 000000000..da032b752 --- /dev/null +++ b/pkg/kube/logger.go @@ -0,0 +1,30 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// Logger defines a minimal logging interface compatible with slog.Logger +type Logger interface { + Debug(msg string, args ...any) +} + +// NopLogger is a logger that does nothing +type NopLogger struct{} + +// Debug implements the Logger interface +func (n NopLogger) Debug(msg string, args ...any) {} + +var nopLogger = NopLogger{} diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index dd5869e6a..2814aa72e 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -19,6 +19,8 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" "fmt" + "io" + "log/slog" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -57,13 +59,13 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption { // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // be used to override defaults. -func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker { +func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyCheckerOption) ReadyChecker { c := ReadyChecker{ client: cl, - log: log, + log: logger, } if c.log == nil { - c.log = nopLogger + c.log = slog.New(slog.NewTextHandler(io.Discard, nil)) } for _, opt := range opts { opt(&c) @@ -74,7 +76,7 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), // ReadyChecker is a type that can check core Kubernetes types for readiness. 
type ReadyChecker struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -230,18 +232,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool { return true } } - c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) + c.log.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName()) return false } func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { if job.Status.Failed > *job.Spec.BackoffLimit { - c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) + c.log.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName()) // If a job is failed, it can't recover, so throw an error return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) } if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { - c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) + c.log.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName()) return false, nil } return true, nil @@ -255,7 +257,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { // Ensure that the service cluster IP is not empty if s.Spec.ClusterIP == "" { - c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) + c.log.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName()) return false } @@ -263,12 +265,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { if s.Spec.Type == corev1.ServiceTypeLoadBalancer { // do not wait when at least 1 external IP is set if len(s.Spec.ExternalIPs) > 0 { - c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) + c.log.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs) return true } if s.Status.LoadBalancer.Ingress == nil { - c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) + c.log.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName()) return false } } @@ -278,7 +280,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { if v.Status.Phase != corev1.ClaimBound { - c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) + c.log.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName()) return false } return true @@ -291,13 +293,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy } // Verify the generation observed by the deployment controller matches the spec generation if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { - c.log("Deployment is not ready: %s/%s. 
observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation) + c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "observedGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) return false } expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) if !(rs.Status.ReadyReplicas >= expectedReady) { - c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) + c.log.Debug("Deployment is not ready, not all Pods are ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) return false } return true @@ -306,7 +308,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Verify the generation observed by the daemonSet controller matches the spec generation if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { - c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation) + c.log.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation) return false } @@ -317,7 +319,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Make sure all the updated pods have been scheduled if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { - c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) + c.log.Debug("DaemonSet is not ready, not all Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) return false } maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) @@ -330,7 +332,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable if !(int(ds.Status.NumberReady) >= expectedReady) { - c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) + c.log.Debug("DaemonSet is not ready. All Pods are not ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) return false } return true @@ -382,13 +384,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Verify the generation observed by the statefulSet controller matches the spec generation if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - c.log("StatefulSet is not ready: %s/%s. 
observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation) + c.log.Debug("StatefulSet is not ready, observedGeneration does not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "observedGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) return false } // If the update strategy is not a rolling update, there will be nothing to wait for if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { - c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type) + c.log.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type) return true } @@ -414,30 +416,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Make sure all the updated pods have been scheduled if int(sts.Status.UpdatedReplicas) < expectedReplicas { - c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas) + c.log.Debug("StatefulSet is not ready, not all Pods have been scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) return false } if int(sts.Status.ReadyReplicas) != replicas { - c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) + c.log.Debug("StatefulSet is not ready, not all Pods are ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) return false } // This check only makes sense when all partitions are being upgraded otherwise during a // partitioned rolling upgrade, this condition will never evaluate to true, leading to // error. if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { - c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision) + c.log.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision) return false } - c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) + c.log.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) return true } func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { // Verify the generation observed by the replicationController controller matches the spec generation if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { - c.log("ReplicationController is not ready: %s/%s.
observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation) + c.log.Debug("ReplicationController is not ready, observedGeneration does not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "observedGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) return false } return true @@ -446,7 +448,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { // Verify the generation observed by the replicaSet controller matches the spec generation if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { - c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation) + c.log.Debug("ReplicaSet is not ready, observedGeneration does not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "observedGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) return false } return true diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index 879fa4c76..bd382a9c2 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -37,7 +37,7 @@ const defaultNamespace = metav1.NamespaceDefault func Test_ReadyChecker_IsReady_Pod(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -113,7 +113,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -188,7 +188,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -270,7 +270,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -345,7 +345,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -420,7 +420,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -495,7 +495,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -570,7 +570,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } @@ -661,7 +661,7 @@ func
Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { type fields struct { client kubernetes.Interface - log func(string, ...interface{}) + log Logger checkJobs bool pausedAsReady bool } From dfaf2492213e19783ea36b72ae153ec9fd966513 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 16:15:18 +0100 Subject: [PATCH 128/192] Remove unreachable error Signed-off-by: Benoit Tigeot --- pkg/kube/client.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 44baa4ba0..5b2fcc56f 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -717,9 +717,6 @@ func copyRequestStreamToWriter(request *rest.Request, podName, containerName str if err != nil { return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName) } - if err != nil { - return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName) - } return nil } From 8d30464f2c80158b88db4a6dc4273e07fc3b09ea Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 16:17:01 +0100 Subject: [PATCH 129/192] Do not mask warning alerts Signed-off-by: Benoit Tigeot --- pkg/kube/client.go | 2 +- pkg/kube/logger.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 5b2fcc56f..dcba9b0a4 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -248,7 +248,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) if err != nil { - c.Log.Debug("failed to get related pods", "error", err) + c.Log.Warn("failed to get related pods", "error", err) } } } diff --git a/pkg/kube/logger.go b/pkg/kube/logger.go index da032b752..357a6827f 100644 --- a/pkg/kube/logger.go +++ b/pkg/kube/logger.go @@ -19,6 +19,7 @@ package kube // Logger defines a minimal logging interface compatible with slog.Logger type Logger interface { Debug(msg string, args ...any) + Warn(msg string, args ...any) } // NopLogger is a logger that does nothing @@ -26,5 +27,6 @@ type NopLogger struct{} // Debug implements the Logger interface func (n NopLogger) Debug(msg string, args ...any) {} +func (n NopLogger) Warn(msg string, args ...any) {} var nopLogger = NopLogger{} From d6d7cff4179b2ca111333220ea383a279b6ea489 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 16:30:23 +0100 Subject: [PATCH 130/192] Try to make log more common and more easily grepable Signed-off-by: Benoit Tigeot --- pkg/kube/client.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index dcba9b0a4..a7279e40b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -196,7 +196,7 @@ func (c *Client) IsReachable() error { // Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) { - c.Log.Debug("creating resource(s)", "resources", resources) + c.Log.Debug("creating resource(s)", "resources", len(resources)) if err := perform(resources, createResource); err != nil { return nil, err } @@ -266,7 +266,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run if info == nil { return objs, nil } - c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name) + c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) selector, ok, _ := getSelectorFromObject(info.Object) if !ok { return objs, nil @@ -408,7 +408,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err updateErrors := []string{} res := &Result{} - c.Log.Debug("checking resources for changes", "original", original, "target", target) + c.Log.Debug("checking resources for changes", "resources", len(target)) err := target.Visit(func(info *resource.Info, err error) error { if err != nil { return err @@ -429,7 +429,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } kind := info.Mapping.GroupVersionKind.Kind - c.Log.Debug("created a new resource", "kind", kind, "name", info.Name, "namespace", info.Namespace) + c.Log.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind) return nil } @@ -440,7 +440,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } if err := updateResource(c, info, originalInfo.Object, force); err != nil { - c.Log.Debug("error updating the resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "error", err) + c.Log.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) updateErrors = append(updateErrors, err.Error()) } // Because we check for errors later, append the info regardless @@ -457,22 +457,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } for _, info := range original.Difference(target) { - c.Log.Debug("deleting resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace) + c.Log.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) if err := info.Get(); err != nil { - c.Log.Debug("unable to get object", "name", info.Name, "error", err) + c.Log.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) continue } annotations, err := metadataAccessor.Annotations(info.Object) if err != nil { - c.Log.Debug("unable to get annotations", "name", info.Name, "error", err) + c.Log.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) } if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { - c.Log.Debug("skipping delete due to annotation", "name", info.Name, "annotation", ResourcePolicyAnno, "value", KeepPolicy) + c.Log.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) continue } if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil 
{ - c.Log.Debug("failed to delete resource", "name", info.Name, "error", err) + c.Log.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) continue } res.Deleted = append(res.Deleted, info) @@ -501,11 +501,11 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa res := &Result{} mtx := sync.Mutex{} err := perform(resources, func(info *resource.Info) error { - c.Log.Debug("starting delete resource", "kind", info.Mapping.GroupVersionKind.Kind, "name", info.Name, "namespace", info.Namespace) + c.Log.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) err := deleteResource(info, propagation) if err == nil || apierrors.IsNotFound(err) { if err != nil { - c.Log.Debug("ignoring delete failure", "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) } mtx.Lock() defer mtx.Unlock() From b642bca8f61dfbe19e98ad75b73b2eaac2511405 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 17:16:21 +0100 Subject: [PATCH 131/192] Provide an adapter to easily pass a slog.Default() ``` helmClient.Log = NewSlogAdapter(slog.Default()) ``` Signed-off-by: Benoit Tigeot --- pkg/kube/logger.go | 20 ++++++++++++++++++-- pkg/kube/ready.go | 4 +--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pkg/kube/logger.go b/pkg/kube/logger.go index 357a6827f..00724c7c3 100644 --- a/pkg/kube/logger.go +++ b/pkg/kube/logger.go @@ -16,17 +16,33 @@ limitations under the License. package kube +import "log/slog" + // Logger defines a minimal logging interface compatible with slog.Logger type Logger interface { Debug(msg string, args ...any) Warn(msg string, args ...any) } -// NopLogger is a logger that does nothing +type SlogAdapter struct { + logger *slog.Logger +} + type NopLogger struct{} -// Debug implements the Logger interface func (n NopLogger) Debug(msg string, args ...any) {} func (n NopLogger) Warn(msg string, args ...any) {} var nopLogger = NopLogger{} + +func (a SlogAdapter) Debug(msg string, args ...any) { + a.logger.Debug(msg, args...) +} + +func (a SlogAdapter) Warn(msg string, args ...any) { + a.logger.Warn(msg, args...) 
+} + +func NewSlogAdapter(logger *slog.Logger) Logger { + return SlogAdapter{logger: logger} +} diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 2814aa72e..871bd4eee 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -19,8 +19,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" "fmt" - "io" - "log/slog" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -65,7 +63,7 @@ func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyChecke log: logger, } if c.log == nil { - c.log = slog.New(slog.NewTextHandler(io.Discard, nil)) + c.log = nopLogger } for _, opt := range opts { opt(&c) From 227d2707888188ec823b1e0421f4ce0a88c3fe63 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 17:19:59 +0100 Subject: [PATCH 132/192] Extra comment + Default logger fallback Signed-off-by: Benoit Tigeot --- pkg/kube/logger.go | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/pkg/kube/logger.go b/pkg/kube/logger.go index 00724c7c3..f0ddfe49a 100644 --- a/pkg/kube/logger.go +++ b/pkg/kube/logger.go @@ -14,35 +14,53 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package kube provides Kubernetes client utilities for Helm. package kube import "log/slog" -// Logger defines a minimal logging interface compatible with slog.Logger +// Logger defines a minimal logging interface compatible with structured logging. +// It provides methods for different log levels with structured key-value pairs. type Logger interface { + // Debug logs a message at debug level with structured key-value pairs. Debug(msg string, args ...any) - Warn(msg string, args ...any) -} -type SlogAdapter struct { - logger *slog.Logger + // Warn logs a message at warning level with structured key-value pairs. + Warn(msg string, args ...any) } +// NopLogger is a logger implementation that discards all log messages. type NopLogger struct{} -func (n NopLogger) Debug(msg string, args ...any) {} -func (n NopLogger) Warn(msg string, args ...any) {} +// Debug implements Logger.Debug by doing nothing. +func (NopLogger) Debug(msg string, args ...any) {} -var nopLogger = NopLogger{} +// Warn implements Logger.Warn by doing nothing. +func (NopLogger) Warn(msg string, args ...any) {} + +// DefaultLogger provides a no-op logger that discards all messages. +// It can be used as a default when no logger is provided. +var DefaultLogger Logger = NopLogger{} + +// SlogAdapter adapts a standard library slog.Logger to the Logger interface. +type SlogAdapter struct { + logger *slog.Logger +} +// Debug implements Logger.Debug by forwarding to the underlying slog.Logger. func (a SlogAdapter) Debug(msg string, args ...any) { a.logger.Debug(msg, args...) } +// Warn implements Logger.Warn by forwarding to the underlying slog.Logger. func (a SlogAdapter) Warn(msg string, args ...any) { a.logger.Warn(msg, args...) } +// NewSlogAdapter creates a Logger that forwards log messages to a slog.Logger. 
func NewSlogAdapter(logger *slog.Logger) Logger { + if logger == nil { + return DefaultLogger + } return SlogAdapter{logger: logger} } From eb2dfe7dbf8f23adcbccfc10f7b93d1ef3fcd7d6 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 17:31:05 +0100 Subject: [PATCH 133/192] Some interesting rephrasing by Terry Howe See: https://github.com/helm/helm/pull/30698#discussion_r2012394228 Signed-off-by: Benoit Tigeot --- pkg/kube/ready.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 871bd4eee..ab467bc01 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -291,13 +291,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy } // Verify the generation observed by the deployment controller matches the spec generation if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { - c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "observedGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) + c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) return false } expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) if !(rs.Status.ReadyReplicas >= expectedReady) { - c.log.Debug("Deployment is not ready, not all Pods are ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) + c.log.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) return false } return true @@ -317,7 +317,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Make sure all the updated pods have been scheduled if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { - c.log.Debug("DaemonSet is not ready, not all Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) + c.log.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) return false } maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) @@ -330,7 +330,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable if !(int(ds.Status.NumberReady) >= expectedReady) { - c.log.Debug("DaemonSet is not ready. 
All Pods are not ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) + c.log.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) return false } return true @@ -382,7 +382,7 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Verify the generation observed by the statefulSet controller matches the spec generation if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - c.log.Debug("StatefulSet is not ready, observedGeneration does not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "observedGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) + c.log.Debug("StatefulSet is not ready, observedGeneration does not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) return false } @@ -414,12 +414,12 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Make sure all the updated pods have been scheduled if int(sts.Status.UpdatedReplicas) < expectedReplicas { - c.log.Debug("StatefulSet is not ready, not all Pods have been scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) + c.log.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) return false } if int(sts.Status.ReadyReplicas) != replicas { - c.log.Debug("StatefulSet is not ready, not all Pods are ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) + c.log.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) return false } // This check only makes sense when all partitions are being upgraded otherwise during a // partitioned rolling upgrade, this condition will never evaluate to true, leading to // error. @@ -437,7 +437,7 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { // Verify the generation observed by the replicationController controller matches the spec generation if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { - c.log.Debug("ReplicationController is not ready, observedGeneration does not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "observedGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) + c.log.Debug("ReplicationController is not ready, observedGeneration does not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) return false } return true @@ -446,7 +446,7 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { // Verify the generation observed by the replicaSet controller matches the spec generation if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
- c.log.Debug("ReplicaSet is not ready, observedGeneration does not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "observedGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) + c.log.Debug("ReplicaSet is not ready, observedGeneration does not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) return false } return true From 394ba2d55e923f0aa954863490f6d4e02ff2b209 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 17:33:29 +0100 Subject: [PATCH 134/192] Properly use DefaultLogger Signed-off-by: Benoit Tigeot --- pkg/kube/client_test.go | 2 +- pkg/kube/ready.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 8ae1df238..994b2be5c 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -107,7 +107,7 @@ func newTestClient(t *testing.T) *Client { return &Client{ Factory: testFactory.WithNamespace("default"), - Log: nopLogger, + Log: DefaultLogger, } } diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index ab467bc01..510576997 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -63,7 +63,7 @@ func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyChecke log: logger, } if c.log == nil { - c.log = nopLogger + c.log = DefaultLogger } for _, opt := range opts { opt(&c) From 3a22df9731a5c91c399d56f124702a501dd56d9c Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 17:46:44 +0100 Subject: [PATCH 135/192] Deal with linting errors Signed-off-by: Benoit Tigeot --- pkg/kube/logger.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kube/logger.go b/pkg/kube/logger.go index f0ddfe49a..616cdba7f 100644 --- a/pkg/kube/logger.go +++ b/pkg/kube/logger.go @@ -33,10 +33,10 @@ type Logger interface { type NopLogger struct{} // Debug implements Logger.Debug by doing nothing. -func (NopLogger) Debug(msg string, args ...any) {} +func (NopLogger) Debug(_ string, args ...any) {} // Warn implements Logger.Warn by doing nothing. -func (NopLogger) Warn(msg string, args ...any) {} +func (NopLogger) Warn(_ string, args ...any) {} // DefaultLogger provides a no-op logger that discards all messages. // It can be used as a default when no logger is provided.
From ede73860c1478b4b100137bb1daf6b321e153284 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Tue, 25 Mar 2025 19:55:50 +0100 Subject: [PATCH 136/192] Fix call to kube log Signed-off-by: Benoit Tigeot --- pkg/action/action.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 4f100f833..3caf91b71 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "log/slog" "os" "path" "path/filepath" @@ -376,7 +377,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { // Init initializes the action configuration func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { kc := kube.New(getter) - kc.Log = log // TODO: Switch to slog compatible logger + kc.Log = kube.NewSlogAdapter(slog.Default()) lazyClient := &lazyClient{ namespace: namespace, From fae2345edf20f5bd9b01aba0e4bcd2e6d23bda3c Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Wed, 26 Mar 2025 22:45:58 +0100 Subject: [PATCH 137/192] Demonstrate the impact of having Logger defined in kube package Signed-off-by: Benoit Tigeot --- pkg/action/action.go | 13 ++++++------- pkg/cmd/list.go | 6 +++++- pkg/storage/driver/cfgmaps.go | 4 ++-- pkg/storage/driver/secrets.go | 8 ++++---- pkg/storage/driver/sql.go | 9 +++++---- 5 files changed, 22 insertions(+), 18 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 3caf91b71..edff0ea2c 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "io" - "log/slog" "os" "path" "path/filepath" @@ -96,7 +95,7 @@ type Configuration struct { // Capabilities describes the capabilities of the Kubernetes cluster. Capabilities *chartutil.Capabilities - Log func(string, ...interface{}) + Log kube.Logger // HookOutputFunc called with container name and returns and expects writer that will receive the log output. @@ -270,8 +269,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { apiVersions, err := GetVersionSet(dc) if err != nil { if discovery.IsGroupDiscoveryFailedError(err) { - cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err) - cfg.Log("WARNING: To fix this, kubectl delete apiservice <service-name>") + cfg.Log.Warn("The Kubernetes server has an orphaned API service", "error", err) + cfg.Log.Warn("To fix this, kubectl delete apiservice <service-name>") } else { return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") } @@ -370,14 +369,14 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version // recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) { if err := cfg.Releases.Update(r); err != nil { - cfg.Log("warning: Failed to update release %s: %s", r.Name, err) + cfg.Log.Warn("Failed to update release", "release", r.Name, "error", err) } } // Init initializes the action configuration -func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log kube.Logger) error { kc := kube.New(getter) - kc.Log = kube.NewSlogAdapter(slog.Default()) + kc.Log = log lazyClient := &lazyClient{ namespace: namespace, diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go index 85acbc97f..1f1095f2b 100644 --- a/pkg/cmd/list.go +++ b/pkg/cmd/list.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" "io" + "log/slog" "os" "strconv" @@ -28,6 +29,7 @@ import ( "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" + "helm.sh/helm/v4/pkg/kube" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -61,6 +63,8 @@ flag with the '--offset' flag allows you to page through results. func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { client := action.NewList(cfg) var outfmt output.Format + slogger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + adapter := kube.NewSlogAdapter(slogger) cmd := &cobra.Command{ Use: "list", @@ -71,7 +75,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { ValidArgsFunction: noMoreArgsCompFunc, RunE: func(cmd *cobra.Command, _ []string) error { if client.AllNamespaces { - if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Debug); err != nil { + if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), adapter); err != nil { return err } } diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 2b84b7f82..8a70f5064 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "helm.sh/helm/v4/pkg/kube" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -43,7 +44,7 @@ const ConfigMapsDriverName = "ConfigMap" // ConfigMapsInterface. type ConfigMaps struct { impl corev1.ConfigMapInterface - Log func(string, ...interface{}) + Log kube.Logger } // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps { return &ConfigMaps{ impl: impl, - Log: func(_ string, _ ...interface{}) {}, } } diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index 2ab128c6b..816965b0c 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "helm.sh/helm/v4/pkg/kube" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -43,7 +44,7 @@ const SecretsDriverName = "Secret" // SecretsInterface.
type Secrets struct { impl corev1.SecretInterface - Log func(string, ...interface{}) + Log kube.Logger } // NewSecrets initializes a new Secrets wrapping an implementation of @@ -51,7 +52,6 @@ type Secrets struct { func NewSecrets(impl corev1.SecretInterface) *Secrets { return &Secrets{ impl: impl, - Log: func(_ string, _ ...interface{}) {}, } } @@ -96,7 +96,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log("list: failed to decode release: %v: %s", item, err) + secrets.Log.Debug("list: failed to decode release", "item", item, "error", err) continue } @@ -135,7 +135,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log("query: failed to decode release: %s", err) + secrets.Log.Debug("query: failed to decode release", "error", err) continue } rls.Labels = item.ObjectMeta.Labels diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index 12bdd3ff4..b9f9f534b 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -30,6 +30,7 @@ import ( // Import pq for postgres dialect _ "github.com/lib/pq" + "helm.sh/helm/v4/pkg/kube" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -87,7 +88,7 @@ type SQL struct { namespace string statementBuilder sq.StatementBuilderType - Log func(string, ...interface{}) + Log kube.Logger } // Name returns the name of the driver. @@ -108,13 +109,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) migrate.SetDisableCreateTable(false) if err != nil { - s.Log("checkAlreadyApplied: failed to get migration records: %v", err) + s.Log.Debug("checkAlreadyApplied: failed to get migration records", "error", err) return false } for _, record := range records { if _, ok := migrationsIDs[record.Id]; ok { - s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt) + s.Log.Debug("checkAlreadyApplied: found previous migration", "id", record.Id, "appliedAt", record.AppliedAt) delete(migrationsIDs, record.Id) } } @@ -276,7 +277,7 @@ type SQLReleaseCustomLabelWrapper struct { } // NewSQL initializes a new sql driver.
-func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) { +func NewSQL(connectionString string, logger kube.Logger, namespace string) (*SQL, error) { db, err := sqlx.Connect(postgreSQLDialect, connectionString) if err != nil { return nil, err From 83cdffe4aeeded92b2dc7e8b4ef04068b931fbf6 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 31 Mar 2025 09:51:37 +0200 Subject: [PATCH 138/192] Migrate to a dedicated internal package for slog adapter + migrate more Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 4 +- {pkg/kube => internal/log}/logger.go | 34 +++---------- internal/log/slog.go | 71 ++++++++++++++++++++++++++++ pkg/action/action.go | 5 +- pkg/action/action_test.go | 8 +--- pkg/action/history.go | 2 +- pkg/action/install.go | 22 ++++----- pkg/cmd/helpers_test.go | 3 +- pkg/cmd/install.go | 2 +- pkg/cmd/list.go | 4 +- pkg/cmd/registry_login.go | 2 +- pkg/cmd/root.go | 14 ++---- pkg/cmd/search_repo.go | 3 +- pkg/cmd/upgrade.go | 2 +- pkg/kube/client.go | 2 +- pkg/kube/client_test.go | 3 +- pkg/kube/ready.go | 7 +-- pkg/kube/ready_test.go | 5 +- pkg/kube/wait.go | 1 + pkg/storage/driver/cfgmaps.go | 4 +- pkg/storage/driver/mock_test.go | 1 - pkg/storage/driver/secrets.go | 4 +- pkg/storage/driver/sql.go | 6 +-- 23 files changed, 127 insertions(+), 82 deletions(-) rename {pkg/kube => internal/log}/logger.go (65%) create mode 100644 internal/log/slog.go diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index da6a5c54e..a617bce2e 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -23,6 +23,7 @@ import ( // Import to initialize client auth plugins. _ "k8s.io/client-go/plugin/pkg/client/auth" + logadapter "helm.sh/helm/v4/internal/log" helmcmd "helm.sh/helm/v4/pkg/cmd" "helm.sh/helm/v4/pkg/kube" ) @@ -37,10 +38,11 @@ func main() { // another name (e.g., helm2 or helm3) does not change the name of the // manager as picked up by the automated name detection. kube.ManagedFieldsManager = "helm" + logger := logadapter.NewReadableTextLogger(os.Stderr, false) cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) if err != nil { - helmcmd.Warning("%+v", err) + logger.Warn("%+v", err) os.Exit(1) } diff --git a/pkg/kube/logger.go b/internal/log/logger.go similarity index 65% rename from pkg/kube/logger.go rename to internal/log/logger.go index 616cdba7f..ff971bdb1 100644 --- a/pkg/kube/logger.go +++ b/internal/log/logger.go @@ -14,10 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package kube provides Kubernetes client utilities for Helm. -package kube - -import "log/slog" +package log // Logger defines a minimal logging interface compatible with structured logging. // It provides methods for different log levels with structured key-value pairs. @@ -27,6 +24,9 @@ type Logger interface { // Warn logs a message at warning level with structured key-value pairs. Warn(msg string, args ...any) + + // Error logs a message at error level with structured key-value pairs. + Error(msg string, args ...any) } // NopLogger is a logger implementation that discards all log messages. @@ -38,29 +38,9 @@ func (NopLogger) Debug(_ string, args ...any) {} // Warn implements Logger.Warn by doing nothing. func (NopLogger) Warn(_ string, args ...any) {} +// Error implements Logger.Error by doing nothing. +func (NopLogger) Error(_ string, args ...any) {} + // DefaultLogger provides a no-op logger that discards all messages. // It can be used as a default when no logger is provided. 
var DefaultLogger Logger = NopLogger{} - -// SlogAdapter adapts a standard library slog.Logger to the Logger interface. -type SlogAdapter struct { - logger *slog.Logger -} - -// Debug implements Logger.Debug by forwarding to the underlying slog.Logger. -func (a SlogAdapter) Debug(msg string, args ...any) { - a.logger.Debug(msg, args...) -} - -// Warn implements Logger.Warn by forwarding to the underlying slog.Logger. -func (a SlogAdapter) Warn(msg string, args ...any) { - a.logger.Warn(msg, args...) -} - -// NewSlogAdapter creates a Logger that forwards log messages to a slog.Logger. -func NewSlogAdapter(logger *slog.Logger) Logger { - if logger == nil { - return DefaultLogger - } - return SlogAdapter{logger: logger} -} diff --git a/internal/log/slog.go b/internal/log/slog.go new file mode 100644 index 000000000..7765545f9 --- /dev/null +++ b/internal/log/slog.go @@ -0,0 +1,71 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "io" + "log/slog" +) + +// SlogAdapter adapts a standard library slog.Logger to the Logger interface. +type SlogAdapter struct { + logger *slog.Logger +} + +// Debug implements Logger.Debug by forwarding to the underlying slog.Logger. +func (a SlogAdapter) Debug(msg string, args ...any) { + a.logger.Debug(msg, args...) +} + +// Warn implements Logger.Warn by forwarding to the underlying slog.Logger. +func (a SlogAdapter) Warn(msg string, args ...any) { + a.logger.Warn(msg, args...) +} + +// Error implements Logger.Error by forwarding to the underlying slog.Logger. +func (a SlogAdapter) Error(msg string, args ...any) { + // TODO: Handle error with `slog.Any`: slog.Info("something went wrong", slog.Any("err", err)) + a.logger.Error(msg, args...) +} + +// NewSlogAdapter creates a Logger that forwards log messages to a slog.Logger. +func NewSlogAdapter(logger *slog.Logger) Logger { + if logger == nil { + return DefaultLogger + } + return SlogAdapter{logger: logger} +} + +// NewReadableTextLogger creates a Logger that outputs in a readable text format without timestamps +func NewReadableTextLogger(output io.Writer, debugEnabled bool) Logger { + level := slog.LevelInfo + if debugEnabled { + level = slog.LevelDebug + } + + handler := slog.NewTextHandler(output, &slog.HandlerOptions{ + Level: level, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.TimeKey { + return slog.Attr{} + } + return a + }, + }) + + return NewSlogAdapter(slog.New(handler)) +} diff --git a/pkg/action/action.go b/pkg/action/action.go index edff0ea2c..778dc9ec5 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + logadapter "helm.sh/helm/v4/internal/log" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/engine" @@ -95,7 +96,7 @@ type Configuration struct { // Capabilities describes the capabilities of the Kubernetes cluster. 
Capabilities *chartutil.Capabilities - Log kube.Logger + Log logadapter.Logger // HookOutputFunc called with container name and returns and expects writer that will receive the log output. HookOutputFunc func(namespace, pod, container string) io.Writer @@ -374,7 +375,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { } // Init initializes the action configuration -func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log kube.Logger) error { +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log logadapter.Logger) error { kc := kube.New(getter) kc.Log = log diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index ec6e261db..efaebb3f9 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" fakeclientset "k8s.io/client-go/kubernetes/fake" + logadapter "helm.sh/helm/v4/internal/log" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" kubefake "helm.sh/helm/v4/pkg/kube/fake" @@ -49,12 +50,7 @@ func actionConfigFixture(t *testing.T) *Configuration { KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, Capabilities: chartutil.DefaultCapabilities, RegistryClient: registryClient, - Log: func(format string, v ...interface{}) { - t.Helper() - if *verbose { - t.Logf(format, v...) - } - }, + Log: logadapter.DefaultLogger, } } diff --git a/pkg/action/history.go b/pkg/action/history.go index 04743f4cd..289118592 100644 --- a/pkg/action/history.go +++ b/pkg/action/history.go @@ -53,6 +53,6 @@ func (h *History) Run(name string) ([]*release.Release, error) { return nil, errors.Errorf("release name is invalid: %s", name) } - h.cfg.Log("getting history for release %s", name) + h.cfg.Log.Debug("getting history for release", "release", name) return h.cfg.Releases.History(name) } diff --git a/pkg/action/install.go b/pkg/action/install.go index 735b8ac17..8b749b777 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -172,7 +172,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // If the error is CRD already exists, continue. if apierrors.IsAlreadyExists(err) { crdName := res[0].Name - i.cfg.Log("CRD %s is already present. Skipping.", crdName) + i.cfg.Log.Debug("CRD is already present. Skipping", "crd", crdName) continue } return errors.Wrapf(err, "failed to install CRD %s", obj.Name) @@ -200,7 +200,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { return err } - i.cfg.Log("Clearing discovery cache") + i.cfg.Log.Debug("clearing discovery cache") discoveryClient.Invalidate() _, _ = discoveryClient.ServerGroups() @@ -213,7 +213,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { return err } if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { - i.cfg.Log("Clearing REST mapper cache") + i.cfg.Log.Debug("clearing REST mapper cache") resettable.Reset() } } @@ -237,24 +237,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Check reachability of cluster unless in client-only mode (e.g. 
`helm template` without `--validate`) if !i.ClientOnly { if err := i.cfg.KubeClient.IsReachable(); err != nil { - i.cfg.Log(fmt.Sprintf("ERROR: Cluster reachability check failed: %v", err)) + i.cfg.Log.Error("cluster reachability check failed", "error", err) return nil, errors.Wrap(err, "cluster reachability check failed") } } // HideSecret must be used with dry run. Otherwise, return an error. if !i.isDryRun() && i.HideSecret { - i.cfg.Log("ERROR: Hiding Kubernetes secrets requires a dry-run mode") + i.cfg.Log.Error("hiding Kubernetes secrets requires a dry-run mode") return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode") } if err := i.availableName(); err != nil { - i.cfg.Log(fmt.Sprintf("ERROR: Release name check failed: %v", err)) + i.cfg.Log.Error("release name check failed", "error", err) return nil, errors.Wrap(err, "release name check failed") } if err := chartutil.ProcessDependencies(chrt, vals); err != nil { - i.cfg.Log(fmt.Sprintf("ERROR: Processing chart dependencies failed: %v", err)) + i.cfg.Log.Error("chart dependencies processing failed", "error", err) return nil, errors.Wrap(err, "chart dependencies processing failed") } @@ -268,7 +268,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 { // On dry run, bail here if i.isDryRun() { - i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") + i.cfg.Log.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") } else if err := i.installCRDs(crds); err != nil { return nil, err } @@ -288,7 +288,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma mem.SetNamespace(i.Namespace) i.cfg.Releases = storage.Init(mem) } else if !i.ClientOnly && len(i.APIVersions) > 0 { - i.cfg.Log("API Version list given outside of client only mode, this list will be ignored") + i.cfg.Log.Debug("API Version list given outside of client only mode, this list will be ignored") } // Make sure if Atomic is set, that wait is set as well. This makes it so @@ -505,7 +505,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource // One possible strategy would be to do a timed retry to see if we can get // this stored in the future.
if err := i.recordRelease(rel); err != nil { - i.cfg.Log("failed to record the release: %s", err) + i.cfg.Log.Error("failed to record the release", "error", err) } return rel, nil @@ -514,7 +514,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) if i.Atomic { - i.cfg.Log("Install failed and atomic is set, uninstalling release") + i.cfg.Log.Debug("install failed, uninstalling release", "release", i.ReleaseName) uninstall := NewUninstall(i.cfg) uninstall.DisableHooks = i.DisableHooks uninstall.KeepHistory = false diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go index effbc1673..1f597d7ba 100644 --- a/pkg/cmd/helpers_test.go +++ b/pkg/cmd/helpers_test.go @@ -26,6 +26,7 @@ import ( shellwords "github.com/mattn/go-shellwords" "github.com/spf13/cobra" + logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/action" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -92,7 +93,7 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) Releases: store, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, Capabilities: chartutil.DefaultCapabilities, - Log: func(_ string, _ ...interface{}) {}, + Log: logadapter.DefaultLogger, } root, err := newRootCmdWithConfig(actionConfig, buf, args) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 051612bb8..32a386ba0 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -265,7 +265,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options } if chartRequested.Metadata.Deprecated { - Warning("This chart is deprecated") + logger.Warn("this chart is deprecated") } if req := chartRequested.Metadata.Dependencies; req != nil { diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go index 1f1095f2b..10b70d7d9 100644 --- a/pkg/cmd/list.go +++ b/pkg/cmd/list.go @@ -26,10 +26,10 @@ import ( "github.com/gosuri/uitable" "github.com/spf13/cobra" + logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" - "helm.sh/helm/v4/pkg/kube" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -64,7 +64,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { client := action.NewList(cfg) var outfmt output.Format slogger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - adapter := kube.NewSlogAdapter(slogger) + adapter := logadapter.NewSlogAdapter(slogger) cmd := &cobra.Command{ Use: "list", diff --git a/pkg/cmd/registry_login.go b/pkg/cmd/registry_login.go index 1dfb3c798..bc6c1d13d 100644 --- a/pkg/cmd/registry_login.go +++ b/pkg/cmd/registry_login.go @@ -122,7 +122,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd } } } else { - Warning("Using --password via the CLI is insecure. Use --password-stdin.") + logger.Warn("using --password via the CLI is insecure. 
Use --password-stdin") } return username, password, nil diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index ea686be7c..407e89139 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" + logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/internal/tlsutil" "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli" @@ -95,16 +96,7 @@ By default, the default directories depend on the Operating System. The defaults ` var settings = cli.New() - -func Debug(format string, v ...interface{}) { - if settings.Debug { - log.Output(2, fmt.Sprintf("[debug] "+format+"\n", v...)) - } -} - -func Warning(format string, v ...interface{}) { - fmt.Fprintf(os.Stderr, "WARNING: "+format+"\n", v...) -} +var logger = logadapter.NewReadableTextLogger(os.Stderr, settings.Debug) func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { actionConfig := new(action.Configuration) @@ -114,7 +106,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { } cobra.OnInitialize(func() { helmDriver := os.Getenv("HELM_DRIVER") - if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Debug); err != nil { + if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, logger); err != nil { log.Fatal(err) } if helmDriver == "memory" { diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index bc73e52b2..29bc19f6b 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -189,8 +189,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) { f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) ind, err := repo.LoadIndexFile(f) if err != nil { - Warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n) - Warning("%s", err) + logger.Warn("repo is corrupt or missing", "repo", n, "error", err) continue } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index afbbde435..e61c9bc3b 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -225,7 +225,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if ch.Metadata.Deprecated { - Warning("This chart is deprecated") + logger.Warn("this chart is deprecated") } // Create context and prepare the handle of SIGTERM diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a7279e40b..c176b3fb8 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -73,7 +73,7 @@ type Client struct { // needs. The smaller surface area of the interface means there is a lower // chance of it changing. 
Factory Factory - Log Logger + Log logadapter.Logger // Namespace allows to bypass the kubeconfig file for the choice of the namespace Namespace string diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 994b2be5c..abe841ea6 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/assert" + logadapter "helm.sh/helm/v4/internal/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -107,7 +108,7 @@ func newTestClient(t *testing.T) *Client { return &Client{ Factory: testFactory.WithNamespace("default"), - Log: DefaultLogger, + Log: logadapter.DefaultLogger, } } diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 510576997..c128e31b0 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + logadapter "helm.sh/helm/v4/internal/log" deploymentutil "helm.sh/helm/v4/internal/third_party/k8s.io/kubernetes/deployment/util" ) @@ -57,13 +58,13 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption { // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // be used to override defaults. -func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyCheckerOption) ReadyChecker { +func NewReadyChecker(cl kubernetes.Interface, logger logadapter.Logger, opts ...ReadyCheckerOption) ReadyChecker { c := ReadyChecker{ client: cl, log: logger, } if c.log == nil { - c.log = DefaultLogger + c.log = logadapter.DefaultLogger } for _, opt := range opts { opt(&c) @@ -74,7 +75,7 @@ func NewReadyChecker(cl kubernetes.Interface, logger Logger, opts ...ReadyChecke // ReadyChecker is a type that can check core Kubernetes types for readiness. 
type ReadyChecker struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index bd382a9c2..f022bf596 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -19,6 +19,7 @@ import ( "context" "testing" + logadapter "helm.sh/helm/v4/internal/log" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -37,7 +38,7 @@ const defaultNamespace = metav1.NamespaceDefault func Test_ReadyChecker_IsReady_Pod(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -661,7 +662,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 79a2df8cc..69779904f 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -25,6 +25,7 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" + logadapter "helm.sh/helm/v4/internal/log" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 8a70f5064..48198a9bd 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "helm.sh/helm/v4/pkg/kube" + logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -44,7 +44,7 @@ const ConfigMapsDriverName = "ConfigMap" // ConfigMapsInterface. type ConfigMaps struct { impl corev1.ConfigMapInterface - Log kube.Logger + Log logadapter.Logger } // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index 53919b45d..54fda0542 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -262,7 +262,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) sqlxDB := sqlx.NewDb(sqlDB, "sqlmock") return &SQL{ db: sqlxDB, - Log: func(_ string, _ ...interface{}) {}, namespace: "default", statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), }, mock diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index 816965b0c..bd1edcae1 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "helm.sh/helm/v4/pkg/kube" + logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -44,7 +44,7 @@ const SecretsDriverName = "Secret" // SecretsInterface. 
type Secrets struct { impl corev1.SecretInterface - Log kube.Logger + Log logadapter.Logger } // NewSecrets initializes a new Secrets wrapping an implementation of diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index b9f9f534b..304503b0a 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -30,7 +30,7 @@ import ( // Import pq for postgres dialect _ "github.com/lib/pq" - "helm.sh/helm/v4/pkg/kube" + logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -88,7 +88,7 @@ type SQL struct { namespace string statementBuilder sq.StatementBuilderType - Log kube.Logger + Log logadapter.Logger } // Name returns the name of the driver. @@ -277,7 +277,7 @@ type SQLReleaseCustomLabelWrapper struct { } // NewSQL initializes a new sql driver. -func NewSQL(connectionString string, logger kube.Logger, namespace string) (*SQL, error) { +func NewSQL(connectionString string, logger logadapter.Logger, namespace string) (*SQL, error) { db, err := sqlx.Connect(postgreSQLDialect, connectionString) if err != nil { return nil, err From b42767be40ca2f364e7b1f54c8510b6c69e62f7f Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 31 Mar 2025 11:17:58 +0200 Subject: [PATCH 139/192] Migrate more code to log adapter Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 2 +- pkg/action/rollback.go | 24 ++++++------ pkg/action/uninstall.go | 14 +++---- pkg/action/upgrade.go | 30 +++++++-------- pkg/cmd/install.go | 6 +-- pkg/cmd/plugin.go | 2 +- pkg/cmd/plugin_install.go | 2 +- pkg/cmd/plugin_list.go | 2 +- pkg/cmd/plugin_uninstall.go | 2 +- pkg/cmd/plugin_update.go | 4 +- pkg/cmd/pull.go | 2 +- pkg/cmd/search_hub.go | 2 +- pkg/cmd/search_repo.go | 6 +-- pkg/cmd/show.go | 4 +- pkg/cmd/upgrade.go | 2 +- pkg/kube/client.go | 2 + pkg/kube/client_test.go | 3 +- pkg/kube/ready_test.go | 17 +++++---- pkg/kube/wait.go | 3 +- pkg/storage/driver/cfgmaps.go | 20 +++++----- pkg/storage/driver/sql.go | 70 +++++++++++++++++------------------ 21 files changed, 112 insertions(+), 107 deletions(-) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index a617bce2e..158118c06 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -47,7 +47,7 @@ func main() { } if err := cmd.Execute(); err != nil { - helmcmd.Debug("%+v", err) + logger.Debug("error", err) switch e := err.(type) { case helmcmd.PluginError: os.Exit(e.Code) diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 870f1e635..4e61fe872 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -63,26 +63,26 @@ func (r *Rollback) Run(name string) error { r.cfg.Releases.MaxHistory = r.MaxHistory - r.cfg.Log("preparing rollback of %s", name) + r.cfg.Log.Debug("preparing rollback", "name", name) currentRelease, targetRelease, err := r.prepareRollback(name) if err != nil { return err } if !r.DryRun { - r.cfg.Log("creating rolled back release for %s", name) + r.cfg.Log.Debug("creating rolled back release", "name", name) if err := r.cfg.Releases.Create(targetRelease); err != nil { return err } } - r.cfg.Log("performing rollback of %s", name) + r.cfg.Log.Debug("performing rollback", "name", name) if _, err := r.performRollback(currentRelease, targetRelease); err != nil { return err } if !r.DryRun { - r.cfg.Log("updating status for rolled back release for %s", name) + r.cfg.Log.Debug("updating status for rolled back release", "name", name) if err := r.cfg.Releases.Update(targetRelease); err != nil { return err } @@ -129,7 +129,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, 
*release.Rele return nil, nil, errors.Errorf("release has no %d version", previousVersion) } - r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion) + r.cfg.Log.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion) previousRelease, err := r.cfg.Releases.Get(name, previousVersion) if err != nil { @@ -162,7 +162,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { if r.DryRun { - r.cfg.Log("dry run for %s", targetRelease.Name) + r.cfg.Log.Debug("dry run", "name", targetRelease.Name) return targetRelease, nil } @@ -181,7 +181,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas return targetRelease, err } } else { - r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name) + r.cfg.Log.Debug("rollback hooks disabled", "name", targetRelease.Name) } // It is safe to use "force" here because these are resources currently rendered by the chart. @@ -193,14 +193,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas if err != nil { msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) - r.cfg.Log("warning: %s", msg) + r.cfg.Log.Warn(msg) currentRelease.Info.Status = release.StatusSuperseded targetRelease.Info.Status = release.StatusFailed targetRelease.Info.Description = msg r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) if r.CleanupOnFail { - r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created)) + r.cfg.Log.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created)) _, errs := r.cfg.KubeClient.Delete(results.Created) if errs != nil { var errorList []string @@ -209,7 +209,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) } - r.cfg.Log("Resource cleanup complete") + r.cfg.Log.Debug("resource cleanup complete") } return targetRelease, err } @@ -220,7 +220,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // levels, we should make these error level logs so users are notified // that they'll need to go do the cleanup on their own if err := recreate(r.cfg, results.Updated); err != nil { - r.cfg.Log(err.Error()) + r.cfg.Log.Error(err.Error()) } } waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) @@ -256,7 +256,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } // Supersede all previous deployments, see issue #2941. 
for _, rel := range deployed { - r.cfg.Log("superseding previous deployment %d", rel.Version) + r.cfg.Log.Debug("superseding previous deployment", "version", rel.Version) rel.Info.Status = release.StatusSuperseded r.cfg.recordRelease(rel) } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index eeff997d3..4e959172c 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -104,7 +104,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, errors.Errorf("the release named %q is already deleted", name) } - u.cfg.Log("uninstall: Deleting %s", name) + u.cfg.Log.Debug("uninstall: deleting release", "name", name) rel.Info.Status = release.StatusUninstalling rel.Info.Deleted = helmtime.Now() rel.Info.Description = "Deletion in progress (or silently failed)" @@ -115,18 +115,18 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return res, err } } else { - u.cfg.Log("delete hooks disabled for %s", name) + u.cfg.Log.Debug("delete hooks disabled", "release", name) } // From here on out, the release is currently considered to be in StatusUninstalling // state. if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log("uninstall: Failed to store updated release: %s", err) + u.cfg.Log.Debug("uninstall: Failed to store updated release", "error", err) } deletedResources, kept, errs := u.deleteRelease(rel) if errs != nil { - u.cfg.Log("uninstall: Failed to delete release: %s", errs) + u.cfg.Log.Debug("uninstall: Failed to delete release", "errors", errs) return nil, errors.Errorf("failed to delete release: %s", name) } @@ -153,7 +153,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if !u.KeepHistory { - u.cfg.Log("purge requested for %s", name) + u.cfg.Log.Debug("purge requested", "release", name) err := u.purgeReleases(rels...) if err != nil { errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release")) @@ -168,7 +168,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log("uninstall: Failed to store updated release: %s", err) + u.cfg.Log.Debug("uninstall: Failed to store updated release", "error", err) } if len(errs) > 0 { @@ -242,7 +242,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro case "background": return v1.DeletePropagationBackground default: - cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag) + cfg.Log.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag) return v1.DeletePropagationBackground } } diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index e3b775a25..147c0fe5a 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -163,7 +163,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return nil, errors.Errorf("release name is invalid: %s", name) } - u.cfg.Log("preparing upgrade for %s", name) + u.cfg.Log.Debug("preparing upgrade", "name", name) currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) if err != nil { return nil, err @@ -171,7 +171,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. 
u.cfg.Releases.MaxHistory = u.MaxHistory - u.cfg.Log("performing update for %s", name) + u.cfg.Log.Debug("performing update", "name", name) res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) if err != nil { return res, err @@ -179,7 +179,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. // Do not update for dry runs if !u.isDryRun() { - u.cfg.Log("updating status for upgraded release for %s", name) + u.cfg.Log.Debug("updating status for upgraded release", "name", name) if err := u.cfg.Releases.Update(upgradedRelease); err != nil { return res, err } @@ -365,7 +365,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR // Run if it is a dry run if u.isDryRun() { - u.cfg.Log("dry run for %s", upgradedRelease.Name) + u.cfg.Log.Debug("dry run for release", "name", upgradedRelease.Name) if len(u.Description) > 0 { upgradedRelease.Info.Description = u.Description } else { @@ -374,7 +374,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR return upgradedRelease, nil } - u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name) + u.cfg.Log.Debug("creating upgraded release", "name", upgradedRelease.Name) if err := u.cfg.Releases.Create(upgradedRelease); err != nil { return nil, err } @@ -425,7 +425,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele return } } else { - u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name) + u.cfg.Log.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) } results, err := u.cfg.KubeClient.Update(current, target, u.Force) @@ -441,7 +441,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // levels, we should make these error level logs so users are notified // that they'll need to go do the cleanup on their own if err := recreate(u.cfg, results.Updated); err != nil { - u.cfg.Log(err.Error()) + u.cfg.Log.Error(err.Error()) } } waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) @@ -486,13 +486,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) - u.cfg.Log("warning: %s", msg) + u.cfg.Log.Warn("upgrade failed", "name", rel.Name, "error", err) rel.Info.Status = release.StatusFailed rel.Info.Description = msg u.cfg.recordRelease(rel) if u.CleanupOnFail && len(created) > 0 { - u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created)) + u.cfg.Log.Debug("cleanup on fail set", "cleaning_resources", len(created)) _, errs := u.cfg.KubeClient.Delete(created) if errs != nil { var errorList []string @@ -501,10 +501,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e } return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) } - u.cfg.Log("Resource cleanup complete") + u.cfg.Log.Debug("resource cleanup complete") } if u.Atomic { - u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release") + u.cfg.Log.Debug("upgrade failed and atomic is set, rolling back to last successful release") // As a protection, get the last successful release before rollback. 
// If there are no successful releases, bail out @@ -556,13 +556,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { if u.ResetValues { // If ResetValues is set, we completely ignore current.Config. - u.cfg.Log("resetting values to the chart's original version") + u.cfg.Log.Debug("resetting values to the chart's original version") return newVals, nil } // If the ReuseValues flag is set, we always copy the old values over the new config's values. if u.ReuseValues { - u.cfg.Log("reusing the old release's values") + u.cfg.Log.Debug("reusing the old release's values") // We have to regenerate the old coalesced values: oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) @@ -579,7 +579,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. if u.ResetThenReuseValues { - u.cfg.Log("merging values from old release to new values") + u.cfg.Log.Debug("merging values from old release to new values") newVals = chartutil.CoalesceTables(newVals, current.Config) @@ -587,7 +587,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV } if len(newVals) == 0 && len(current.Config) > 0 { - u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version) + u.cfg.Log.Debug("copying values from old release", "name", current.Name, "version", current.Version) newVals = current.Config } return newVals, nil diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 32a386ba0..566739bc3 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -229,9 +229,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal } func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) { - Debug("Original chart version: %q", client.Version) + logger.Debug("Original chart version", "version", client.Version) if client.Version == "" && client.Devel { - Debug("setting version to >0.0.0-0") + logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } @@ -246,7 +246,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options return nil, err } - Debug("CHART PATH: %s\n", cp) + logger.Debug("Chart path", "path", cp) p := getter.All(settings) vals, err := valueOpts.MergeValues(p) diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index 3340e76e6..1bb7ffb57 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -66,7 +66,7 @@ func runHook(p *plugin.Plugin, event string) error { prog := exec.Command(main, argv...) 
- Debug("running %s hook: %s", event, prog) + logger.Debug("running hook", "event", event, "program", prog) prog.Stdout, prog.Stderr = os.Stdout, os.Stderr if err := prog.Run(); err != nil { diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index e17744cbb..ca3d4ed90 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -79,7 +79,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error { return err } - Debug("loading plugin from %s", i.Path()) + logger.Debug("loading plugin", "path", i.Path()) p, err := plugin.LoadDir(i.Path()) if err != nil { return errors.Wrap(err, "plugin is installed but unusable") } diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 9cca790ae..9eb6707db 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -32,7 +32,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command { Short: "list installed Helm plugins", ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { - Debug("pluginDirs: %s", settings.PluginsDirectory) + logger.Debug("pluginDirs", "directory", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err } diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index c1f90ca49..3db454ff9 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -60,7 +60,7 @@ func (o *pluginUninstallOptions) complete(args []string) error { } func (o *pluginUninstallOptions) run(out io.Writer) error { - Debug("loading installed plugins from %s", settings.PluginsDirectory) + logger.Debug("loading installed plugins", "dir", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err } diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index cbbd8994c..38c451e2f 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -62,7 +62,7 @@ func (o *pluginUpdateOptions) complete(args []string) error { func (o *pluginUpdateOptions) run(out io.Writer) error { installer.Debug = settings.Debug - Debug("loading installed plugins from %s", settings.PluginsDirectory) + logger.Debug("loading installed plugins", "path", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err @@ -104,7 +104,7 @@ func updatePlugin(p *plugin.Plugin) error { return err } - Debug("loading plugin from %s", i.Path()) + logger.Debug("loading plugin", "path", i.Path()) updatedPlugin, err := plugin.LoadDir(i.Path()) if err != nil { return err } diff --git a/pkg/cmd/pull.go b/pkg/cmd/pull.go index 5d188ee4f..65ad95947 100644 --- a/pkg/cmd/pull.go +++ b/pkg/cmd/pull.go @@ -60,7 +60,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { RunE: func(_ *cobra.Command, args []string) error { client.Settings = settings if client.Version == "" && client.Devel { - Debug("setting version to >0.0.0-0") + logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index b7f25444e..a2d35f32b 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -89,7 +89,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error { q := strings.Join(args, " ") results, err := c.Search(q) if err != nil { - Debug("%s", err) + logger.Debug("search failed", "error", err) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) } diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index 
29bc19f6b..610176dd6 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -130,17 +130,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error { } func (o *searchRepoOptions) setupSearchedVersion() { - Debug("Original chart version: %q", o.version) + logger.Debug("original chart version", "version", o.version) if o.version != "" { return } if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases). - Debug("setting version to >0.0.0-0") + logger.Debug("setting version to >0.0.0-0") o.version = ">0.0.0-0" } else { // search only for stable releases, prerelease versions will be skipped - Debug("setting version to >0.0.0") + logger.Debug("setting version to >0.0.0") o.version = ">0.0.0" } } diff --git a/pkg/cmd/show.go b/pkg/cmd/show.go index a02af6f18..6aa322430 100644 --- a/pkg/cmd/show.go +++ b/pkg/cmd/show.go @@ -211,9 +211,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) { } func runShow(args []string, client *action.Show) (string, error) { - Debug("Original chart version: %q", client.Version) + logger.Debug("original chart version", "version", client.Version) if client.Version == "" && client.Devel { - Debug("setting version to >0.0.0-0") + logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index e61c9bc3b..a85eb5a41 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -173,7 +173,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if client.Version == "" && client.Devel { - Debug("setting version to >0.0.0-0") + logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index c176b3fb8..e82165486 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -51,6 +51,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" + + logadapter "helm.sh/helm/v4/internal/log" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. 
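As a brief, hedged aside: the call style this series migrates to is plain log/slog structured logging, where a message is followed by alternating key/value pairs instead of printf verbs. A minimal, self-contained sketch (the handler options, messages, and attribute keys below are invented for illustration, not taken from Helm):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// A text handler writing to stderr, with debug-level records enabled.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Old style: Debug("preparing upgrade for %s", name)
	// New style: message first, then alternating key/value pairs.
	logger.Debug("preparing upgrade", "name", "my-release", "namespace", "default")
	logger.Warn("this chart is deprecated", "chart", "demo-chart")
}

On a TextHandler this prints lines such as level=DEBUG msg="preparing upgrade" name=my-release namespace=default, which is why the hunks above replace format strings with key/value arguments.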
diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index abe841ea6..11a3413e4 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" - logadapter "helm.sh/helm/v4/internal/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -35,6 +34,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + + logadapter "helm.sh/helm/v4/internal/log" ) var unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index f022bf596..155d3d435 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -19,7 +19,6 @@ import ( "context" "testing" - logadapter "helm.sh/helm/v4/internal/log" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -31,6 +30,8 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + + logadapter "helm.sh/helm/v4/internal/log" ) const defaultNamespace = metav1.NamespaceDefault @@ -114,7 +115,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -189,7 +190,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -271,7 +272,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -346,7 +347,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -421,7 +422,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -496,7 +497,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } @@ -571,7 +572,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { type fields struct { client kubernetes.Interface - log Logger + log logadapter.Logger checkJobs bool pausedAsReady bool } diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 69779904f..e3d29d8a9 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -25,7 +25,6 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" - logadapter "helm.sh/helm/v4/internal/log" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,6 +44,8 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/apimachinery/pkg/util/wait" + + logadapter "helm.sh/helm/v4/internal/log" ) // 
legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 48198a9bd..421d39ba8 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -70,13 +70,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) { return nil, ErrReleaseNotFound } - cfgmaps.Log("get: failed to get %q: %s", key, err) + cfgmaps.Log.Debug("failed to get release", "key", key, "error", err) return nil, err } // found the configmap, decode the base64 data string r, err := decodeRelease(obj.Data["release"]) if err != nil { - cfgmaps.Log("get: failed to decode data %q: %s", key, err) + cfgmaps.Log.Debug("failed to decode data", "key", key, "error", err) return nil, err } r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) @@ -93,7 +93,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas list, err := cfgmaps.impl.List(context.Background(), opts) if err != nil { - cfgmaps.Log("list: failed to list: %s", err) + cfgmaps.Log.Debug("failed to list releases", "error", err) return nil, err } @@ -104,7 +104,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas for _, item := range list.Items { rls, err := decodeRelease(item.Data["release"]) if err != nil { - cfgmaps.Log("list: failed to decode release: %v: %s", item, err) + cfgmaps.Log.Debug("failed to decode release", "item", item, "error", err) continue } @@ -132,7 +132,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err list, err := cfgmaps.impl.List(context.Background(), opts) if err != nil { - cfgmaps.Log("query: failed to query with labels: %s", err) + cfgmaps.Log.Debug("failed to query with labels", "error", err) return nil, err } @@ -144,7 +144,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err for _, item := range list.Items { rls, err := decodeRelease(item.Data["release"]) if err != nil { - cfgmaps.Log("query: failed to decode release: %s", err) + cfgmaps.Log.Debug("failed to decode release", "error", err) continue } rls.Labels = item.ObjectMeta.Labels @@ -166,7 +166,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { // create a new configmap to hold the release obj, err := newConfigMapsObject(key, rls, lbs) if err != nil { - cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err) + cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, "error", err) return err } // push the configmap object out into the kubiverse @@ -175,7 +175,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { return ErrReleaseExists } - cfgmaps.Log("create: failed to create: %s", err) + cfgmaps.Log.Debug("failed to create release", "error", err) return err } return nil @@ -194,13 +194,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error { // create a new configmap object to hold the release obj, err := newConfigMapsObject(key, rls, lbs) if err != nil { - cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err) + cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, "error", err) return err } // push the configmap object out into the kubiverse _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) if err != nil { - cfgmaps.Log("update: failed to update: %s", err) + cfgmaps.Log.Debug("failed to update release", "error", err) return err } return nil diff 
--git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index 304503b0a..9a4188d2d 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -109,13 +109,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) migrate.SetDisableCreateTable(false) if err != nil { - s.Log.Debug("checkAlreadyApplied: failed to get migration records: %v", err) + s.Log.Debug("failed to get migration records", "error", err) return false } for _, record := range records { if _, ok := migrationsIDs[record.Id]; ok { - s.Log.Debug("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt) + s.Log.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt) delete(migrationsIDs, record.Id) } } @@ -123,7 +123,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { // check if all migrations applied if len(migrationsIDs) != 0 { for id := range migrationsIDs { - s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id) + s.Log.Debug("found unapplied migration", "id", id) } return false } @@ -310,24 +310,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) { query, args, err := qb.ToSql() if err != nil { - s.Log("failed to build query: %v", err) + s.Log.Debug("failed to build query", "error", err) return nil, err } // Get will return an error if the result is empty if err := s.db.Get(&record, query, args...); err != nil { - s.Log("got SQL error when getting release %s: %v", key, err) + s.Log.Debug("got SQL error when getting release", "key", key, "error", err) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Log("get: failed to decode data %q: %v", key, err) + s.Log.Debug("failed to decode data", "key", key, "error", err) return nil, err } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) + s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, "error", err) return nil, err } @@ -348,13 +348,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { query, args, err := sb.ToSql() if err != nil { - s.Log("failed to build query: %v", err) + s.Log.Debug("failed to build query", "error", err) return nil, err } var records = []SQLReleaseWrapper{} if err := s.db.Select(&records, query, args...); err != nil { - s.Log("list: failed to list: %v", err) + s.Log.Debug("failed to list", "error", err) return nil, err } @@ -362,12 +362,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Log("list: failed to decode release: %v: %v", record, err) + s.Log.Debug("failed to decode release", "record", record, "error", err) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) + s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, "error", err) return nil, err } for k, v := range getReleaseSystemLabels(release) { @@ -397,7 +397,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { if _, ok := labelMap[key]; ok { sb = sb.Where(sq.Eq{key: labels[key]}) } else { - s.Log("unknown 
label %s", key) + s.Log.Debug("unknown label", "key", key) return nil, fmt.Errorf("unknown label %s", key) } } @@ -410,13 +410,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { // Build our query query, args, err := sb.ToSql() if err != nil { - s.Log("failed to build query: %v", err) + s.Log.Debug("failed to build query", "error", err) return nil, err } var records = []SQLReleaseWrapper{} if err := s.db.Select(&records, query, args...); err != nil { - s.Log("list: failed to query with labels: %v", err) + s.Log.Debug("failed to query with labels", "error", err) return nil, err } @@ -428,12 +428,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Log("list: failed to decode release: %v: %v", record, err) + s.Log.Debug("failed to decode release", "record", record, "error", err) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) + s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, "error", err) return nil, err } @@ -457,13 +457,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { body, err := encodeRelease(rls) if err != nil { - s.Log("failed to encode release: %v", err) + s.Log.Debug("failed to encode release", "error", err) return err } transaction, err := s.db.Beginx() if err != nil { - s.Log("failed to start SQL transaction: %v", err) + s.Log.Debug("failed to start SQL transaction", "error", err) return fmt.Errorf("error beginning transaction: %v", err) } @@ -492,7 +492,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { int(time.Now().Unix()), ).ToSql() if err != nil { - s.Log("failed to build insert query: %v", err) + s.Log.Debug("failed to build insert query", "error", err) return err } @@ -506,17 +506,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). 
ToSql() if buildErr != nil { - s.Log("failed to build select query: %v", buildErr) + s.Log.Debug("failed to build select query", "error", buildErr) return err } var record SQLReleaseWrapper if err := transaction.Get(&record, selectQuery, args...); err == nil { - s.Log("release %s already exists", key) + s.Log.Debug("release already exists", "key", key) return ErrReleaseExists } - s.Log("failed to store release %s in SQL database: %v", key, err) + s.Log.Debug("failed to store release in SQL database", "key", key, "error", err) return err } @@ -539,13 +539,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { if err != nil { defer transaction.Rollback() - s.Log("failed to build insert query: %v", err) + s.Log.Debug("failed to build insert query", "error", err) return err } if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil { defer transaction.Rollback() - s.Log("failed to write Labels: %v", err) + s.Log.Debug("failed to write Labels", "error", err) return err } } @@ -564,7 +564,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { body, err := encodeRelease(rls) if err != nil { - s.Log("failed to encode release: %v", err) + s.Log.Debug("failed to encode release", "error", err) return err } @@ -581,12 +581,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { ToSql() if err != nil { - s.Log("failed to build update query: %v", err) + s.Log.Debug("failed to build update query", "error", err) return err } if _, err := s.db.Exec(query, args...); err != nil { - s.Log("failed to update release %s in SQL database: %v", key, err) + s.Log.Debug("failed to update release in SQL database", "key", key, "error", err) return err } @@ -597,7 +597,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { func (s *SQL) Delete(key string) (*rspb.Release, error) { transaction, err := s.db.Beginx() if err != nil { - s.Log("failed to start SQL transaction: %v", err) + s.Log.Debug("failed to start SQL transaction", "error", err) return nil, fmt.Errorf("error beginning transaction: %v", err) } @@ -608,20 +608,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). ToSql() if err != nil { - s.Log("failed to build select query: %v", err) + s.Log.Debug("failed to build select query", "error", err) return nil, err } var record SQLReleaseWrapper err = transaction.Get(&record, selectQuery, args...) if err != nil { - s.Log("release %s not found: %v", key, err) + s.Log.Debug("release not found", "key", key, "error", err) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Log("failed to decode release %s: %v", key, err) + s.Log.Debug("failed to decode release", "key", key, "error", err) transaction.Rollback() return nil, err } @@ -633,18 +633,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). ToSql() if err != nil { - s.Log("failed to build delete query: %v", err) + s.Log.Debug("failed to build delete query", "error", err) return nil, err } _, err = transaction.Exec(deleteQuery, args...) 
if err != nil { - s.Log("failed perform delete query: %v", err) + s.Log.Debug("failed to perform delete query", "error", err) return release, err } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) + s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, "error", err) return nil, err } @@ -655,7 +655,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { ToSql() if err != nil { - s.Log("failed to build delete Labels query: %v", err) + s.Log.Debug("failed to build delete Labels query", "error", err) return nil, err } _, err = transaction.Exec(deleteCustomLabelsQuery, args...) From 15de13f9d2644a99d029fdb43070acff873599e4 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 31 Mar 2025 11:37:40 +0200 Subject: [PATCH 140/192] Fix linting issue and temporarily remove logging in test action Signed-off-by: Benoit Tigeot --- internal/log/logger.go | 6 +++--- internal/log/slog.go | 2 +- pkg/action/action_test.go | 5 +---- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/internal/log/logger.go b/internal/log/logger.go index ff971bdb1..10351d476 100644 --- a/internal/log/logger.go +++ b/internal/log/logger.go @@ -33,13 +33,13 @@ type Logger interface { type NopLogger struct{} // Debug implements Logger.Debug by doing nothing. -func (NopLogger) Debug(_ string, args ...any) {} +func (NopLogger) Debug(_ string, _ ...any) {} // Warn implements Logger.Warn by doing nothing. -func (NopLogger) Warn(_ string, args ...any) {} +func (NopLogger) Warn(_ string, _ ...any) {} // Error implements Logger.Error by doing nothing. -func (NopLogger) Error(_ string, args ...any) {} +func (NopLogger) Error(_ string, _ ...any) {} // DefaultLogger provides a no-op logger that discards all messages. // It can be used as a default when no logger is provided. diff --git a/internal/log/slog.go b/internal/log/slog.go index 7765545f9..1cafd49b1 100644 --- a/internal/log/slog.go +++ b/internal/log/slog.go @@ -59,7 +59,7 @@ func NewReadableTextLogger(output io.Writer, debugEnabled bool) Logger { handler := slog.NewTextHandler(output, &slog.HandlerOptions{ Level: level, - ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { if a.Key == slog.TimeKey { return slog.Attr{} } diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index efaebb3f9..746a7e54b 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -16,7 +16,6 @@ limitations under the License.
package action import ( - "flag" "fmt" "io" "testing" @@ -35,8 +34,6 @@ import ( "helm.sh/helm/v4/pkg/time" ) -var verbose = flag.Bool("test.log", false, "enable test logging") - func actionConfigFixture(t *testing.T) *Configuration { t.Helper() @@ -50,7 +47,7 @@ func actionConfigFixture(t *testing.T) *Configuration { KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, Capabilities: chartutil.DefaultCapabilities, RegistryClient: registryClient, - Log: logadapter.DefaultLogger, + Log: logadapter.DefaultLogger, // TODO: permit to log in test as before with `var verbose = flag.Bool("test.log", false, "enable test logging")`` } } From 3db7ebc59130fcc14fd176e746f7c65f4ec1f719 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 31 Mar 2025 11:51:32 +0200 Subject: [PATCH 141/192] Fix missing logger to SQL in test Signed-off-by: Benoit Tigeot --- pkg/storage/driver/mock_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index 54fda0542..c592ee634 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -31,6 +31,7 @@ import ( kblabels "k8s.io/apimachinery/pkg/labels" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -264,5 +265,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) db: sqlxDB, namespace: "default", statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), + Log: logadapter.DefaultLogger, }, mock } From 947658a96eee63ac1315c3e188775a430c0af6d5 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 31 Mar 2025 14:39:53 +0200 Subject: [PATCH 142/192] Explain why we ignore the timestamp Signed-off-by: Benoit Tigeot --- internal/log/slog.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/log/slog.go b/internal/log/slog.go index 1cafd49b1..e0fd35ac5 100644 --- a/internal/log/slog.go +++ b/internal/log/slog.go @@ -59,6 +59,7 @@ func NewReadableTextLogger(output io.Writer, debugEnabled bool) Logger { handler := slog.NewTextHandler(output, &slog.HandlerOptions{ Level: level, + // Ignore the time key to avoid cluttering the output with timestamps ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { if a.Key == slog.TimeKey { return slog.Attr{} From b2380720eb3fec55333a1b0cca9a6d188db631d9 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 16:45:21 +0200 Subject: [PATCH 143/192] Migrate to pure slog without a custom wrapper Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 6 +-- internal/log/logger.go | 46 --------------------- internal/log/slog.go | 72 --------------------------------- internal/monocular/client.go | 8 ++-- pkg/action/action.go | 6 +-- pkg/action/action_test.go | 4 +- pkg/cli/logger.go | 43 ++++++++++++++++++++ pkg/cmd/flags.go | 4 +- pkg/cmd/helpers_test.go | 4 +- pkg/cmd/install.go | 8 ++-- pkg/cmd/list.go | 6 +-- pkg/cmd/plugin.go | 2 +- pkg/cmd/plugin_install.go | 2 +- pkg/cmd/plugin_list.go | 2 +- pkg/cmd/plugin_uninstall.go | 2 +- pkg/cmd/plugin_update.go | 4 +- pkg/cmd/pull.go | 2 +- pkg/cmd/registry_login.go | 2 +- pkg/cmd/root.go | 5 +-- pkg/cmd/search_hub.go | 2 +- pkg/cmd/search_repo.go | 8 ++-- pkg/cmd/show.go | 4 +- pkg/cmd/upgrade.go | 4 +- pkg/kube/client.go | 7 ++-- pkg/kube/client_test.go | 5 +-- pkg/kube/ready.go | 9 +++-- pkg/kube/ready_test.go | 60 +++++++++++++-------------- pkg/kube/statuswait.go | 16 ++++---- 
pkg/kube/statuswait_test.go | 12 +++--- pkg/kube/wait.go | 41 ++++++++++--------- pkg/storage/driver/cfgmaps.go | 4 +- pkg/storage/driver/mock_test.go | 5 ++- pkg/storage/driver/secrets.go | 8 ++-- pkg/storage/driver/sql.go | 6 +-- 34 files changed, 169 insertions(+), 250 deletions(-) delete mode 100644 internal/log/logger.go delete mode 100644 internal/log/slog.go create mode 100644 pkg/cli/logger.go diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 158118c06..9bdd8e98c 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -23,7 +23,6 @@ import ( // Import to initialize client auth plugins. _ "k8s.io/client-go/plugin/pkg/client/auth" - logadapter "helm.sh/helm/v4/internal/log" helmcmd "helm.sh/helm/v4/pkg/cmd" "helm.sh/helm/v4/pkg/kube" ) @@ -38,16 +37,15 @@ func main() { // another name (e.g., helm2 or helm3) does not change the name of the // manager as picked up by the automated name detection. kube.ManagedFieldsManager = "helm" - logger := logadapter.NewReadableTextLogger(os.Stderr, false) cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) if err != nil { - logger.Warn("%+v", err) + helmcmd.Logger.Warn("failed to initialize command", "error", err) os.Exit(1) } if err := cmd.Execute(); err != nil { - logger.Debug("error", err) + helmcmd.Logger.Debug("command failed", "error", err) switch e := err.(type) { case helmcmd.PluginError: os.Exit(e.Code) diff --git a/internal/log/logger.go b/internal/log/logger.go deleted file mode 100644 index 10351d476..000000000 --- a/internal/log/logger.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package log - -import ( - "io" - "log/slog" -) - -// SlogAdapter adapts a standard library slog.Logger to the Logger interface. -type SlogAdapter struct { - logger *slog.Logger -} - -// Debug implements Logger.Debug by forwarding to the underlying slog.Logger. -func (a SlogAdapter) Debug(msg string, args ...any) { - a.logger.Debug(msg, args...) -} - -// Warn implements Logger.Warn by forwarding to the underlying slog.Logger. -func (a SlogAdapter) Warn(msg string, args ...any) { - a.logger.Warn(msg, args...) -} - -// Error implements Logger.Error by forwarding to the underlying slog.Logger. -func (a SlogAdapter) Error(msg string, args ...any) { - // TODO: Handle error with `slog.Any`: slog.Info("something went wrong", slog.Any("err", err)) - a.logger.Error(msg, args...) -} - -// NewSlogAdapter creates a Logger that forwards log messages to a slog.Logger. -func NewSlogAdapter(logger *slog.Logger) Logger { - if logger == nil { - return DefaultLogger - } - return SlogAdapter{logger: logger} -} - -// NewReadableTextLogger creates a Logger that outputs in a readable text format without timestamps -func NewReadableTextLogger(output io.Writer, debugEnabled bool) Logger { - level := slog.LevelInfo - if debugEnabled { - level = slog.LevelDebug - } - - handler := slog.NewTextHandler(output, &slog.HandlerOptions{ - Level: level, - // Ignore the time key to avoid cluttering the output with timestamps - ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { - if a.Key == slog.TimeKey { - return slog.Attr{} - } - return a - }, - }) - - return NewSlogAdapter(slog.New(handler)) -} diff --git a/internal/monocular/client.go b/internal/monocular/client.go index 88a2564b9..f4c9debca 100644 --- a/internal/monocular/client.go +++ b/internal/monocular/client.go @@ -18,6 +18,7 @@ package monocular import ( "errors" + "log/slog" "net/url" ) @@ -30,8 +31,7 @@ type Client struct { // The base URL for requests BaseURL string - // The internal logger to use - Log func(string, ...interface{}) + Log *slog.Logger } // New creates a new client @@ -44,12 +44,10 @@ func New(u string) (*Client, error) { return &Client{ BaseURL: u, - Log: nopLogger, + Log: slog.Default(), }, nil } -var nopLogger = func(_ string, _ ...interface{}) {} - // Validate if the base URL for monocular is valid. func validate(u string) error { diff --git a/pkg/action/action.go b/pkg/action/action.go index 778dc9ec5..1993e6241 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "log/slog" "os" "path" "path/filepath" @@ -33,7 +34,6 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - logadapter "helm.sh/helm/v4/internal/log" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/engine" @@ -96,7 +96,7 @@ type Configuration struct { // Capabilities describes the capabilities of the Kubernetes cluster. Capabilities *chartutil.Capabilities - Log logadapter.Logger + Log *slog.Logger // HookOutputFunc called with container name and returns and expects writer that will receive the log output. 
HookOutputFunc func(namespace, pod, container string) io.Writer @@ -375,7 +375,7 @@ func (cfg *Configuration) recordRelease(r *release.Release) { } // Init initializes the action configuration -func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log logadapter.Logger) error { +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log *slog.Logger) error { kc := kube.New(getter) kc.Log = log diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 746a7e54b..ee32246af 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -18,12 +18,12 @@ package action import ( "fmt" "io" + "log/slog" "testing" "github.com/stretchr/testify/assert" fakeclientset "k8s.io/client-go/kubernetes/fake" - logadapter "helm.sh/helm/v4/internal/log" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" kubefake "helm.sh/helm/v4/pkg/kube/fake" @@ -47,7 +47,7 @@ func actionConfigFixture(t *testing.T) *Configuration { KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, Capabilities: chartutil.DefaultCapabilities, RegistryClient: registryClient, - Log: logadapter.DefaultLogger, // TODO: permit to log in test as before with `var verbose = flag.Bool("test.log", false, "enable test logging")`` + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), // TODO: permit to log in test as before with `var verbose = flag.Bool("test.log", false, "enable test logging")`` } } diff --git a/pkg/cli/logger.go b/pkg/cli/logger.go new file mode 100644 index 000000000..243284d76 --- /dev/null +++ b/pkg/cli/logger.go @@ -0,0 +1,43 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "log/slog" + "os" +) + +func NewLogger(debug bool) *slog.Logger { + level := slog.LevelInfo + if debug { + level = slog.LevelDebug + } + + // Create a handler that removes timestamps + handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: level, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + // Remove the time attribute + if a.Key == slog.TimeKey { + return slog.Attr{} + } + return a + }, + }) + + return slog.New(handler) +} diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index ed3b83a55..454bb13de 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -82,11 +82,11 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(s) return nil case "true": - Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") + Logger.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": - Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") + Logger.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") *ws = waitValue(kube.HookOnlyStrategy) return nil default: diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go index 1f597d7ba..38e0a5b3e 100644 --- a/pkg/cmd/helpers_test.go +++ b/pkg/cmd/helpers_test.go @@ -19,6 +19,7 @@ package cmd import ( "bytes" "io" + "log/slog" "os" "strings" "testing" @@ -26,7 +27,6 @@ import ( shellwords "github.com/mattn/go-shellwords" "github.com/spf13/cobra" - logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/action" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -93,7 +93,7 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) Releases: store, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, Capabilities: chartutil.DefaultCapabilities, - Log: logadapter.DefaultLogger, + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } root, err := newRootCmdWithConfig(actionConfig, buf, args) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 566739bc3..14746f8c3 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -229,9 +229,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal } func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) { - logger.Debug("Original chart version", "version", client.Version) + Logger.Debug("Original chart version", "version", client.Version) if client.Version == "" && client.Devel { - logger.Debug("setting version to >0.0.0-0") + Logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } @@ -246,7 +246,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options return nil, err } - logger.Debug("Chart path", "path", cp) + Logger.Debug("Chart path", "path", cp) p := getter.All(settings) vals, err := valueOpts.MergeValues(p) @@ -265,7 +265,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options } if chartRequested.Metadata.Deprecated { - logger.Warn("this chart is deprecated") + Logger.Warn("this chart is deprecated") } if req := chartRequested.Metadata.Dependencies; req != nil { diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go index 10b70d7d9..a4eb91aad 100644 --- a/pkg/cmd/list.go +++ b/pkg/cmd/list.go @@ -19,14 +19,12 @@ package cmd import ( "fmt" "io" - "log/slog" "os" "strconv" 
"github.com/gosuri/uitable" "github.com/spf13/cobra" - logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" @@ -63,8 +61,6 @@ flag with the '--offset' flag allows you to page through results. func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { client := action.NewList(cfg) var outfmt output.Format - slogger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - adapter := logadapter.NewSlogAdapter(slogger) cmd := &cobra.Command{ Use: "list", @@ -75,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { ValidArgsFunction: noMoreArgsCompFunc, RunE: func(cmd *cobra.Command, _ []string) error { if client.AllNamespaces { - if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), adapter); err != nil { + if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Logger); err != nil { return err } } diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index 1bb7ffb57..05d7135dd 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -66,7 +66,7 @@ func runHook(p *plugin.Plugin, event string) error { prog := exec.Command(main, argv...) - logger.Debug("running hook", "event", event, "program", prog) + Logger.Debug("running hook", "event", event, "program", prog) prog.Stdout, prog.Stderr = os.Stdout, os.Stderr if err := prog.Run(); err != nil { diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index ca3d4ed90..2e8fd4d6a 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -79,7 +79,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error { return err } - logger.Debug("loading plugin", "path", i.Path()) + Logger.Debug("loading plugin", "path", i.Path()) p, err := plugin.LoadDir(i.Path()) if err != nil { return errors.Wrap(err, "plugin is installed but unusable") diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 9eb6707db..3a1d0f2f5 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -32,7 +32,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command { Short: "list installed Helm plugins", ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { - logger.Debug("pluginDirs", settings.PluginsDirectory) + Logger.Debug("pluginDirs", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index 3db454ff9..18815b139 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -60,7 +60,7 @@ func (o *pluginUninstallOptions) complete(args []string) error { } func (o *pluginUninstallOptions) run(out io.Writer) error { - logger.Debug("loading installer plugins", "dir", settings.PluginsDirectory) + Logger.Debug("loading installer plugins", "dir", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index 38c451e2f..16ac84066 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -62,7 +62,7 @@ func (o *pluginUpdateOptions) complete(args []string) error { func (o *pluginUpdateOptions) run(out io.Writer) error { installer.Debug = settings.Debug - logger.Debug("loading installed plugins", "path", settings.PluginsDirectory) + Logger.Debug("loading installed plugins", "path", settings.PluginsDirectory) plugins, err := 
plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err @@ -104,7 +104,7 @@ func updatePlugin(p *plugin.Plugin) error { return err } - logger.Debug("loading plugin", "path", i.Path()) + Logger.Debug("loading plugin", "path", i.Path()) updatedPlugin, err := plugin.LoadDir(i.Path()) if err != nil { return err diff --git a/pkg/cmd/pull.go b/pkg/cmd/pull.go index 65ad95947..fca1c8b9b 100644 --- a/pkg/cmd/pull.go +++ b/pkg/cmd/pull.go @@ -60,7 +60,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { RunE: func(_ *cobra.Command, args []string) error { client.Settings = settings if client.Version == "" && client.Devel { - logger.Debug("setting version to >0.0.0-0") + Logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/registry_login.go b/pkg/cmd/registry_login.go index bc6c1d13d..7c853d786 100644 --- a/pkg/cmd/registry_login.go +++ b/pkg/cmd/registry_login.go @@ -122,7 +122,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd } } } else { - logger.Warn("using --password via the CLI is insecure. Use --password-stdin") + Logger.Warn("using --password via the CLI is insecure. Use --password-stdin") } return username, password, nil diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 407e89139..0cbcfebaf 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" - logadapter "helm.sh/helm/v4/internal/log" "helm.sh/helm/v4/internal/tlsutil" "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli" @@ -96,7 +95,7 @@ By default, the default directories depend on the Operating System. The defaults ` var settings = cli.New() -var logger = logadapter.NewReadableTextLogger(os.Stderr, settings.Debug) +var Logger = cli.NewLogger(settings.Debug) func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { actionConfig := new(action.Configuration) @@ -106,7 +105,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { } cobra.OnInitialize(func() { helmDriver := os.Getenv("HELM_DRIVER") - if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, logger); err != nil { + if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Logger); err != nil { log.Fatal(err) } if helmDriver == "memory" { diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index a2d35f32b..1a2848b25 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -89,7 +89,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error { q := strings.Join(args, " ") results, err := c.Search(q) if err != nil { - logger.Debug("search failed", "error", err) + Logger.Debug("search failed", "error", err) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) } diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index 610176dd6..a6aa755cd 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -130,17 +130,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error { } func (o *searchRepoOptions) setupSearchedVersion() { - logger.Debug("original chart version", "version", o.version) + Logger.Debug("original chart version", "version", o.version) if o.version != "" { return } if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases). 
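		// ">0.0.0-0" opts in to prereleases: under the Masterminds/semver
		// rules Helm uses, a constraint matches prerelease versions only when
		// it carries a prerelease tag itself. A quick check (a sketch,
		// assuming github.com/Masterminds/semver/v3):
		//
		//	c, _ := semver.NewConstraint(">0.0.0-0")
		//	v, _ := semver.NewVersion("1.2.3-rc.1")
		//	c.Check(v) // true; against ">0.0.0" it would be false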
- logger.Debug("setting version to >0.0.0-0") + Logger.Debug("setting version to >0.0.0-0") o.version = ">0.0.0-0" } else { // search only for stable releases, prerelease versions will be skipped - logger.Debug("setting version to >0.0.0") + Logger.Debug("setting version to >0.0.0") o.version = ">0.0.0" } } @@ -189,7 +189,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) { f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) ind, err := repo.LoadIndexFile(f) if err != nil { - logger.Warn("repo is corrupt or missing", "repo", n, "error", err) + Logger.Warn("repo is corrupt or missing", "repo", n, "error", err) continue } diff --git a/pkg/cmd/show.go b/pkg/cmd/show.go index 6aa322430..c70ffa256 100644 --- a/pkg/cmd/show.go +++ b/pkg/cmd/show.go @@ -211,9 +211,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) { } func runShow(args []string, client *action.Show) (string, error) { - logger.Debug("original chart version", "version", client.Version) + Logger.Debug("original chart version", "version", client.Version) if client.Version == "" && client.Devel { - logger.Debug("setting version to >0.0.0-0") + Logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index a85eb5a41..e6b5c0409 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -173,7 +173,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if client.Version == "" && client.Devel { - logger.Debug("setting version to >0.0.0-0") + Logger.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } @@ -225,7 +225,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if ch.Metadata.Deprecated { - logger.Warn("this chart is deprecated") + Logger.Warn("this chart is deprecated") } // Create context and prepare the handle of SIGTERM diff --git a/pkg/kube/client.go b/pkg/kube/client.go index e82165486..be5214431 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" "reflect" @@ -51,8 +52,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" - - logadapter "helm.sh/helm/v4/internal/log" ) // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. @@ -75,7 +74,7 @@ type Client struct { // needs. The smaller surface area of the interface means there is a lower // chance of it changing. 
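	// Unlike the removed interface value, a nil *slog.Logger in the Log field
	// below is not safe to call: its methods dereference the handler and
	// panic. Constructors must therefore always install a non-nil default,
	// e.g. one that discards output (a sketch):
	//
	//	c.Log = slog.New(slog.NewTextHandler(io.Discard, nil))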
Factory Factory - Log logadapter.Logger + Log *slog.Logger // Namespace allows to bypass the kubeconfig file for the choice of the namespace Namespace string @@ -164,7 +163,7 @@ func New(getter genericclioptions.RESTClientGetter) *Client { factory := cmdutil.NewFactory(getter) c := &Client{ Factory: factory, - Log: nopLogger, + Log: slog.Default(), } return c } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 11a3413e4..6244e3ee5 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -19,6 +19,7 @@ package kube import ( "bytes" "io" + "log/slog" "net/http" "strings" "testing" @@ -34,8 +35,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" - - logadapter "helm.sh/helm/v4/internal/log" ) var unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer @@ -109,7 +108,7 @@ func newTestClient(t *testing.T) *Client { return &Client{ Factory: testFactory.WithNamespace("default"), - Log: logadapter.DefaultLogger, + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } } diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index c128e31b0..745dd265e 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -19,6 +19,8 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" "fmt" + "io" + "log/slog" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -32,7 +34,6 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - logadapter "helm.sh/helm/v4/internal/log" deploymentutil "helm.sh/helm/v4/internal/third_party/k8s.io/kubernetes/deployment/util" ) @@ -58,13 +59,13 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption { // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // be used to override defaults. -func NewReadyChecker(cl kubernetes.Interface, logger logadapter.Logger, opts ...ReadyCheckerOption) ReadyChecker { +func NewReadyChecker(cl kubernetes.Interface, logger *slog.Logger, opts ...ReadyCheckerOption) ReadyChecker { c := ReadyChecker{ client: cl, log: logger, } if c.log == nil { - c.log = logadapter.DefaultLogger + c.log = slog.New(slog.NewTextHandler(io.Discard, nil)) } for _, opt := range opts { opt(&c) @@ -75,7 +76,7 @@ func NewReadyChecker(cl kubernetes.Interface, logger logadapter.Logger, opts ... // ReadyChecker is a type that can check core Kubernetes types for readiness. 
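// Construction takes any slog logger plus functional options; a typical call
// site looks like this (a sketch; clientset is any kubernetes.Interface, and
// ctx/resourceInfo come from the caller):
//
//	checker := NewReadyChecker(clientset, slog.Default(), CheckJobs(true))
//	ready, err := checker.IsReady(ctx, resourceInfo)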
type ReadyChecker struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index 155d3d435..d9dd8fb3d 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -17,6 +17,8 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" + "io" + "log/slog" "testing" appsv1 "k8s.io/api/apps/v1" @@ -30,8 +32,6 @@ import ( "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" - - logadapter "helm.sh/helm/v4/internal/log" ) const defaultNamespace = metav1.NamespaceDefault @@ -39,7 +39,7 @@ const defaultNamespace = metav1.NamespaceDefault func Test_ReadyChecker_IsReady_Pod(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -59,7 +59,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { name: "IsReady Pod", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -75,7 +75,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { name: "IsReady Pod returns error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -115,7 +115,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -135,7 +135,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { name: "IsReady Job error while getting job", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -151,7 +151,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { name: "IsReady Job", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -190,7 +190,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -211,7 +211,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { name: "IsReady Deployments error while getting current Deployment", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -228,7 +228,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { name: "IsReady Deployments", //TODO fix this one fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -272,7 +272,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -292,7 +292,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { name: "IsReady 
PersistentVolumeClaim", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -308,7 +308,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { name: "IsReady PersistentVolumeClaim with error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -347,7 +347,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -367,7 +367,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { name: "IsReady Service", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -383,7 +383,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { name: "IsReady Service with error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -422,7 +422,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -442,7 +442,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { name: "IsReady DaemonSet", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -458,7 +458,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { name: "IsReady DaemonSet with error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -497,7 +497,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -517,7 +517,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { name: "IsReady StatefulSet", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -533,7 +533,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { name: "IsReady StatefulSet with error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -572,7 +572,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -592,7 +592,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController", fields: fields{ client: fake.NewClientset(), - log: func(string, 
...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -608,7 +608,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController with error", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -624,7 +624,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController and pods not ready for object", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -663,7 +663,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { type fields struct { client kubernetes.Interface - log logadapter.Logger + log *slog.Logger checkJobs bool pausedAsReady bool } @@ -683,7 +683,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { name: "IsReady ReplicaSet", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -699,7 +699,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { name: "IsReady ReplicaSet not ready", fields: fields{ client: fake.NewClientset(), - log: func(string, ...interface{}) {}, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 22242b40f..bcb48155b 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "time" @@ -42,7 +43,7 @@ import ( type statusWaiter struct { client dynamic.Interface restMapper meta.RESTMapper - log func(string, ...interface{}) + log *slog.Logger } func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { @@ -55,7 +56,7 @@ func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) { func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout) + w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) @@ -76,7 +77,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() - w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) + w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) return w.wait(ctx, resourceList, sw) } @@ -84,7 +85,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer 
cancel() - w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout) + w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) @@ -95,7 +96,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() - w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout) + w.log.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) return w.waitForDelete(ctx, resourceList, sw) } @@ -179,7 +180,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w return nil } -func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc { +func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus @@ -209,8 +210,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n", - first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status) + logger.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index fee325ddc..7226058c4 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -18,6 +18,8 @@ package kube // import "helm.sh/helm/v3/pkg/kube" import ( "errors" + "io" + "log/slog" "testing" "time" @@ -217,7 +219,7 @@ func TestStatusWaitForDelete(t *testing.T) { statusWaiter := statusWaiter{ restMapper: fakeMapper, client: fakeClient, - log: t.Logf, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), } objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { @@ -258,7 +260,7 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { statusWaiter := statusWaiter{ restMapper: fakeMapper, client: fakeClient, - log: t.Logf, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), } // Don't create the object to test that the wait for delete works when the object doesn't exist objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) @@ -317,7 +319,7 @@ func TestStatusWait(t *testing.T) { statusWaiter := statusWaiter{ client: fakeClient, restMapper: fakeMapper, - log: t.Logf, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), } objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { @@ -371,7 +373,7 @@ func TestWaitForJobComplete(t *testing.T) { statusWaiter := statusWaiter{ client: fakeClient, restMapper: fakeMapper, - log: t.Logf, + 
log: slog.New(slog.NewTextHandler(io.Discard, nil)), } objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { @@ -431,7 +433,7 @@ func TestWatchForReady(t *testing.T) { statusWaiter := statusWaiter{ client: fakeClient, restMapper: fakeMapper, - log: t.Logf, + log: slog.New(slog.NewTextHandler(io.Discard, nil)), } objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index e3d29d8a9..0751a7217 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -44,15 +44,13 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/apimachinery/pkg/util/wait" - - logadapter "helm.sh/helm/v4/internal/log" ) // legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3 // Helm 4 now uses the StatusWaiter implementation instead type legacyWaiter struct { c ReadyChecker - log func(string, ...interface{}) + log *slog.Logger kubeClient *kubernetes.Clientset } @@ -69,7 +67,7 @@ func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Durati // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error { - hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout) + hw.log.Debug("beginning wait for resources", "count", len(created), "timeout", timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -87,10 +85,10 @@ func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Dura if waitRetries > 0 && hw.isRetryableError(err, v) { numberOfErrors[i]++ if numberOfErrors[i] > waitRetries { - hw.log("Max number of retries reached") + hw.log.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i]) return false, err } - hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) + hw.log.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries) return false, nil } numberOfErrors[i] = 0 @@ -106,14 +104,14 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo if err == nil { return false } - hw.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) + hw.log.Debug("error received when checking resource status", "resource", resource.Name, "error", err) if ev, ok := err.(*apierrors.StatusError); ok { statusCode := ev.Status().Code retryable := hw.isRetryableHTTPStatusCode(statusCode) - hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable) + hw.log.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable) return retryable } - hw.log("Retryable error? 
%t", true) + hw.log.Debug("retryable error assumed", "resource", resource.Name) return true } @@ -123,7 +121,7 @@ func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error { - slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout) + hw.log.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout) startTime := time.Now() ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -141,9 +139,9 @@ func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duratio elapsed := time.Since(startTime).Round(time.Second) if err != nil { - slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err)) + hw.log.Debug("wait for resources failed", "elapsed", elapsed, "error", err) } else { - slog.Debug("wait for resources succeeded", "elapsed", elapsed) + hw.log.Debug("wait for resources succeeded", "elapsed", elapsed) } return err @@ -251,7 +249,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In return nil } - hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) + hw.log.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout) // Use a selector on the name of the resource. This should be unique for the // given version and kind @@ -279,7 +277,8 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In // we get. We care mostly about jobs, where what we want to see is // the status go into a good state. For other types, like ReplicaSet // we don't really do anything to support these as hooks. - hw.log("Add/Modify event for %s: %v", info.Name, e.Type) + hw.log.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type) + switch kind { case "Job": return hw.waitForJob(obj, info.Name) @@ -288,11 +287,11 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In } return true, nil case watch.Deleted: - hw.log("Deleted event for %s", info.Name) + hw.log.Debug("deleted event received", "resource", info.Name) return true, nil case watch.Error: // Handle error and return with an error. 
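			// The object carried by a watch.Error event is conventionally a
			// *metav1.Status; decoding it gives a more precise error than the
			// generic message here (a sketch, assuming the usual apierrors
			// alias for k8s.io/apimachinery/pkg/api/errors):
			//
			//	if status, ok := e.Object.(*metav1.Status); ok {
			//		return true, apierrors.FromObject(status)
			//	}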
- hw.log("Error event for %s", info.Name) + hw.log.Error("error event received", "resource", info.Name) return true, errors.Errorf("failed to deploy %s", info.Name) default: return false, nil @@ -314,11 +313,12 @@ func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error if c.Type == batchv1.JobComplete && c.Status == "True" { return true, nil } else if c.Type == batchv1.JobFailed && c.Status == "True" { + hw.log.Error("job failed", "job", name, "reason", c.Reason) return true, errors.Errorf("job %s failed: %s", name, c.Reason) } } - hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + hw.log.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded) return false, nil } @@ -333,14 +333,15 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool switch o.Status.Phase { case corev1.PodSucceeded: - hw.log("Pod %s succeeded", o.Name) + hw.log.Debug("pod succeeded", "pod", o.Name) return true, nil case corev1.PodFailed: + hw.log.Error("pod failed", "pod", o.Name) return true, errors.Errorf("pod %s failed", o.Name) case corev1.PodPending: - hw.log("Pod %s pending", o.Name) + hw.log.Debug("pod pending", "pod", o.Name) case corev1.PodRunning: - hw.log("Pod %s running", o.Name) + hw.log.Debug("pod running", "pod", o.Name) } return false, nil diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 421d39ba8..83715ac01 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver" import ( "context" "fmt" + "log/slog" "strconv" "strings" "time" @@ -31,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -44,7 +44,7 @@ const ConfigMapsDriverName = "ConfigMap" // ConfigMapsInterface. 
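// A driver is typically built from a namespaced typed client and handed the
// caller's logger (a sketch; clientset is any kubernetes.Interface):
//
//	d := driver.NewConfigMaps(clientset.CoreV1().ConfigMaps(namespace))
//	d.Log = slog.New(slog.NewTextHandler(os.Stderr, nil))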
type ConfigMaps struct { impl corev1.ConfigMapInterface - Log logadapter.Logger + Log *slog.Logger } // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index c592ee634..b5bf08bf4 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -19,6 +19,8 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver" import ( "context" "fmt" + "io" + "log/slog" "testing" sqlmock "github.com/DATA-DOG/go-sqlmock" @@ -31,7 +33,6 @@ import ( kblabels "k8s.io/apimachinery/pkg/labels" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -265,6 +266,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) db: sqlxDB, namespace: "default", statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), - Log: logadapter.DefaultLogger, + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), }, mock } diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index bd1edcae1..af6e8591e 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -19,6 +19,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver" import ( "context" "fmt" + "log/slog" "strconv" "strings" "time" @@ -31,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -44,7 +44,7 @@ const SecretsDriverName = "Secret" // SecretsInterface. type Secrets struct { impl corev1.SecretInterface - Log logadapter.Logger + Log *slog.Logger } // NewSecrets initializes a new Secrets wrapping an implementation of @@ -96,7 +96,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("list: failed to decode release: %v: %s", item, err) + secrets.Log.Debug("list failed to decode release", "key", item.Name, "error", err) continue } @@ -135,7 +135,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("query: failed to decode release: %s", err) + secrets.Log.Debug("failed to decode release", "key", item.Name, "error", err) continue } rls.Labels = item.ObjectMeta.Labels diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index 9a4188d2d..7ba317593 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -18,6 +18,7 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver" import ( "fmt" + "log/slog" "sort" "strconv" "time" @@ -30,7 +31,6 @@ import ( // Import pq for postgres dialect _ "github.com/lib/pq" - logadapter "helm.sh/helm/v4/internal/log" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -88,7 +88,7 @@ type SQL struct { namespace string statementBuilder sq.StatementBuilderType - Log logadapter.Logger + Log *slog.Logger } // Name returns the name of the driver. @@ -277,7 +277,7 @@ type SQLReleaseCustomLabelWrapper struct { } // NewSQL initializes a new sql driver. 
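// With the signature below, callers hand NewSQL any slog logger along with
// the DSN and namespace (a sketch; the connection string is illustrative
// only):
//
//	d, err := driver.NewSQL(
//		"postgres://user:pass@localhost:5432/helm?sslmode=disable",
//		slog.Default(),
//		"default",
//	)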
-func NewSQL(connectionString string, logger logadapter.Logger, namespace string) (*SQL, error) { +func NewSQL(connectionString string, logger *slog.Logger, namespace string) (*SQL, error) { db, err := sqlx.Connect(postgreSQLDialect, connectionString) if err != nil { return nil, err From 83a5a14826894232bdf033fa40a1e996f00e5b86 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 16:47:26 +0200 Subject: [PATCH 144/192] Properly discard logs by default Signed-off-by: Benoit Tigeot --- internal/monocular/client.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/monocular/client.go b/internal/monocular/client.go index f4c9debca..452bc36e4 100644 --- a/internal/monocular/client.go +++ b/internal/monocular/client.go @@ -18,6 +18,7 @@ package monocular import ( "errors" + "io" "log/slog" "net/url" ) @@ -44,7 +45,7 @@ func New(u string) (*Client, error) { return &Client{ BaseURL: u, - Log: slog.Default(), + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), }, nil } From b6adbbb227cfe5a097072eef37d4ef13d98000db Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 16:50:00 +0200 Subject: [PATCH 145/192] Align error logging style with other call sites Signed-off-by: Benoit Tigeot --- pkg/action/action.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 1993e6241..1996e0ff8 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -244,9 +244,6 @@ type RESTClientGetter interface { ToRESTMapper() (meta.RESTMapper, error) } -// DebugLog sets the logger that writes debug strings -type DebugLog func(format string, v ...interface{}) - // capabilities builds a Capabilities from discovery information. func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { if cfg.Capabilities != nil { @@ -270,8 +267,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { apiVersions, err := GetVersionSet(dc) if err != nil { if discovery.IsGroupDiscoveryFailedError(err) { - cfg.Log.Warn("The Kubernetes server has an orphaned API service. Server reports: %s", err) - cfg.Log.Warn("To fix this, kubectl delete apiservice <service-name>") + cfg.Log.Warn("the kubernetes server has an orphaned API service", "errors", err) + cfg.Log.Warn("to fix this, kubectl delete apiservice <service-name>") } else { return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") } @@ -370,7 +367,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version // recordRelease with an update operation in case reuse has been set. func (cfg *Configuration) recordRelease(r *release.Release) { if err := cfg.Releases.Update(r); err != nil { - cfg.Log.Warn("Failed to update release %s: %s", r.Name, err) + cfg.Log.Warn("failed to update release", "name", r.Name, "revision", r.Version, "error", err) } } From baa597c5671f7b9f20a2ad204b762e36f1c05bdc Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 17:02:18 +0200 Subject: [PATCH 146/192] Do not remove the ability to print logs in tests Signed-off-by: Benoit Tigeot --- pkg/action/action_test.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index ee32246af..815d1a0c8 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -16,9 +16,11 @@ limitations under the License.
package action import ( + "flag" "fmt" "io" "log/slog" + "os" "testing" "github.com/stretchr/testify/assert" @@ -37,6 +39,24 @@ import ( func actionConfigFixture(t *testing.T) *Configuration { t.Helper() + var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)") + + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + if *verbose { + // Create a handler that removes timestamps + handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelDebug, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + // Remove the time attribute + if a.Key == slog.TimeKey { + return slog.Attr{} + } + return a + }, + }) + logger = slog.New(handler) + } + registryClient, err := registry.NewClient() if err != nil { t.Fatal(err) @@ -47,7 +67,7 @@ func actionConfigFixture(t *testing.T) *Configuration { KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, Capabilities: chartutil.DefaultCapabilities, RegistryClient: registryClient, - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), // TODO: permit to log in test as before with `var verbose = flag.Bool("test.log", false, "enable test logging")`` + Log: logger, } } From 5580f6115767de0bd33e61dda18cef45ef661a5e Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 17:32:36 +0200 Subject: [PATCH 147/192] Properly reproduce the nopLogger as before Signed-off-by: Benoit Tigeot --- pkg/kube/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index be5214431..b38e12693 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -163,7 +163,7 @@ func New(getter genericclioptions.RESTClientGetter) *Client { factory := cmdutil.NewFactory(getter) c := &Client{ Factory: factory, - Log: slog.Default(), + Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } return c } From 6ce967391d6a0264d0b41d05776d32974f057c7d Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 17:33:14 +0200 Subject: [PATCH 148/192] Trick slog to return the full error Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 9bdd8e98c..39d89b034 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -17,6 +17,7 @@ limitations under the License. package main // import "helm.sh/helm/v4/cmd/helm" import ( + "fmt" "log" "os" @@ -40,7 +41,7 @@ func main() { cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) if err != nil { - helmcmd.Logger.Warn("%+v", err) + helmcmd.Logger.Warn(fmt.Sprintf("%+v", err)) os.Exit(1) } From 3e4e78378e77601dedf0751260e0996ff1265c6e Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 17:35:35 +0200 Subject: [PATCH 149/192] Go the slog way Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 39d89b034..c2605f377 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -17,8 +17,8 @@ limitations under the License. package main // import "helm.sh/helm/v4/cmd/helm" import ( - "fmt" "log" + "log/slog" "os" // Import to initialize client auth plugins. 
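The fmt.Sprintf("%+v", err) form exists because github.com/pkg/errors values record a stack trace that only the %+v verb renders; err.Error() (and plain %v) yields just the message chain. A minimal illustration (a sketch, assuming github.com/pkg/errors):

	err := errors.Wrap(io.EOF, "reading config")
	fmt.Printf("%v\n", err)  // reading config: EOF
	fmt.Printf("%+v\n", err) // prints the chain plus the stack recorded at Wrap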
@@ -41,7 +41,7 @@ func main() { cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) if err != nil { - helmcmd.Logger.Warn(fmt.Sprintf("%+v", err)) + helmcmd.Logger.Warn("command failed", slog.Any("error", err)) os.Exit(1) } From 710770eed4ffa97c1d29e34e6f661d0ab5ef4cd1 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 17:46:13 +0200 Subject: [PATCH 150/192] Linting Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 2 +- pkg/action/action_test.go | 2 +- pkg/cli/logger.go | 2 +- pkg/cmd/plugin_list.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index c2605f377..11c5e8769 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -46,7 +46,7 @@ func main() { } if err := cmd.Execute(); err != nil { - helmcmd.Logger.Debug("error", err) + helmcmd.Logger.Debug("error", slog.Any("error", err)) switch e := err.(type) { case helmcmd.PluginError: os.Exit(e.Code) diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 815d1a0c8..c770f3920 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -46,7 +46,7 @@ func actionConfigFixture(t *testing.T) *Configuration { // Create a handler that removes timestamps handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ Level: slog.LevelDebug, - ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { // Remove the time attribute if a.Key == slog.TimeKey { return slog.Attr{} diff --git a/pkg/cli/logger.go b/pkg/cli/logger.go index 243284d76..d75622c37 100644 --- a/pkg/cli/logger.go +++ b/pkg/cli/logger.go @@ -30,7 +30,7 @@ func NewLogger(debug bool) *slog.Logger { // Create a handler that removes timestamps handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ Level: level, - ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { // Remove the time attribute if a.Key == slog.TimeKey { return slog.Attr{} diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 3a1d0f2f5..52aefe8ef 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -32,7 +32,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command { Short: "list installed Helm plugins", ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { - Logger.Debug("pluginDirs", settings.PluginsDirectory) + Logger.Debug("pluginDirs", "directory", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err From 5c746037b3ee8c5e23ce3932a31c942938a8b3a5 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 7 Apr 2025 18:01:42 +0200 Subject: [PATCH 151/192] Prevent redefining verbose flags Signed-off-by: Benoit Tigeot --- pkg/action/action_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index c770f3920..ee967714c 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -36,11 +36,11 @@ import ( "helm.sh/helm/v4/pkg/time" ) +var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)") + func actionConfigFixture(t *testing.T) *Configuration { t.Helper() - var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)") - logger := slog.New(slog.NewTextHandler(io.Discard, nil)) if *verbose { // Create a handler that removes timestamps From 0c85456788dcc1f87ace736e208fe8008189bb21 Mon Sep 17 00:00:00 2001 From: Benoit 
Tigeot Date: Mon, 7 Apr 2025 18:25:16 +0200 Subject: [PATCH 152/192] Leverage slog.Any for errors Signed-off-by: Benoit Tigeot --- pkg/action/action.go | 2 +- pkg/action/install.go | 7 ++-- pkg/action/uninstall.go | 5 +-- pkg/action/upgrade.go | 3 +- pkg/chart/v2/util/dependencies.go | 2 +- pkg/cmd/search_hub.go | 3 +- pkg/cmd/search_repo.go | 3 +- pkg/engine/lookup_func.go | 4 +-- pkg/ignore/rules.go | 6 ++-- pkg/kube/client.go | 12 +++---- pkg/kube/wait.go | 4 +-- pkg/storage/driver/cfgmaps.go | 20 +++++------ pkg/storage/driver/secrets.go | 4 +-- pkg/storage/driver/sql.go | 60 +++++++++++++++---------------- 14 files changed, 70 insertions(+), 65 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 1996e0ff8..d4f917b9f 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -367,7 +367,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version // recordRelease with an update operation in case reuse has been set. func (cfg *Configuration) recordRelease(r *release.Release) { if err := cfg.Releases.Update(r); err != nil { - cfg.Log.Warn("failed to update release", "name", r.Name, "revision", r.Version, "error", err) + cfg.Log.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err)) } } diff --git a/pkg/action/install.go b/pkg/action/install.go index 8b749b777..3f16969ae 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "io" + "log/slog" "net/url" "os" "path" @@ -249,12 +250,12 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } if err := i.availableName(); err != nil { - i.cfg.Log.Error("release name check failed", "error", err) + i.cfg.Log.Error("release name check failed", slog.Any("error", err)) return nil, errors.Wrap(err, "release name check failed") } if err := chartutil.ProcessDependencies(chrt, vals); err != nil { - i.cfg.Log.Error("chart dependencies processing failed", "error", err) + i.cfg.Log.Error("chart dependencies processing failed", slog.Any("error", err)) return nil, errors.Wrap(err, "chart dependencies processing failed") } @@ -505,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource // One possible strategy would be to do a timed retry to see if we can get // this stored in the future. if err := i.recordRelease(rel); err != nil { - i.cfg.Log.Error("failed to record the release", "error", err) + i.cfg.Log.Error("failed to record the release", slog.Any("error", err)) } return rel, nil diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 4e959172c..c3835042f 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,6 +17,7 @@ limitations under the License. package action import ( + "log/slog" "strings" "time" @@ -121,7 +122,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) // From here on out, the release is currently considered to be in StatusUninstalling // state. 
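		// slog.Any("error", err) builds the same Attr as the loose pair
		// "error", err; the wrapped form just makes the key/value pairing
		// explicit when several pairs are passed, e.g. these two calls emit
		// identical records:
		//
		//	u.cfg.Log.Debug("uninstall: failed to store updated release", slog.Any("error", err))
		//	u.cfg.Log.Debug("uninstall: failed to store updated release", "error", err)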
if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log.Debug("uninstall: Failed to store updated release", "error", err) + u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) } deletedResources, kept, errs := u.deleteRelease(rel) @@ -168,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log.Debug("uninstall: Failed to store updated release", "error", err) + u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) } if len(errs) > 0 { diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 147c0fe5a..429bac9d7 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "fmt" + "log/slog" "strings" "sync" "time" @@ -486,7 +487,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) - u.cfg.Log.Warn("upgrade failed", "name", rel.Name, "error", err) + u.cfg.Log.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err)) rel.Info.Status = release.StatusFailed rel.Info.Description = msg diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 72a08b2a9..b7f78010b 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Warn("ImportValues missing table from chart", "chart", r.Name, "error", err) + slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err)) continue } // create value map from child to be merged into parent diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index 1a2848b25..6aa5c10bd 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" "io" + "log/slog" "strings" "github.com/gosuri/uitable" @@ -89,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error { q := strings.Join(args, " ") results, err := c.Search(q) if err != nil { - Logger.Debug("search failed", "error", err) + Logger.Debug("search failed", slog.Any("error", err)) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) } diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index a6aa755cd..850bcbe16 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -21,6 +21,7 @@ import ( "bytes" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -189,7 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) { f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) ind, err := repo.LoadIndexFile(f) if err != nil { - Logger.Warn("repo is corrupt or missing", "repo", n, "error", err) + Logger.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err)) continue } diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index b7460850a..d7267f786 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -101,7 +101,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - slog.Error("unable to get 
apiresource", "groupVersionKind", gvk.String(), "error", err) + slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err)) return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) } gvr := schema.GroupVersionResource{ @@ -127,7 +127,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), "error", err) + slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err)) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index 3f672873c..02a3777ff 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error { rule = strings.TrimPrefix(rule, "/") ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, "error", err) + slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) return false } return ok @@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error { p.match = func(n string, _ os.FileInfo) bool { ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, "error", err) + slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) return false } return ok @@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error { n = filepath.Base(n) ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, "error", err) + slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) return false } return ok diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b38e12693..bd4dbea91 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -249,7 +249,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) if err != nil { - c.Log.Warn("get the relation pod is failed", "error", err) + c.Log.Warn("get the relation pod is failed", slog.Any("error", err)) } } } @@ -441,7 +441,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } if err := updateResource(c, info, originalInfo.Object, force); err != nil { - c.Log.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) updateErrors = append(updateErrors, err.Error()) } // Because we check for errors later, append the info regardless @@ -461,19 +461,19 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err c.Log.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) if err := info.Get(); err != nil { - c.Log.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) continue } annotations, err := 
metadataAccessor.Annotations(info.Object) if err != nil { - c.Log.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) } if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { c.Log.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) continue } if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { - c.Log.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) continue } res.Deleted = append(res.Deleted, info) @@ -506,7 +506,7 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa err := deleteResource(info, propagation) if err == nil || apierrors.IsNotFound(err) { if err != nil { - c.Log.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "error", err) + c.Log.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) } mtx.Lock() defer mtx.Unlock() diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 0751a7217..75598542e 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -104,7 +104,7 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo if err == nil { return false } - hw.log.Debug("error received when checking resource status", "resource", resource.Name, "error", err) + hw.log.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err)) if ev, ok := err.(*apierrors.StatusError); ok { statusCode := ev.Status().Code retryable := hw.isRetryableHTTPStatusCode(statusCode) @@ -139,7 +139,7 @@ func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duratio elapsed := time.Since(startTime).Round(time.Second) if err != nil { - hw.log.Debug("wait for resources failed", "elapsed", elapsed, "error", err) + hw.log.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err)) } else { hw.log.Debug("wait for resources succeeded", "elapsed", elapsed) } diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 83715ac01..dba9a138d 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -70,13 +70,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) { return nil, ErrReleaseNotFound } - cfgmaps.Log.Debug("failed to get release", "key", key, "error", err) + cfgmaps.Log.Debug("failed to get release", "key", key, slog.Any("error", err)) return nil, err } // found the configmap, decode the base64 data string r, err := decodeRelease(obj.Data["release"]) if err != nil { - cfgmaps.Log.Debug("failed to decode data", "key", key, "error", err) + cfgmaps.Log.Debug("failed to decode data", "key", key, slog.Any("error", err)) return nil, err } r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) @@ -93,7 +93,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) 
([]*rspb.Releas list, err := cfgmaps.impl.List(context.Background(), opts) if err != nil { - cfgmaps.Log.Debug("failed to list releases", "error", err) + cfgmaps.Log.Debug("failed to list releases", slog.Any("error", err)) return nil, err } @@ -104,7 +104,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas for _, item := range list.Items { rls, err := decodeRelease(item.Data["release"]) if err != nil { - cfgmaps.Log.Debug("failed to decode release", "item", item, "error", err) + cfgmaps.Log.Debug("failed to decode release", "item", item, slog.Any("error", err)) continue } @@ -132,7 +132,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err list, err := cfgmaps.impl.List(context.Background(), opts) if err != nil { - cfgmaps.Log.Debug("failed to query with labels", "error", err) + cfgmaps.Log.Debug("failed to query with labels", slog.Any("error", err)) return nil, err } @@ -144,7 +144,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err for _, item := range list.Items { rls, err := decodeRelease(item.Data["release"]) if err != nil { - cfgmaps.Log.Debug("failed to decode release", "error", err) + cfgmaps.Log.Debug("failed to decode release", slog.Any("error", err)) continue } rls.Labels = item.ObjectMeta.Labels @@ -166,7 +166,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { // create a new configmap to hold the release obj, err := newConfigMapsObject(key, rls, lbs) if err != nil { - cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, "error", err) + cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err)) return err } // push the configmap object out into the kubiverse @@ -175,7 +175,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { return ErrReleaseExists } - cfgmaps.Log.Debug("failed to create release", "error", err) + cfgmaps.Log.Debug("failed to create release", slog.Any("error", err)) return err } return nil @@ -194,13 +194,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error { // create a new configmap object to hold the release obj, err := newConfigMapsObject(key, rls, lbs) if err != nil { - cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, "error", err) + cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err)) return err } // push the configmap object out into the kubiverse _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) if err != nil { - cfgmaps.Log.Debug("failed to update release", "error", err) + cfgmaps.Log.Debug("failed to update release", slog.Any("error", err)) return err } return nil diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index af6e8591e..5045774e6 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -96,7 +96,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("list failed to decode release", "key", item.Name, "error", err) + secrets.Log.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err)) continue } @@ -135,7 +135,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("failed to 
decode release", "key", item.Name, "error", err) + secrets.Log.Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) continue } rls.Labels = item.ObjectMeta.Labels diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index 7ba317593..9f54de7f8 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -109,7 +109,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) migrate.SetDisableCreateTable(false) if err != nil { - s.Log.Debug("failed to get migration records", "error", err) + s.Log.Debug("failed to get migration records", slog.Any("error", err)) return false } @@ -310,24 +310,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) { query, args, err := qb.ToSql() if err != nil { - s.Log.Debug("failed to build query", "error", err) + s.Log.Debug("failed to build query", slog.Any("error", err)) return nil, err } // Get will return an error if the result is empty if err := s.db.Get(&record, query, args...); err != nil { - s.Log.Debug("got SQL error when getting release", "key", key, "error", err) + s.Log.Debug("got SQL error when getting release", "key", key, slog.Any("error", err)) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Log.Debug("failed to decode data", "key", key, "error", err) + s.Log.Debug("failed to decode data", "key", key, slog.Any("error", err)) return nil, err } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, "error", err) + s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) return nil, err } @@ -348,13 +348,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { query, args, err := sb.ToSql() if err != nil { - s.Log.Debug("failed to build query", "error", err) + s.Log.Debug("failed to build query", slog.Any("error", err)) return nil, err } var records = []SQLReleaseWrapper{} if err := s.db.Select(&records, query, args...); err != nil { - s.Log.Debug("failed to list", "error", err) + s.Log.Debug("failed to list", slog.Any("error", err)) return nil, err } @@ -362,12 +362,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Log.Debug("failed to decode release", "record", record, "error", err) + s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err)) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, "error", err) + s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) return nil, err } for k, v := range getReleaseSystemLabels(release) { @@ -410,13 +410,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { // Build our query query, args, err := sb.ToSql() if err != nil { - s.Log.Debug("failed to build query", "error", err) + s.Log.Debug("failed to build query", slog.Any("error", err)) return nil, err } var records = []SQLReleaseWrapper{} if err := s.db.Select(&records, query, args...); err != nil { - s.Log.Debug("failed to query with labels", "error", err) + 
s.Log.Debug("failed to query with labels", slog.Any("error", err)) return nil, err } @@ -428,12 +428,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Log.Debug("failed to decode release", "record", record, "error", err) + s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err)) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, "error", err) + s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) return nil, err } @@ -457,13 +457,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { body, err := encodeRelease(rls) if err != nil { - s.Log.Debug("failed to encode release", "error", err) + s.Log.Debug("failed to encode release", slog.Any("error", err)) return err } transaction, err := s.db.Beginx() if err != nil { - s.Log.Debug("failed to start SQL transaction", "error", err) + s.Log.Debug("failed to start SQL transaction", slog.Any("error", err)) return fmt.Errorf("error beginning transaction: %v", err) } @@ -492,7 +492,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { int(time.Now().Unix()), ).ToSql() if err != nil { - s.Log.Debug("failed to build insert query", "error", err) + s.Log.Debug("failed to build insert query", slog.Any("error", err)) return err } @@ -516,7 +516,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { return ErrReleaseExists } - s.Log.Debug("failed to store release in SQL database", "key", key, "error", err) + s.Log.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err)) return err } @@ -539,13 +539,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error { if err != nil { defer transaction.Rollback() - s.Log.Debug("failed to build insert query", "error", err) + s.Log.Debug("failed to build insert query", slog.Any("error", err)) return err } if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil { defer transaction.Rollback() - s.Log.Debug("failed to write Labels", "error", err) + s.Log.Debug("failed to write Labels", slog.Any("error", err)) return err } } @@ -564,7 +564,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { body, err := encodeRelease(rls) if err != nil { - s.Log.Debug("failed to encode release", "error", err) + s.Log.Debug("failed to encode release", slog.Any("error", err)) return err } @@ -581,12 +581,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { ToSql() if err != nil { - s.Log.Debug("failed to build update query", "error", err) + s.Log.Debug("failed to build update query", slog.Any("error", err)) return err } if _, err := s.db.Exec(query, args...); err != nil { - s.Log.Debug("failed to update release in SQL database", "key", key, "error", err) + s.Log.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err)) return err } @@ -597,7 +597,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error { func (s *SQL) Delete(key string) (*rspb.Release, error) { transaction, err := s.db.Beginx() if err != nil { - s.Log.Debug("failed to start SQL transaction", "error", err) + s.Log.Debug("failed to start SQL transaction", slog.Any("error", err)) return nil, fmt.Errorf("error beginning transaction: %v", err) } @@ -608,20 +608,20 @@ func (s 
*SQL) Delete(key string) (*rspb.Release, error) { Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). ToSql() if err != nil { - s.Log.Debug("failed to build select query", "error", err) + s.Log.Debug("failed to build select query", slog.Any("error", err)) return nil, err } var record SQLReleaseWrapper err = transaction.Get(&record, selectQuery, args...) if err != nil { - s.Log.Debug("release not found", "key", key, "error", err) + s.Log.Debug("release not found", "key", key, slog.Any("error", err)) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Log.Debug("failed to decode release", "key", key, "error", err) + s.Log.Debug("failed to decode release", "key", key, slog.Any("error", err)) transaction.Rollback() return nil, err } @@ -633,18 +633,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). ToSql() if err != nil { - s.Log.Debug("failed to build delete query", "error", err) + s.Log.Debug("failed to build delete query", slog.Any("error", err)) return nil, err } _, err = transaction.Exec(deleteQuery, args...) if err != nil { - s.Log.Debug("failed perform delete query", "error", err) + s.Log.Debug("failed perform delete query", slog.Any("error", err)) return release, err } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, "error", err) + s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) return nil, err } @@ -655,7 +655,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { ToSql() if err != nil { - s.Log.Debug("failed to build delete Labels query", "error", err) + s.Log.Debug("failed to build delete Labels query", slog.Any("error", err)) return nil, err } _, err = transaction.Exec(deleteCustomLabelsQuery, args...) From 0740dfc7a96d65420426bd4607439f5794093dc9 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Mon, 7 Apr 2025 13:40:29 -0400 Subject: [PATCH 153/192] Unarchiving fix Signed-off-by: Matt Farina --- pkg/chart/v2/loader/archive.go | 32 +++++++++++++++++++++++++++++++- pkg/chart/v2/loader/directory.go | 4 ++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/pkg/chart/v2/loader/archive.go b/pkg/chart/v2/loader/archive.go index cb6d3bfe8..655fe87fa 100644 --- a/pkg/chart/v2/loader/archive.go +++ b/pkg/chart/v2/loader/archive.go @@ -33,6 +33,15 @@ import ( chart "helm.sh/helm/v4/pkg/chart/v2" ) +// MaxDecompressedChartSize is the maximum size of a chart archive that will be +// decompressed. This is the decompressed size of all the files. +// The default value is 100 MiB. +var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB + +// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load. +// The size of the file is the decompressed version of it when it is stored in an archive. 
+var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB + var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) // FileLoader loads a chart from a file @@ -119,6 +128,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { files := []*BufferedFile{} tr := tar.NewReader(unzipped) + remainingSize := MaxDecompressedChartSize for { b := bytes.NewBuffer(nil) hd, err := tr.Next() @@ -178,10 +188,30 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { return nil, errors.New("chart yaml not in base directory") } - if _, err := io.Copy(b, tr); err != nil { + if hd.Size > remainingSize { + return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize) + } + + if hd.Size > MaxDecompressedFileSize { + return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize) + } + + limitedReader := io.LimitReader(tr, remainingSize) + + bytesWritten, err := io.Copy(b, limitedReader) + if err != nil { return nil, err } + remainingSize -= bytesWritten + // When the bytesWritten are less than the file size it means the limit reader ended + // copying early. Here we report that error. This is important if the last file extracted + // is the one that goes over the limit. It assumes the Size stored in the tar header + // is correct, something many applications do. + if bytesWritten < hd.Size || remainingSize <= 0 { + return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize) + } + data := bytes.TrimPrefix(b.Bytes(), utf8bom) files = append(files, &BufferedFile{Name: n, Data: data}) diff --git a/pkg/chart/v2/loader/directory.go b/pkg/chart/v2/loader/directory.go index 37b24d3f9..dbf3eb882 100644 --- a/pkg/chart/v2/loader/directory.go +++ b/pkg/chart/v2/loader/directory.go @@ -101,6 +101,10 @@ func LoadDir(dir string) (*chart.Chart, error) { return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) } + if fi.Size() > MaxDecompressedFileSize { + return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize) + } + data, err := os.ReadFile(name) if err != nil { return errors.Wrapf(err, "error reading %s", n) From 9b636902c6136d8624a1798b3c02b593d8b8b58a Mon Sep 17 00:00:00 2001 From: zanuka Date: Fri, 21 Mar 2025 16:03:37 -0700 Subject: [PATCH 154/192] updates mutate and validate web hook configs Signed-off-by: Mike Delucchi --- pkg/release/util/kind_sorter.go | 4 ++++ pkg/release/util/kind_sorter_test.go | 12 ++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/release/util/kind_sorter.go b/pkg/release/util/kind_sorter.go index 22795733c..72f99e115 100644 --- a/pkg/release/util/kind_sorter.go +++ b/pkg/release/util/kind_sorter.go @@ -65,12 +65,16 @@ var InstallOrder KindSortOrder = []string{ "IngressClass", "Ingress", "APIService", + "MutatingWebhookConfiguration", + "ValidatingWebhookConfiguration", } // UninstallOrder is the order in which manifests should be uninstalled (by Kind). // // Those occurring earlier in the list get uninstalled before those occurring later in the list. 
var UninstallOrder KindSortOrder = []string{ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration", "APIService", "Ingress", "IngressClass", diff --git a/pkg/release/util/kind_sorter_test.go b/pkg/release/util/kind_sorter_test.go index 00d80ecf2..919de24e5 100644 --- a/pkg/release/util/kind_sorter_test.go +++ b/pkg/release/util/kind_sorter_test.go @@ -173,6 +173,14 @@ func TestKindSorter(t *testing.T) { Name: "F", Head: &SimpleHead{Kind: "PriorityClass"}, }, + { + Name: "M", + Head: &SimpleHead{Kind: "MutatingWebhookConfiguration"}, + }, + { + Name: "V", + Head: &SimpleHead{Kind: "ValidatingWebhookConfiguration"}, + }, } for _, test := range []struct { @@ -180,8 +188,8 @@ func TestKindSorter(t *testing.T) { order KindSortOrder expected string }{ - {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvw!"}, - {"uninstall", UninstallOrder, "wvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, + {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"}, + {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, } { var buf bytes.Buffer t.Run(test.description, func(t *testing.T) { From c121b6b83ee5a8c4439a347704fcc61032d2dbe6 Mon Sep 17 00:00:00 2001 From: Mike Delucchi Date: Fri, 28 Mar 2025 13:32:09 -0700 Subject: [PATCH 155/192] changes order of operations Signed-off-by: Mike Delucchi --- pkg/release/util/kind_sorter.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/release/util/kind_sorter.go b/pkg/release/util/kind_sorter.go index 72f99e115..27b44cc36 100644 --- a/pkg/release/util/kind_sorter.go +++ b/pkg/release/util/kind_sorter.go @@ -64,18 +64,18 @@ var InstallOrder KindSortOrder = []string{ "CronJob", "IngressClass", "Ingress", - "APIService", "MutatingWebhookConfiguration", "ValidatingWebhookConfiguration", + "APIService", } // UninstallOrder is the order in which manifests should be uninstalled (by Kind). // // Those occurring earlier in the list get uninstalled before those occurring later in the list. var UninstallOrder KindSortOrder = []string{ + "APIService", "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration", - "APIService", "Ingress", "IngressClass", "Service", From 1003a3c93f42b47293f3e53cacc51e09bf59278a Mon Sep 17 00:00:00 2001 From: Mike Delucchi Date: Mon, 31 Mar 2025 12:24:56 -0700 Subject: [PATCH 156/192] fixes kind_sorter_test to match new order Signed-off-by: Mike Delucchi --- pkg/release/util/kind_sorter_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/release/util/kind_sorter_test.go b/pkg/release/util/kind_sorter_test.go index 919de24e5..2a607ddd8 100644 --- a/pkg/release/util/kind_sorter_test.go +++ b/pkg/release/util/kind_sorter_test.go @@ -188,8 +188,8 @@ func TestKindSorter(t *testing.T) { order KindSortOrder expected string }{ - {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"}, - {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, + {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvMVw!"}, + {"uninstall", UninstallOrder, "wVMvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, } { var buf bytes.Buffer t.Run(test.description, func(t *testing.T) { From e1425f1aa56dbe1d51d40a721bb302707a7c9041 Mon Sep 17 00:00:00 2001 From: Mike Delucchi Date: Fri, 4 Apr 2025 10:44:42 -0700 Subject: [PATCH 157/192] fix: correct webhook order to match Kubernetes admission flow Place APIService before webhooks, with MutatingWebhookConfiguration before ValidatingWebhookConfiguration to match standard admission control. 
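
As an illustration (this sketch is not part of the patch; the abbreviated kind list and helper names are stand-ins for the real sorter in pkg/release/util/kind_sorter.go), ordering manifests by a KindSortOrder amounts to a stable sort of kinds by their index in the list, with unknown kinds placed after all known ones:

```
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Abbreviated install order; the real list is much longer.
	installOrder := []string{
		"Namespace", "ServiceAccount", "Service", "Deployment",
		"APIService", "MutatingWebhookConfiguration", "ValidatingWebhookConfiguration",
	}

	// Rank each kind by its position; unknown kinds sort after known ones.
	rank := make(map[string]int, len(installOrder))
	for i, k := range installOrder {
		rank[k] = i
	}
	pos := func(kind string) int {
		if r, ok := rank[kind]; ok {
			return r
		}
		return len(installOrder)
	}

	kinds := []string{
		"ValidatingWebhookConfiguration", "Deployment",
		"MutatingWebhookConfiguration", "APIService",
	}
	// Stable sort keeps relative order for kinds with equal rank.
	sort.SliceStable(kinds, func(i, j int) bool { return pos(kinds[i]) < pos(kinds[j]) })
	fmt.Println(kinds)
	// Output: [Deployment APIService MutatingWebhookConfiguration ValidatingWebhookConfiguration]
}
```

With the order fixed this way, mutating configurations land before validating ones on install, mirroring the order in which the API server runs admission webhooks.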
Signed-off-by: Mike Delucchi --- pkg/release/util/kind_sorter.go | 5 +++-- pkg/release/util/kind_sorter_test.go | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/release/util/kind_sorter.go b/pkg/release/util/kind_sorter.go index 27b44cc36..bc074340f 100644 --- a/pkg/release/util/kind_sorter.go +++ b/pkg/release/util/kind_sorter.go @@ -64,18 +64,19 @@ var InstallOrder KindSortOrder = []string{ "CronJob", "IngressClass", "Ingress", + "APIService", "MutatingWebhookConfiguration", "ValidatingWebhookConfiguration", - "APIService", } // UninstallOrder is the order in which manifests should be uninstalled (by Kind). // // Those occurring earlier in the list get uninstalled before those occurring later in the list. var UninstallOrder KindSortOrder = []string{ - "APIService", + // For uninstall, we remove validation before mutation to ensure webhooks don't block removal "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration", + "APIService", "Ingress", "IngressClass", "Service", diff --git a/pkg/release/util/kind_sorter_test.go b/pkg/release/util/kind_sorter_test.go index 2a607ddd8..919de24e5 100644 --- a/pkg/release/util/kind_sorter_test.go +++ b/pkg/release/util/kind_sorter_test.go @@ -188,8 +188,8 @@ func TestKindSorter(t *testing.T) { order KindSortOrder expected string }{ - {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvMVw!"}, - {"uninstall", UninstallOrder, "wVMvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, + {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"}, + {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, } { var buf bytes.Buffer t.Run(test.description, func(t *testing.T) { From b29bc3a44d9af174d2cde2370bb09af103fdb32c Mon Sep 17 00:00:00 2001 From: Robert Sirchia Date: Wed, 9 Apr 2025 09:43:19 -0400 Subject: [PATCH 158/192] manually updating go.mod file Signed-off-by: Robert Sirchia --- go.mod | 2 +- go.sum | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index bfc55057a..66bb60fb3 100644 --- a/go.mod +++ b/go.mod @@ -129,7 +129,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - github.com/redis/go-redis/v9 v9.1.0 // indirect + github.com/redis/go-redis/v9 v9.6.3 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index 1153931d8..a0e23b98b 100644 --- a/go.sum +++ b/go.sum @@ -37,10 +37,11 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= -github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= -github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/gomega 
v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -288,8 +289,8 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0= +github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4= From db76da32aca573380a406beeac6a5deff66d68a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 15:18:18 +0000 Subject: [PATCH 159/192] build(deps): bump golang.org/x/crypto from 0.36.0 to 0.37.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.36.0 to 0.37.0. - [Commits](https://github.com/golang/crypto/compare/v0.36.0...v0.37.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.37.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 66bb60fb3..aef4a656d 100644 --- a/go.mod +++ b/go.mod @@ -33,9 +33,9 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.36.0 - golang.org/x/term v0.30.0 - golang.org/x/text v0.23.0 + golang.org/x/crypto v0.37.0 + golang.org/x/term v0.31.0 + golang.org/x/text v0.24.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -164,8 +164,8 @@ require ( golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.37.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect diff --git a/go.sum b/go.sum index a0e23b98b..456e1cfcf 100644 --- a/go.sum +++ b/go.sum @@ -394,8 +394,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -431,8 +431,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -454,8 +454,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -463,8 +463,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

From 55eb53e3a0ce1ccfb9f444adeac695df4d6513c9 Mon Sep 17 00:00:00 2001
From: Rostyslav Polishchuk
Date: Thu, 10 Apr 2025 00:18:22 +0000
Subject: [PATCH 160/192] fix: order-dependent test

TestInstallRelease_Atomic_Interrupted needs the same wait as
TestInstallRelease_Wait_Interrupted (see helm/helm#12088). The
installation goroutine started by TestInstallRelease_Atomic_Interrupted
proceeds in the background and may interfere with other tests (see
helm/helm#30610).

Also see helm/helm#12086 and helm/helm#12109, which describe and address
the root cause.
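
For context only (this sketch is not part of the change, and its names are illustrative rather than taken from the Helm test suite), the goroutine-counting pattern the test relies on can be shown as a standalone Go program: snapshot runtime.NumGoroutine() before starting background work, observe the extra goroutine while it runs, then wait and check that the count returns to the baseline.

```
package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	baseline := runtime.NumGoroutine()

	done := make(chan struct{})
	go func() { // stand-in for the background installation goroutine
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()

	// While the worker runs, the count sits one above the baseline.
	fmt.Println("extra goroutine running:", runtime.NumGoroutine() == baseline+1)

	<-done
	time.Sleep(10 * time.Millisecond) // give the scheduler a moment to reap it
	fmt.Println("back to baseline:", runtime.NumGoroutine() == baseline)
}
```

A fixed sleep, as in the test change below, is the simplest way to let the runtime reap the finished goroutine before re-checking the count.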
Signed-off-by: Rostyslav Polishchuk
---
 pkg/action/install_test.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go
index aafda86c2..b2d147188 100644
--- a/pkg/action/install_test.go
+++ b/pkg/action/install_test.go
@@ -521,6 +521,8 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	time.AfterFunc(time.Second, cancel)
 
+	goroutines := runtime.NumGoroutine()
+
 	res, err := instAction.RunWithContext(ctx, buildChart(), vals)
 	is.Error(err)
 	is.Contains(err.Error(), "context canceled")
@@ -531,6 +533,9 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
 	_, err = instAction.cfg.Releases.Get(res.Name, res.Version)
 	is.Error(err)
 	is.Equal(err, driver.ErrReleaseNotFound)
+	is.Equal(goroutines+1, runtime.NumGoroutine()) // installation goroutine still is in background
+	time.Sleep(10 * time.Second) // wait for goroutine to finish
+	is.Equal(goroutines, runtime.NumGoroutine())
 }
 
 func TestNameTemplate(t *testing.T) {

From 6b5fa336331a8cfa0d325632bea6fd60871cb747 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 10 Apr 2025 09:56:47 +0200
Subject: [PATCH 161/192] debug log level is dynamic and set after Logger creation

The debug flag is only known after the Logger is created, so we should
use a dynamic handler that resolves the log level at log time. With this
patch we can clearly see the output; before, we were always stuck at log
level "info" and debug messages never appeared:

```
bin/helm upgrade --install --debug --wait frontend \
	--namespace test \
	--set replicaCount=2 \
	--set backend=http://backend-podinfo:9898/echo \
	podinfo/podinfo
level=DEBUG msg="getting history for release" release=frontend
level=DEBUG msg="preparing upgrade" name=frontend
level=DEBUG msg="performing update" name=frontend
level=DEBUG msg="creating upgraded release" name=frontend
level=DEBUG msg="checking resources for changes" resources=2
level=DEBUG msg="no changes detected" kind=Service name=frontend-podinfo
level=DEBUG msg="patching resource" kind=Deployment name=frontend-podinfo namespace=test
level=DEBUG msg="waiting for resources" count=2 timeout=5m0s
level=DEBUG msg="waiting for resource" name=frontend-podinfo kind=Deployment expectedStatus=Current actualStatus=Unknown
level=DEBUG msg="updating status for upgraded release" name=frontend
Release "frontend" has been upgraded. Happy Helming!
NAME: frontend
LAST DEPLOYED: Thu Apr 10 09:56:25 2025
NAMESPACE: test
STATUS: deployed
REVISION: 6
DESCRIPTION: Upgrade complete
```

Signed-off-by: Benoit Tigeot
---
 pkg/cli/logger.go | 56 +++++++++++++++---
 pkg/cmd/root.go | 2 +-
 .../issue-7233/charts/alpine-0.1.0.tgz | Bin 0 -> 1167 bytes
 3 files changed, 49 insertions(+), 9 deletions(-)
 create mode 100644 pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz

diff --git a/pkg/cli/logger.go b/pkg/cli/logger.go
index d75622c37..03a69be24 100644
--- a/pkg/cli/logger.go
+++ b/pkg/cli/logger.go
@@ -17,19 +17,53 @@ limitations under the License.
package cli import ( + "context" "log/slog" "os" ) -func NewLogger(debug bool) *slog.Logger { - level := slog.LevelInfo - if debug { - level = slog.LevelDebug +// DebugCheckHandler checks settings.Debug at log time +type DebugCheckHandler struct { + handler slog.Handler + settings *EnvSettings +} + +// Enabled implements slog.Handler.Enabled +func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool { + if level == slog.LevelDebug { + return h.settings.Debug // Check settings.Debug at log time } + return true // Always log other levels +} - // Create a handler that removes timestamps - handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ - Level: level, +// Handle implements slog.Handler.Handle +func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error { + return h.handler.Handle(ctx, r) +} + +// WithAttrs implements slog.Handler.WithAttrs +func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &DebugCheckHandler{ + handler: h.handler.WithAttrs(attrs), + settings: h.settings, + } +} + +// WithGroup implements slog.Handler.WithGroup +func (h *DebugCheckHandler) WithGroup(name string) slog.Handler { + return &DebugCheckHandler{ + handler: h.handler.WithGroup(name), + settings: h.settings, + } +} + +// NewLogger creates a new logger with dynamic debug checking +func NewLogger(settings *EnvSettings) *slog.Logger { + // Create base handler that removes timestamps + baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + // Always use LevelDebug here to allow all messages through + // Our custom handler will do the filtering + Level: slog.LevelDebug, ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { // Remove the time attribute if a.Key == slog.TimeKey { @@ -39,5 +73,11 @@ func NewLogger(debug bool) *slog.Logger { }, }) - return slog.New(handler) + // Wrap with our dynamic debug-checking handler + dynamicHandler := &DebugCheckHandler{ + handler: baseHandler, + settings: settings, + } + + return slog.New(dynamicHandler) } diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 0cbcfebaf..cbef840b3 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -95,7 +95,7 @@ By default, the default directories depend on the Operating System. 
The defaults ` var settings = cli.New() -var Logger = cli.NewLogger(settings.Debug) +var Logger = cli.NewLogger(settings) func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { actionConfig := new(action.Configuration) diff --git a/pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz b/pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..afd021846ed6a2f7cc2cf023ed188a7cf2f9d189 GIT binary patch literal 1167 zcmV;A1aSKwiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI-XZsRr+&b6LmpuJto@*y$Scfr34{EN1WMOrLS6h%Fj#ugEZ zR7uK;>-D=AJjjWaZc?CYrw7fAAd37rGn{W`DC89rH2hzI$|PGX`Nh|lG)>d1>C`>b zH0?gq@#W>kXgZrsXS2~by}C$8dd9SW<{}927eIliq6m!^& zBCM*zYdlHb#8FN-#>=q*)TZUJG5nq_e9f(O23qP~Ml=20O_nnPhsrRT$8LA*?K z;hvE|`^kq}q-Cu#((`C=n7n4DsFz75OE=#y+O)c)$tX#qmv+{_Py+uq$ZOIkN&wIC zKOLuC{?F2@p8w~N4~~}Qb`Y5P()#prUJ3j+R8|}f>7gGOR5Jf++29%ek0;Y{hyRz; zY0v+&NT>eaGLg^Wqs*g{4CZKX9s&5;9q)F@4RJzEiA@{({b09CKKaVw2jU2T4eE)i2~P@50=~5F94>Y)|7*hU=(Jz&=f2yz(~mhRPLG& z$^l``HY6Z(O)I=NVezWwu#yTeFPYHL6cQQ~#zJZ$XbLm|N_jIhAXKOf%W96w?PZ}9 z=}HRCmYghJ;ubw+!yF#C=6g~bmJxi0Uu$Uy_WP$@!Gty_GKwLSVnf1qT2SIGX5n!u#05lSibC4@A1;IB5RBM25u)q{(pdm$&DMDkNr=7`uRo5Y3GPTw5$WVLa zT`M0iJ+yGU9VGsmaeZhqA3BHW$5vyVGvm)0YK`llVB1)_4?ZwG@_ktP_ppr*OaR~I zI3te2HqsSkHe!Pwx{!^ALN->1T3gR+R#u!mLgHsNjC0^p-uj?}3bm$uz=WUW;4+tHl)A88ky z+)*+DyRAVcNVz;Q2nnV^W=Oe{VkNF^%JJ1`{)O1_r<%#KM4PsLiib-khME&q@$2|a znx^s3eMj@8g!+H;?)vR_?~*b<#U9V~|Cd*@ZvUT-`}_Y{H$l1UW(C~L@2fGduigL&u(Z@M?PosbL<7QghdA0+Uf?u^1;PV^Vx z+57)w(&7JfG`j5he-1IEjjh4{KY$B^fe(YfPmK0*Itl!@&ETo%|0nq0z5h>UlS$A2 hbI39OZ5Z_Q@1>VsdigKN?*RY+|NrPaN*Dkj008|nJ|6%8 literal 0 HcmV?d00001 From cbaac7652d81917e99408fad1b7729d2a6a5e9f7 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Thu, 10 Apr 2025 15:06:03 +0200 Subject: [PATCH 162/192] Call slog directly instead of using a wrapper Signed-off-by: Benoit Tigeot --- cmd/helm/helm.go | 4 +- internal/monocular/client.go | 5 --- pkg/action/action.go | 17 +++----- pkg/action/action_test.go | 4 +- pkg/action/history.go | 4 +- pkg/action/install.go | 22 +++++----- pkg/action/rollback.go | 25 +++++------ pkg/action/uninstall.go | 18 ++++---- pkg/action/upgrade.go | 30 ++++++------- pkg/cmd/flags.go | 5 ++- pkg/cmd/helpers_test.go | 2 - pkg/cmd/install.go | 9 ++-- pkg/cmd/list.go | 2 +- pkg/cmd/plugin.go | 3 +- pkg/cmd/plugin_install.go | 3 +- pkg/cmd/plugin_list.go | 3 +- pkg/cmd/plugin_uninstall.go | 3 +- pkg/cmd/plugin_update.go | 5 ++- pkg/cmd/pull.go | 3 +- pkg/cmd/registry_login.go | 3 +- pkg/cmd/root.go | 7 ++- pkg/cmd/search_hub.go | 2 +- pkg/cmd/search_repo.go | 8 ++-- pkg/cmd/show.go | 5 ++- pkg/cmd/upgrade.go | 5 ++- pkg/kube/client.go | 41 +++++++++--------- pkg/kube/client_test.go | 2 - pkg/kube/ready.go | 48 +++++++++------------ pkg/kube/ready_test.go | 39 ----------------- pkg/kube/statuswait.go | 17 ++++---- pkg/kube/statuswait_test.go | 7 --- pkg/kube/wait.go | 43 +++++++++---------- pkg/storage/driver/cfgmaps.go | 21 +++++---- pkg/storage/driver/mock_test.go | 3 -- pkg/storage/driver/secrets.go | 5 +-- pkg/storage/driver/sql.go | 75 ++++++++++++++++----------------- pkg/storage/storage.go | 28 ++++++------ pkg/storage/storage_test.go | 3 -- 38 files changed, 231 insertions(+), 298 deletions(-) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 11c5e8769..273ead226 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -41,12 +41,12 @@ func main() { cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:]) if err != nil { - 
helmcmd.Logger.Warn("command failed", slog.Any("error", err)) + slog.Warn("command failed", slog.Any("error", err)) os.Exit(1) } if err := cmd.Execute(); err != nil { - helmcmd.Logger.Debug("error", slog.Any("error", err)) + slog.Debug("error", slog.Any("error", err)) switch e := err.(type) { case helmcmd.PluginError: os.Exit(e.Code) diff --git a/internal/monocular/client.go b/internal/monocular/client.go index 452bc36e4..f4ef5d647 100644 --- a/internal/monocular/client.go +++ b/internal/monocular/client.go @@ -18,8 +18,6 @@ package monocular import ( "errors" - "io" - "log/slog" "net/url" ) @@ -31,8 +29,6 @@ type Client struct { // The base URL for requests BaseURL string - - Log *slog.Logger } // New creates a new client @@ -45,7 +41,6 @@ func New(u string) (*Client, error) { return &Client{ BaseURL: u, - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), }, nil } diff --git a/pkg/action/action.go b/pkg/action/action.go index d4f917b9f..09c1887bb 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -96,8 +96,6 @@ type Configuration struct { // Capabilities describes the capabilities of the Kubernetes cluster. Capabilities *chartutil.Capabilities - Log *slog.Logger - // HookOutputFunc called with container name and returns and expects writer that will receive the log output. HookOutputFunc func(namespace, pod, container string) io.Writer } @@ -267,8 +265,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { apiVersions, err := GetVersionSet(dc) if err != nil { if discovery.IsGroupDiscoveryFailedError(err) { - cfg.Log.Warn("the kubernetes server has an orphaned API service", "errors", err) - cfg.Log.Warn("to fix this, kubectl delete apiservice ") + slog.Warn("the kubernetes server has an orphaned API service", "errors", err) + slog.Warn("to fix this, kubectl delete apiservice ") } else { return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") } @@ -367,29 +365,28 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version // recordRelease with an update operation in case reuse has been set. 
func (cfg *Configuration) recordRelease(r *release.Release) { if err := cfg.Releases.Update(r); err != nil { - cfg.Log.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err)) + slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err)) } } // Init initializes the action configuration -func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log *slog.Logger) error { +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error { kc := kube.New(getter) - kc.Log = log lazyClient := &lazyClient{ namespace: namespace, clientFn: kc.Factory.KubernetesClientSet, } + // slog.SetDefault() + var store *storage.Storage switch helmDriver { case "secret", "secrets", "": d := driver.NewSecrets(newSecretClient(lazyClient)) - d.Log = log store = storage.Init(d) case "configmap", "configmaps": d := driver.NewConfigMaps(newConfigMapClient(lazyClient)) - d.Log = log store = storage.Init(d) case "memory": var d *driver.Memory @@ -409,7 +406,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp case "sql": d, err := driver.NewSQL( os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"), - log, namespace, ) if err != nil { @@ -423,7 +419,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp cfg.RESTClientGetter = getter cfg.KubeClient = kc cfg.Releases = store - cfg.Log = log cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard } return nil diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index ee967714c..f544d3281 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -56,6 +56,7 @@ func actionConfigFixture(t *testing.T) *Configuration { }) logger = slog.New(handler) } + slog.SetDefault(logger) registryClient, err := registry.NewClient() if err != nil { @@ -67,7 +68,6 @@ func actionConfigFixture(t *testing.T) *Configuration { KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}}, Capabilities: chartutil.DefaultCapabilities, RegistryClient: registryClient, - Log: logger, } } @@ -347,7 +347,7 @@ func TestConfiguration_Init(t *testing.T) { t.Run(tt.name, func(t *testing.T) { cfg := &Configuration{} - actualErr := cfg.Init(nil, "default", tt.helmDriver, nil) + actualErr := cfg.Init(nil, "default", tt.helmDriver) if tt.expectErr { assert.Error(t, actualErr) assert.Contains(t, actualErr.Error(), tt.errMsg) diff --git a/pkg/action/history.go b/pkg/action/history.go index 289118592..b8e472195 100644 --- a/pkg/action/history.go +++ b/pkg/action/history.go @@ -17,6 +17,8 @@ limitations under the License. package action import ( + "log/slog" + "github.com/pkg/errors" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -53,6 +55,6 @@ func (h *History) Run(name string) ([]*release.Release, error) { return nil, errors.Errorf("release name is invalid: %s", name) } - h.cfg.Log.Debug("getting history for release", "release", name) + slog.Debug("getting history for release", "release", name) return h.cfg.Releases.History(name) } diff --git a/pkg/action/install.go b/pkg/action/install.go index 3f16969ae..25c48c762 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -173,7 +173,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // If the error is CRD already exists, continue. 
if apierrors.IsAlreadyExists(err) { crdName := res[0].Name - i.cfg.Log.Debug("CRD is already present. Skipping", "crd", crdName) + slog.Debug("CRD is already present. Skipping", "crd", crdName) continue } return errors.Wrapf(err, "failed to install CRD %s", obj.Name) @@ -201,7 +201,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { return err } - i.cfg.Log.Debug("clearing discovery cache") + slog.Debug("clearing discovery cache") discoveryClient.Invalidate() _, _ = discoveryClient.ServerGroups() @@ -214,7 +214,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { return err } if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { - i.cfg.Log.Debug("clearing REST mapper cache") + slog.Debug("clearing REST mapper cache") resettable.Reset() } } @@ -238,24 +238,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`) if !i.ClientOnly { if err := i.cfg.KubeClient.IsReachable(); err != nil { - i.cfg.Log.Error(fmt.Sprintf("cluster reachability check failed: %v", err)) + slog.Error(fmt.Sprintf("cluster reachability check failed: %v", err)) return nil, errors.Wrap(err, "cluster reachability check failed") } } // HideSecret must be used with dry run. Otherwise, return an error. if !i.isDryRun() && i.HideSecret { - i.cfg.Log.Error("hiding Kubernetes secrets requires a dry-run mode") + slog.Error("hiding Kubernetes secrets requires a dry-run mode") return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode") } if err := i.availableName(); err != nil { - i.cfg.Log.Error("release name check failed", slog.Any("error", err)) + slog.Error("release name check failed", slog.Any("error", err)) return nil, errors.Wrap(err, "release name check failed") } if err := chartutil.ProcessDependencies(chrt, vals); err != nil { - i.cfg.Log.Error("chart dependencies processing failed", slog.Any("error", err)) + slog.Error("chart dependencies processing failed", slog.Any("error", err)) return nil, errors.Wrap(err, "chart dependencies processing failed") } @@ -269,7 +269,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 { // On dry run, bail here if i.isDryRun() { - i.cfg.Log.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") + slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") } else if err := i.installCRDs(crds); err != nil { return nil, err } @@ -289,7 +289,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma mem.SetNamespace(i.Namespace) i.cfg.Releases = storage.Init(mem) } else if !i.ClientOnly && len(i.APIVersions) > 0 { - i.cfg.Log.Debug("API Version list given outside of client only mode, this list will be ignored") + slog.Debug("API Version list given outside of client only mode, this list will be ignored") } // Make sure if Atomic is set, that wait is set as well. This makes it so @@ -506,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource // One possible strategy would be to do a timed retry to see if we can get // this stored in the future. 
if err := i.recordRelease(rel); err != nil { - i.cfg.Log.Error("failed to record the release", slog.Any("error", err)) + slog.Error("failed to record the release", slog.Any("error", err)) } return rel, nil @@ -515,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) if i.Atomic { - i.cfg.Log.Debug("install failed, uninstalling release", "release", i.ReleaseName) + slog.Debug("install failed, uninstalling release", "release", i.ReleaseName) uninstall := NewUninstall(i.cfg) uninstall.DisableHooks = i.DisableHooks uninstall.KeepHistory = false diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 4e61fe872..34bd0ac52 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -19,6 +19,7 @@ package action import ( "bytes" "fmt" + "log/slog" "strings" "time" @@ -63,26 +64,26 @@ func (r *Rollback) Run(name string) error { r.cfg.Releases.MaxHistory = r.MaxHistory - r.cfg.Log.Debug("preparing rollback", "name", name) + slog.Debug("preparing rollback", "name", name) currentRelease, targetRelease, err := r.prepareRollback(name) if err != nil { return err } if !r.DryRun { - r.cfg.Log.Debug("creating rolled back release", "name", name) + slog.Debug("creating rolled back release", "name", name) if err := r.cfg.Releases.Create(targetRelease); err != nil { return err } } - r.cfg.Log.Debug("performing rollback", "name", name) + slog.Debug("performing rollback", "name", name) if _, err := r.performRollback(currentRelease, targetRelease); err != nil { return err } if !r.DryRun { - r.cfg.Log.Debug("updating status for rolled back release", "name", name) + slog.Debug("updating status for rolled back release", "name", name) if err := r.cfg.Releases.Update(targetRelease); err != nil { return err } @@ -129,7 +130,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele return nil, nil, errors.Errorf("release has no %d version", previousVersion) } - r.cfg.Log.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion) + slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion) previousRelease, err := r.cfg.Releases.Get(name, previousVersion) if err != nil { @@ -162,7 +163,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { if r.DryRun { - r.cfg.Log.Debug("dry run", "name", targetRelease.Name) + slog.Debug("dry run", "name", targetRelease.Name) return targetRelease, nil } @@ -181,7 +182,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas return targetRelease, err } } else { - r.cfg.Log.Debug("rollback hooks disabled", "name", targetRelease.Name) + slog.Debug("rollback hooks disabled", "name", targetRelease.Name) } // It is safe to use "force" here because these are resources currently rendered by the chart. 
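For reference, the package-level slog calls that replace the injected logger throughout this patch take a message followed by alternating key/value attributes. A self-contained sketch of what a rollback debug line like the ones below renders as under a text handler:

package main

import (
	"log/slog"
	"os"
)

func main() {
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})))

	name, currentVersion, targetVersion := "myrelease", 3, 2
	// Same shape as the call sites in this patch: message, then key/value pairs.
	slog.Debug("rolling back", "name", name, "currentVersion", currentVersion, "targetVersion", targetVersion)
	// Prints (timestamp varies):
	// time=... level=DEBUG msg="rolling back" name=myrelease currentVersion=3 targetVersion=2
}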
@@ -193,14 +194,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas if err != nil { msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) - r.cfg.Log.Warn(msg) + slog.Warn(msg) currentRelease.Info.Status = release.StatusSuperseded targetRelease.Info.Status = release.StatusFailed targetRelease.Info.Description = msg r.cfg.recordRelease(currentRelease) r.cfg.recordRelease(targetRelease) if r.CleanupOnFail { - r.cfg.Log.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created)) + slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created)) _, errs := r.cfg.KubeClient.Delete(results.Created) if errs != nil { var errorList []string @@ -209,7 +210,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) } - r.cfg.Log.Debug("resource cleanup complete") + slog.Debug("resource cleanup complete") } return targetRelease, err } @@ -220,7 +221,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // levels, we should make these error level logs so users are notified // that they'll need to go do the cleanup on their own if err := recreate(r.cfg, results.Updated); err != nil { - r.cfg.Log.Error(err.Error()) + slog.Error(err.Error()) } } waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) @@ -256,7 +257,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } // Supersede all previous deployments, see issue #2941. for _, rel := range deployed { - r.cfg.Log.Debug("superseding previous deployment", "version", rel.Version) + slog.Debug("superseding previous deployment", "version", rel.Version) rel.Info.Status = release.StatusSuperseded r.cfg.recordRelease(rel) } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index c3835042f..b842d9933 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -105,7 +105,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return nil, errors.Errorf("the release named %q is already deleted", name) } - u.cfg.Log.Debug("uninstall: deleting release", "name", name) + slog.Debug("uninstall: deleting release", "name", name) rel.Info.Status = release.StatusUninstalling rel.Info.Deleted = helmtime.Now() rel.Info.Description = "Deletion in progress (or silently failed)" @@ -116,18 +116,18 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) return res, err } } else { - u.cfg.Log.Debug("delete hooks disabled", "release", name) + slog.Debug("delete hooks disabled", "release", name) } // From here on out, the release is currently considered to be in StatusUninstalling // state. 
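Further down in this file's diff, parseCascadingFlag also drops its Configuration parameter, so its fallback branch logs through the global logger. A standalone sketch of the same mapping (mapCascade is an illustrative stand-in for the real parseCascadingFlag):

package main

import (
	"fmt"
	"log/slog"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// mapCascade mirrors parseCascadingFlag: recognized values map to the
// corresponding propagation policy; anything else falls back to background
// deletion with a debug log.
func mapCascade(flag string) v1.DeletionPropagation {
	switch flag {
	case "orphan":
		return v1.DeletePropagationOrphan
	case "foreground":
		return v1.DeletePropagationForeground
	case "background":
		return v1.DeletePropagationBackground
	default:
		slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", flag)
		return v1.DeletePropagationBackground
	}
}

func main() {
	fmt.Println(mapCascade("foreground")) // Foreground
	fmt.Println(mapCascade("bogus"))      // Background, after a debug log
}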
if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) + slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) } deletedResources, kept, errs := u.deleteRelease(rel) if errs != nil { - u.cfg.Log.Debug("uninstall: Failed to delete release", "errors", errs) + slog.Debug("uninstall: Failed to delete release", "errors", errs) return nil, errors.Errorf("failed to delete release: %s", name) } @@ -154,7 +154,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if !u.KeepHistory { - u.cfg.Log.Debug("purge requested", "release", name) + slog.Debug("purge requested", "release", name) err := u.purgeReleases(rels...) if err != nil { errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release")) @@ -169,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if err := u.cfg.Releases.Update(rel); err != nil { - u.cfg.Log.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) + slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err)) } if len(errs) > 0 { @@ -226,7 +226,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri } if len(resources) > 0 { if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { - _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation)) + _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation)) return resources, kept, errs } _, errs = u.cfg.KubeClient.Delete(resources) @@ -234,7 +234,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri return resources, kept, errs } -func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation { +func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation { switch cascadingFlag { case "orphan": return v1.DeletePropagationOrphan @@ -243,7 +243,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro case "background": return v1.DeletePropagationBackground default: - cfg.Log.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag) + slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag) return v1.DeletePropagationBackground } } diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 429bac9d7..ea09c8ed0 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -164,7 +164,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return nil, errors.Errorf("release name is invalid: %s", name) } - u.cfg.Log.Debug("preparing upgrade", "name", name) + slog.Debug("preparing upgrade", "name", name) currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) if err != nil { return nil, err @@ -172,7 +172,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. u.cfg.Releases.MaxHistory = u.MaxHistory - u.cfg.Log.Debug("performing update", "name", name) + slog.Debug("performing update", "name", name) res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) if err != nil { return res, err @@ -180,7 +180,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. 
// Do not update for dry runs if !u.isDryRun() { - u.cfg.Log.Debug("updating status for upgraded release", "name", name) + slog.Debug("updating status for upgraded release", "name", name) if err := u.cfg.Releases.Update(upgradedRelease); err != nil { return res, err } @@ -366,7 +366,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR // Run if it is a dry run if u.isDryRun() { - u.cfg.Log.Debug("dry run for release", "name", upgradedRelease.Name) + slog.Debug("dry run for release", "name", upgradedRelease.Name) if len(u.Description) > 0 { upgradedRelease.Info.Description = u.Description } else { @@ -375,7 +375,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR return upgradedRelease, nil } - u.cfg.Log.Debug("creating upgraded release", "name", upgradedRelease.Name) + slog.Debug("creating upgraded release", "name", upgradedRelease.Name) if err := u.cfg.Releases.Create(upgradedRelease); err != nil { return nil, err } @@ -426,7 +426,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele return } } else { - u.cfg.Log.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) + slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) } results, err := u.cfg.KubeClient.Update(current, target, u.Force) @@ -442,7 +442,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // levels, we should make these error level logs so users are notified // that they'll need to go do the cleanup on their own if err := recreate(u.cfg, results.Updated); err != nil { - u.cfg.Log.Error(err.Error()) + slog.Error(err.Error()) } } waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) @@ -487,13 +487,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) - u.cfg.Log.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err)) + slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err)) rel.Info.Status = release.StatusFailed rel.Info.Description = msg u.cfg.recordRelease(rel) if u.CleanupOnFail && len(created) > 0 { - u.cfg.Log.Debug("cleanup on fail set", "cleaning_resources", len(created)) + slog.Debug("cleanup on fail set", "cleaning_resources", len(created)) _, errs := u.cfg.KubeClient.Delete(created) if errs != nil { var errorList []string @@ -502,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e } return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) } - u.cfg.Log.Debug("resource cleanup complete") + slog.Debug("resource cleanup complete") } if u.Atomic { - u.cfg.Log.Debug("upgrade failed and atomic is set, rolling back to last successful release") + slog.Debug("upgrade failed and atomic is set, rolling back to last successful release") // As a protection, get the last successful release before rollback. // If there are no successful releases, bail out @@ -557,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { if u.ResetValues { // If ResetValues is set, we completely ignore current.Config. 
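The value-reuse modes below differ only in which side of the merge takes precedence. A minimal sketch of the ResetThenReuseValues direction, assuming chartutil.CoalesceTables keeps entries from its first argument on conflict (its documented behavior):

package main

import (
	"fmt"

	chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
)

func main() {
	oldConfig := map[string]interface{}{"replicas": 2, "image": "nginx:1.25"}
	newVals := map[string]interface{}{"replicas": 3}

	// Mirrors newVals = chartutil.CoalesceTables(newVals, current.Config) in
	// the hunk below: explicitly supplied values win, the rest carries over.
	merged := chartutil.CoalesceTables(newVals, oldConfig)
	fmt.Println(merged) // map[image:nginx:1.25 replicas:3]
}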
- u.cfg.Log.Debug("resetting values to the chart's original version") + slog.Debug("resetting values to the chart's original version") return newVals, nil } // If the ReuseValues flag is set, we always copy the old values over the new config's values. if u.ReuseValues { - u.cfg.Log.Debug("reusing the old release's values") + slog.Debug("reusing the old release's values") // We have to regenerate the old coalesced values: oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) @@ -580,7 +580,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. if u.ResetThenReuseValues { - u.cfg.Log.Debug("merging values from old release to new values") + slog.Debug("merging values from old release to new values") newVals = chartutil.CoalesceTables(newVals, current.Config) @@ -588,7 +588,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV } if len(newVals) == 0 && len(current.Config) > 0 { - u.cfg.Log.Debug("copying values from old release", "name", current.Name, "version", current.Version) + slog.Debug("copying values from old release", "name", current.Name, "version", current.Version) newVals = current.Config } return newVals, nil diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 454bb13de..eb829c21e 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "log" + "log/slog" "path/filepath" "sort" "strings" @@ -82,11 +83,11 @@ func (ws *waitValue) Set(s string) error { *ws = waitValue(s) return nil case "true": - Logger.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") + slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher") *ws = waitValue(kube.StatusWatcherStrategy) return nil case "false": - Logger.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") + slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag") *ws = waitValue(kube.HookOnlyStrategy) return nil default: diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go index 38e0a5b3e..b48f802b5 100644 --- a/pkg/cmd/helpers_test.go +++ b/pkg/cmd/helpers_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" "io" - "log/slog" "os" "strings" "testing" @@ -93,7 +92,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) Releases: store, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, Capabilities: chartutil.DefaultCapabilities, - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } root, err := newRootCmdWithConfig(actionConfig, buf, args) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 14746f8c3..ee018c88a 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "log" + "log/slog" "os" "os/signal" "syscall" @@ -229,9 +230,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal } func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) { - Logger.Debug("Original chart version", "version", client.Version) + slog.Debug("Original chart version", "version", client.Version) if client.Version == "" && client.Devel { - Logger.Debug("setting version to >0.0.0-0") + slog.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } @@ 
-246,7 +247,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options return nil, err } - Logger.Debug("Chart path", "path", cp) + slog.Debug("Chart path", "path", cp) p := getter.All(settings) vals, err := valueOpts.MergeValues(p) @@ -265,7 +266,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options } if chartRequested.Metadata.Deprecated { - Logger.Warn("this chart is deprecated") + slog.Warn("this chart is deprecated") } if req := chartRequested.Metadata.Dependencies; req != nil { diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go index a4eb91aad..69a4ff36d 100644 --- a/pkg/cmd/list.go +++ b/pkg/cmd/list.go @@ -71,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { ValidArgsFunction: noMoreArgsCompFunc, RunE: func(cmd *cobra.Command, _ []string) error { if client.AllNamespaces { - if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Logger); err != nil { + if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil { return err } } diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index 05d7135dd..355ed4349 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -17,6 +17,7 @@ package cmd import ( "io" + "log/slog" "os" "os/exec" @@ -66,7 +67,7 @@ func runHook(p *plugin.Plugin, event string) error { prog := exec.Command(main, argv...) - Logger.Debug("running hook", "event", event, "program", prog) + slog.Debug("running hook", "event", event, "program", prog) prog.Stdout, prog.Stderr = os.Stdout, os.Stderr if err := prog.Run(); err != nil { diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index 2e8fd4d6a..14469f5b4 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" "io" + "log/slog" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -79,7 +80,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error { return err } - Logger.Debug("loading plugin", "path", i.Path()) + slog.Debug("loading plugin", "path", i.Path()) p, err := plugin.LoadDir(i.Path()) if err != nil { return errors.Wrap(err, "plugin is installed but unusable") diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 52aefe8ef..fdd66ec0a 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" "io" + "log/slog" "github.com/gosuri/uitable" "github.com/spf13/cobra" @@ -32,7 +33,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command { Short: "list installed Helm plugins", ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { - Logger.Debug("pluginDirs", "directory", settings.PluginsDirectory) + slog.Debug("pluginDirs", "directory", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index 18815b139..61bc3d724 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" "io" + "log/slog" "os" "strings" @@ -60,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error { } func (o *pluginUninstallOptions) run(out io.Writer) error { - Logger.Debug("loading installer plugins", "dir", settings.PluginsDirectory) + slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err diff --git 
a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index 16ac84066..c9a8ca238 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" "io" + "log/slog" "path/filepath" "strings" @@ -62,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error { func (o *pluginUpdateOptions) run(out io.Writer) error { installer.Debug = settings.Debug - Logger.Debug("loading installed plugins", "path", settings.PluginsDirectory) + slog.Debug("loading installed plugins", "path", settings.PluginsDirectory) plugins, err := plugin.FindPlugins(settings.PluginsDirectory) if err != nil { return err @@ -104,7 +105,7 @@ func updatePlugin(p *plugin.Plugin) error { return err } - Logger.Debug("loading plugin", "path", i.Path()) + slog.Debug("loading plugin", "path", i.Path()) updatedPlugin, err := plugin.LoadDir(i.Path()) if err != nil { return err diff --git a/pkg/cmd/pull.go b/pkg/cmd/pull.go index fca1c8b9b..e3d93c049 100644 --- a/pkg/cmd/pull.go +++ b/pkg/cmd/pull.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "log" + "log/slog" "github.com/spf13/cobra" @@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { RunE: func(_ *cobra.Command, args []string) error { client.Settings = settings if client.Version == "" && client.Devel { - Logger.Debug("setting version to >0.0.0-0") + slog.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/registry_login.go b/pkg/cmd/registry_login.go index 7c853d786..3719c1c17 100644 --- a/pkg/cmd/registry_login.go +++ b/pkg/cmd/registry_login.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "strings" @@ -122,7 +123,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd } } } else { - Logger.Warn("using --password via the CLI is insecure. Use --password-stdin") + slog.Warn("using --password via the CLI is insecure. Use --password-stdin") } return username, password, nil diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index cbef840b3..e9305206a 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "log" + "log/slog" "net/http" "os" "strings" @@ -95,7 +96,6 @@ By default, the default directories depend on the Operating System. 
The defaults ` var settings = cli.New() -var Logger = cli.NewLogger(settings) func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { actionConfig := new(action.Configuration) @@ -105,7 +105,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) { } cobra.OnInitialize(func() { helmDriver := os.Getenv("HELM_DRIVER") - if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Logger); err != nil { + if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil { log.Fatal(err) } if helmDriver == "memory" { @@ -139,6 +139,9 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg settings.AddFlags(flags) addKlogFlags(flags) + logger := cli.NewLogger(settings) + slog.SetDefault(logger) + // Setup shell completion for the namespace flag err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { if client, err := actionConfig.KubernetesClientSet(); err == nil { diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index 6aa5c10bd..380d1e394 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -90,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error { q := strings.Join(args, " ") results, err := c.Search(q) if err != nil { - Logger.Debug("search failed", slog.Any("error", err)) + slog.Debug("search failed", slog.Any("error", err)) return fmt.Errorf("unable to perform search against %q", o.searchEndpoint) } diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index 850bcbe16..b93b871b1 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -131,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error { } func (o *searchRepoOptions) setupSearchedVersion() { - Logger.Debug("original chart version", "version", o.version) + slog.Debug("original chart version", "version", o.version) if o.version != "" { return } if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases). 
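The >0.0.0-0 constraint set for --devel admits prerelease versions, while the stable default >0.0.0 skips them. A small sketch of the difference, assuming the Masterminds semver library that Helm uses for version matching:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	stable, _ := semver.NewConstraint(">0.0.0")  // prereleases excluded by default
	devel, _ := semver.NewConstraint(">0.0.0-0") // prerelease marker admits them
	rc := semver.MustParse("1.2.0-rc.1")

	fmt.Println(stable.Check(rc)) // false
	fmt.Println(devel.Check(rc))  // true
}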
- Logger.Debug("setting version to >0.0.0-0") + slog.Debug("setting version to >0.0.0-0") o.version = ">0.0.0-0" } else { // search only for stable releases, prerelease versions will be skipped - Logger.Debug("setting version to >0.0.0") + slog.Debug("setting version to >0.0.0") o.version = ">0.0.0" } } @@ -190,7 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) { f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) ind, err := repo.LoadIndexFile(f) if err != nil { - Logger.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err)) + slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err)) continue } diff --git a/pkg/cmd/show.go b/pkg/cmd/show.go index c70ffa256..22d8bee49 100644 --- a/pkg/cmd/show.go +++ b/pkg/cmd/show.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "log" + "log/slog" "github.com/spf13/cobra" @@ -211,9 +212,9 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) { } func runShow(args []string, client *action.Show) (string, error) { - Logger.Debug("original chart version", "version", client.Version) + slog.Debug("original chart version", "version", client.Version) if client.Version == "" && client.Devel { - Logger.Debug("setting version to >0.0.0-0") + slog.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index e6b5c0409..2e0f16212 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "log" + "log/slog" "os" "os/signal" "syscall" @@ -173,7 +174,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if client.Version == "" && client.Devel { - Logger.Debug("setting version to >0.0.0-0") + slog.Debug("setting version to >0.0.0-0") client.Version = ">0.0.0-0" } @@ -225,7 +226,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if ch.Metadata.Deprecated { - Logger.Warn("this chart is deprecated") + slog.Warn("this chart is deprecated") } // Create context and prepare the handle of SIGTERM diff --git a/pkg/kube/client.go b/pkg/kube/client.go index bd4dbea91..beac9ac7a 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -74,7 +74,6 @@ type Client struct { // needs. The smaller surface area of the interface means there is a lower // chance of it changing. Factory Factory - Log *slog.Logger // Namespace allows to bypass the kubeconfig file for the choice of the namespace Namespace string @@ -121,7 +120,6 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { return &statusWaiter{ restMapper: restMapper, client: dynamicClient, - log: c.Log, }, nil } @@ -132,7 +130,7 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { if err != nil { return nil, err } - return &legacyWaiter{kubeClient: kc, log: c.Log}, nil + return &legacyWaiter{kubeClient: kc}, nil case StatusWatcherStrategy: return c.newStatusWatcher() case HookOnlyStrategy: @@ -163,7 +161,6 @@ func New(getter genericclioptions.RESTClientGetter) *Client { factory := cmdutil.NewFactory(getter) c := &Client{ Factory: factory, - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } return c } @@ -197,7 +194,7 @@ func (c *Client) IsReachable() error { // Create creates Kubernetes resources specified in the resource list. 
func (c *Client) Create(resources ResourceList) (*Result, error) { - c.Log.Debug("creating resource(s)", "resources", len(resources)) + slog.Debug("creating resource(s)", "resources", len(resources)) if err := perform(resources, createResource); err != nil { return nil, err } @@ -249,7 +246,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) if err != nil { - c.Log.Warn("get the relation pod is failed", slog.Any("error", err)) + slog.Warn("get the relation pod is failed", slog.Any("error", err)) } } } @@ -267,7 +264,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run if info == nil { return objs, nil } - c.Log.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) + slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) selector, ok, _ := getSelectorFromObject(info.Object) if !ok { return objs, nil @@ -409,7 +406,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err updateErrors := []string{} res := &Result{} - c.Log.Debug("checking resources for changes", "resources", len(target)) + slog.Debug("checking resources for changes", "resources", len(target)) err := target.Visit(func(info *resource.Info, err error) error { if err != nil { return err @@ -430,7 +427,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } kind := info.Mapping.GroupVersionKind.Kind - c.Log.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind) + slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind) return nil } @@ -441,7 +438,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } if err := updateResource(c, info, originalInfo.Object, force); err != nil { - c.Log.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) updateErrors = append(updateErrors, err.Error()) } // Because we check for errors later, append the info regardless @@ -458,22 +455,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err } for _, info := range original.Difference(target) { - c.Log.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) + slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) if err := info.Get(); err != nil { - c.Log.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) continue } annotations, err := metadataAccessor.Annotations(info.Object) if err != nil { - c.Log.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, 
"kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) } if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { - c.Log.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) + slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) continue } if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { - c.Log.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) continue } res.Deleted = append(res.Deleted, info) @@ -497,16 +494,16 @@ func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy meta return rdelete(c, resources, policy) } -func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { +func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { var errs []error res := &Result{} mtx := sync.Mutex{} err := perform(resources, func(info *resource.Info) error { - c.Log.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) + slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) err := deleteResource(info, propagation) if err == nil || apierrors.IsNotFound(err) { if err != nil { - c.Log.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) } mtx.Lock() defer mtx.Unlock() @@ -640,7 +637,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P return patch, types.StrategicMergePatchType, err } -func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error { +func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force bool) error { var ( obj runtime.Object helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) @@ -654,7 +651,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, if err != nil { return errors.Wrap(err, "failed to replace object") } - c.Log.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind) + slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind) } else { patch, patchType, err := createPatch(target, currentObj) if err != nil { @@ -662,7 +659,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, } if patch == nil || string(patch) == "{}" { - c.Log.Debug("no changes detected", "kind", kind, "name", target.Name) + slog.Debug("no changes detected", "kind", kind, "name", target.Name) // This needs to happen to make sure that Helm has the latest info 
from the API // Otherwise there will be no labels and other functions that use labels will panic if err := target.Get(); err != nil { @@ -671,7 +668,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } // send patch to server - c.Log.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) + slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) if err != nil { return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 6244e3ee5..c755b490c 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -19,7 +19,6 @@ package kube import ( "bytes" "io" - "log/slog" "net/http" "strings" "testing" @@ -108,7 +107,6 @@ func newTestClient(t *testing.T) *Client { return &Client{ Factory: testFactory.WithNamespace("default"), - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), } } diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 745dd265e..9cbd89913 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -19,7 +19,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" "fmt" - "io" "log/slog" appsv1 "k8s.io/api/apps/v1" @@ -59,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption { // NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can // be used to override defaults. -func NewReadyChecker(cl kubernetes.Interface, logger *slog.Logger, opts ...ReadyCheckerOption) ReadyChecker { +func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker { c := ReadyChecker{ client: cl, - log: logger, - } - if c.log == nil { - c.log = slog.New(slog.NewTextHandler(io.Discard, nil)) } for _, opt := range opts { opt(&c) @@ -76,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, logger *slog.Logger, opts ...Ready // ReadyChecker is a type that can check core Kubernetes types for readiness. 
type ReadyChecker struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -232,18 +226,18 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool { return true } } - c.log.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName()) + slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName()) return false } func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { if job.Status.Failed > *job.Spec.BackoffLimit { - c.log.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName()) + slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName()) // If a job is failed, it can't recover, so throw an error return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) } if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { - c.log.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName()) + slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName()) return false, nil } return true, nil @@ -257,7 +251,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { // Ensure that the service cluster IP is not empty if s.Spec.ClusterIP == "" { - c.log.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName()) + slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName()) return false } @@ -265,12 +259,12 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { if s.Spec.Type == corev1.ServiceTypeLoadBalancer { // do not wait when at least 1 external IP is set if len(s.Spec.ExternalIPs) > 0 { - c.log.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs) + slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs) return true } if s.Status.LoadBalancer.Ingress == nil { - c.log.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName()) + slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName()) return false } } @@ -280,7 +274,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { if v.Status.Phase != corev1.ClaimBound { - c.log.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName()) + slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName()) return false } return true @@ -293,13 +287,13 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy } // Verify the generation observed by the deployment controller matches the spec generation if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { - c.log.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) + slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) return false } 
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) if !(rs.Status.ReadyReplicas >= expectedReady) { - c.log.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) + slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) return false } return true @@ -308,7 +302,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Verify the generation observed by the daemonSet controller matches the spec generation if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { - c.log.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation) + slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation) return false } @@ -319,7 +313,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Make sure all the updated pods have been scheduled if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { - c.log.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) + slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled) return false } maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) @@ -332,7 +326,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable if !(int(ds.Status.NumberReady) >= expectedReady) { - c.log.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) + slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) return false } return true @@ -384,13 +378,13 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Verify the generation observed by the statefulSet controller matches the spec generation if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - c.log.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) + slog.Debug("StatefulSet is not ready, observedGeneration does not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration",
sts.ObjectMeta.Generation) return false } // If the update strategy is not a rolling update, there will be nothing to wait for if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { - c.log.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type) + slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type) return true } @@ -416,30 +410,30 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Make sure all the updated pods have been scheduled if int(sts.Status.UpdatedReplicas) < expectedReplicas { - c.log.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) + slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas) return false } if int(sts.Status.ReadyReplicas) != replicas { - c.log.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) + slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) return false } // This check only makes sense when all partitions are being upgraded otherwise during a // partitioned rolling upgrade, this condition will never evaluate to true, leading to // error. if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { - c.log.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision) + slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision) return false } - c.log.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) + slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas) return true } func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { // Verify the generation observed by the replicationController controller matches the spec generation if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { - c.log.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) + slog.Debug("ReplicationController is not ready, observedGeneration does not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) return false } return true @@ -448,7 +442,7 @@ func (c *ReadyChecker)
replicaSetReady(rs *appsv1.ReplicaSet) bool { // Verify the generation observed by the replicaSet controller matches the spec generation if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { - c.log.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) + slog.Debug("ReplicaSet is not ready, observedGeneration does not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) return false } return true diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index d9dd8fb3d..64cf68749 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -17,8 +17,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" - "io" - "log/slog" "testing" appsv1 "k8s.io/api/apps/v1" @@ -39,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault func Test_ReadyChecker_IsReady_Pod(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -59,7 +56,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { name: "IsReady Pod", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -75,7 +71,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { name: "IsReady Pod returns error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -92,7 +87,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -115,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) { func Test_ReadyChecker_IsReady_Job(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -135,7 +128,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { name: "IsReady Job error while getting job", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -151,7 +143,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { name: "IsReady Job", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -168,7 +159,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -190,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) { func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -211,7 +200,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { name: "IsReady Deployments error while getting current Deployment", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -228,7 +216,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { name: "IsReady Deployments",
//TODO fix this one fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -246,7 +233,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -272,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) { func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -292,7 +277,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { name: "IsReady PersistentVolumeClaim", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -308,7 +292,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { name: "IsReady PersistentVolumeClaim with error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -325,7 +308,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -347,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) { func Test_ReadyChecker_IsReady_Service(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -367,7 +348,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { name: "IsReady Service", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -383,7 +363,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { name: "IsReady Service with error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -400,7 +379,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -422,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) { func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -442,7 +419,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { name: "IsReady DaemonSet", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -458,7 +434,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { name: "IsReady DaemonSet with error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -475,7 +450,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -497,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) { 
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -517,7 +490,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { name: "IsReady StatefulSet", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -533,7 +505,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { name: "IsReady StatefulSet with error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -550,7 +521,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -572,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -592,7 +561,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -608,7 +576,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController with error", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -624,7 +591,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { name: "IsReady ReplicationController and pods not ready for object", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -641,7 +607,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } @@ -663,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) { func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { type fields struct { client kubernetes.Interface - log *slog.Logger checkJobs bool pausedAsReady bool } @@ -683,7 +647,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { name: "IsReady ReplicaSet", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -699,7 +662,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { name: "IsReady ReplicaSet not ready", fields: fields{ client: fake.NewClientset(), - log: slog.New(slog.NewTextHandler(io.Discard, nil)), checkJobs: true, pausedAsReady: false, }, @@ -716,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := &ReadyChecker{ client: tt.fields.client, - log: tt.fields.log, checkJobs: tt.fields.checkJobs, pausedAsReady: tt.fields.pausedAsReady, } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bcb48155b..2d7cfe971 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -43,7 +43,6 @@ import ( type statusWaiter struct { client dynamic.Interface 
 	restMapper meta.RESTMapper
-	log        *slog.Logger
 }
 
 func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
@@ -56,7 +55,7 @@ func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
 func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	defer cancel()
-	w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+	slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
 	podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
@@ -77,7 +76,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
 func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
 	defer cancel()
-	w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+	slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	return w.wait(ctx, resourceList, sw)
 }
@@ -85,7 +84,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
 func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
 	defer cancel()
-	w.log.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+	slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
 	customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
@@ -96,7 +95,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
 func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
 	defer cancel()
-	w.log.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
+	slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	return w.waitForDelete(ctx, resourceList, sw)
 }
@@ -114,7 +113,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL
 	}
 	eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
 	statusCollector := collector.NewResourceStatusCollector(resources)
-	done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log))
+	done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
 	<-done
 
 	if statusCollector.Error != nil {
@@ -157,7 +156,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
 	eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
 	statusCollector := collector.NewResourceStatusCollector(resources)
-	done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log))
+	done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
 	<-done
 
 	if statusCollector.Error != nil {
@@ -180,7 +179,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
 	return nil
 }
 
-func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc {
+func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
 	return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
 		var rss []*event.ResourceStatus
 		var nonDesiredResources []*event.ResourceStatus
@@ -210,7 +209,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status, logger *sl
 				return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
 			})
 			first := nonDesiredResources[0]
-			logger.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
+			slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
 		}
 	}
 }
diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go
index 7226058c4..0b309b22d 100644
--- a/pkg/kube/statuswait_test.go
+++ b/pkg/kube/statuswait_test.go
@@ -18,8 +18,6 @@ package kube // import "helm.sh/helm/v3/pkg/kube"
 
 import (
 	"errors"
-	"io"
-	"log/slog"
 	"testing"
 	"time"
 
@@ -219,7 +217,6 @@ func TestStatusWaitForDelete(t *testing.T) {
 			statusWaiter := statusWaiter{
 				restMapper: fakeMapper,
 				client:     fakeClient,
-				log:        slog.New(slog.NewTextHandler(io.Discard, nil)),
 			}
 			objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
 			for _, objToCreate := range objsToCreate {
@@ -260,7 +257,6 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
 	statusWaiter := statusWaiter{
 		restMapper: fakeMapper,
 		client:     fakeClient,
-		log:        slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
 	// Don't create the object to test that the wait for delete works when the object doesn't exist
 	objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
@@ -319,7 +315,6 @@ func TestStatusWait(t *testing.T) {
 			statusWaiter := statusWaiter{
 				client:     fakeClient,
 				restMapper: fakeMapper,
-				log:        slog.New(slog.NewTextHandler(io.Discard, nil)),
 			}
 			objs := getRuntimeObjFromManifests(t, tt.objManifests)
 			for _, obj := range objs {
@@ -373,7 +368,6 @@ func TestWaitForJobComplete(t *testing.T) {
 			statusWaiter := statusWaiter{
 				client:     fakeClient,
 				restMapper: fakeMapper,
-				log:        slog.New(slog.NewTextHandler(io.Discard, nil)),
 			}
 			objs := getRuntimeObjFromManifests(t, tt.objManifests)
 			for _, obj := range objs {
@@ -433,7 +427,6 @@ func TestWatchForReady(t *testing.T) {
 			statusWaiter := statusWaiter{
 				client:     fakeClient,
 				restMapper: fakeMapper,
-				log:        slog.New(slog.NewTextHandler(io.Discard, nil)),
 			}
 			objs := getRuntimeObjFromManifests(t, tt.objManifests)
 			for _, obj := range objs {
diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go
index 75598542e..f384193e6 100644
--- a/pkg/kube/wait.go
+++ b/pkg/kube/wait.go
@@ -50,24 +50,23 @@ import (
 // Helm 4 now uses the StatusWaiter implementation instead
 type legacyWaiter struct {
 	c          ReadyChecker
-	log        *slog.Logger
 	kubeClient *kubernetes.Clientset
 }
 
 func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
-	hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true))
+	hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
 	return hw.waitForResources(resources, timeout)
 }
 
 func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
-	hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true))
+	hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
 	return hw.waitForResources(resources, timeout)
 }
 
 // waitForResources polls to get the current status of all pods, PVCs, Services and
 // Jobs(optional) until all are ready or a timeout is reached
 func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
-	hw.log.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
+	slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	defer cancel()
@@ -85,10 +84,10 @@ func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Dura
 			if waitRetries > 0 && hw.isRetryableError(err, v) {
 				numberOfErrors[i]++
 				if numberOfErrors[i] > waitRetries {
-					hw.log.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
+					slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
 					return false, err
 				}
-				hw.log.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
+				slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
 				return false, nil
 			}
 			numberOfErrors[i] = 0
@@ -104,14 +103,14 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo
 	if err == nil {
 		return false
 	}
-	hw.log.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
+	slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
 	if ev, ok := err.(*apierrors.StatusError); ok {
 		statusCode := ev.Status().Code
 		retryable := hw.isRetryableHTTPStatusCode(statusCode)
-		hw.log.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
+		slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
 		return retryable
 	}
-	hw.log.Debug("retryable error assumed", "resource", resource.Name)
+	slog.Debug("retryable error assumed", "resource", resource.Name)
 	return true
 }
 
@@ -121,7 +120,7 @@ func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
 
 // waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
 func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
-	hw.log.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
+	slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
 
 	startTime := time.Now()
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
@@ -139,9 +138,9 @@ func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duratio
 	elapsed := time.Since(startTime).Round(time.Second)
 
 	if err != nil {
-		hw.log.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err))
+		slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err))
 	} else {
-		hw.log.Debug("wait for resources succeeded", "elapsed", elapsed)
+		slog.Debug("wait for resources succeeded", "elapsed", elapsed)
 	}
 
 	return err
@@ -249,7 +248,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
 		return nil
 	}
 
-	hw.log.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
+	slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
 
 	// Use a selector on the name of the resource. This should be unique for the
 	// given version and kind
@@ -277,7 +276,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
 			// we get. We care mostly about jobs, where what we want to see is
 			// the status go into a good state. For other types, like ReplicaSet
 			// we don't really do anything to support these as hooks.
-			hw.log.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
+			slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
 
 			switch kind {
 			case "Job":
@@ -287,11 +286,11 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In
 			}
 			return true, nil
 		case watch.Deleted:
-			hw.log.Debug("deleted event received", "resource", info.Name)
+			slog.Debug("deleted event received", "resource", info.Name)
 			return true, nil
 		case watch.Error:
 			// Handle error and return with an error.
-			hw.log.Error("error event received", "resource", info.Name)
+			slog.Error("error event received", "resource", info.Name)
 			return true, errors.Errorf("failed to deploy %s", info.Name)
 		default:
 			return false, nil
@@ -313,12 +312,12 @@ func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error
 		if c.Type == batchv1.JobComplete && c.Status == "True" {
 			return true, nil
 		} else if c.Type == batchv1.JobFailed && c.Status == "True" {
-			hw.log.Error("job failed", "job", name, "reason", c.Reason)
+			slog.Error("job failed", "job", name, "reason", c.Reason)
 			return true, errors.Errorf("job %s failed: %s", name, c.Reason)
 		}
 	}
 
-	hw.log.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
+	slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
 	return false, nil
 }
 
@@ -333,15 +332,15 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool
 
 	switch o.Status.Phase {
 	case corev1.PodSucceeded:
-		hw.log.Debug("pod succeeded", "pod", o.Name)
+		slog.Debug("pod succeeded", "pod", o.Name)
 		return true, nil
 	case corev1.PodFailed:
-		hw.log.Error("pod failed", "pod", o.Name)
+		slog.Error("pod failed", "pod", o.Name)
 		return true, errors.Errorf("pod %s failed", o.Name)
 	case corev1.PodPending:
-		hw.log.Debug("pod pending", "pod", o.Name)
+		slog.Debug("pod pending", "pod", o.Name)
 	case corev1.PodRunning:
-		hw.log.Debug("pod running", "pod", o.Name)
+		slog.Debug("pod running", "pod", o.Name)
 	}
 
 	return false, nil
diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go
index dba9a138d..3e4acfd81 100644
--- a/pkg/storage/driver/cfgmaps.go
+++ b/pkg/storage/driver/cfgmaps.go
@@ -44,7 +44,6 @@ const ConfigMapsDriverName = "ConfigMap"
 // ConfigMapsInterface.
 type ConfigMaps struct {
 	impl corev1.ConfigMapInterface
-	Log  *slog.Logger
 }
 
 // NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
@@ -70,13 +69,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
 			return nil, ErrReleaseNotFound
 		}
 
-		cfgmaps.Log.Debug("failed to get release", "key", key, slog.Any("error", err))
+		slog.Debug("failed to get release", "key", key, slog.Any("error", err))
 		return nil, err
 	}
 	// found the configmap, decode the base64 data string
 	r, err := decodeRelease(obj.Data["release"])
 	if err != nil {
-		cfgmaps.Log.Debug("failed to decode data", "key", key, slog.Any("error", err))
+		slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
 		return nil, err
 	}
 	r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
@@ -93,7 +92,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
 
 	list, err := cfgmaps.impl.List(context.Background(), opts)
 	if err != nil {
-		cfgmaps.Log.Debug("failed to list releases", slog.Any("error", err))
+		slog.Debug("failed to list releases", slog.Any("error", err))
 		return nil, err
 	}
 
@@ -104,7 +103,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
 	for _, item := range list.Items {
 		rls, err := decodeRelease(item.Data["release"])
 		if err != nil {
-			cfgmaps.Log.Debug("failed to decode release", "item", item, slog.Any("error", err))
+			slog.Debug("failed to decode release", "item", item, slog.Any("error", err))
 			continue
 		}
 
@@ -132,7 +131,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
 
 	list, err := cfgmaps.impl.List(context.Background(), opts)
 	if err != nil {
-		cfgmaps.Log.Debug("failed to query with labels", slog.Any("error", err))
+		slog.Debug("failed to query with labels", slog.Any("error", err))
 		return nil, err
 	}
 
@@ -144,7 +143,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
 	for _, item := range list.Items {
 		rls, err := decodeRelease(item.Data["release"])
 		if err != nil {
-			cfgmaps.Log.Debug("failed to decode release", slog.Any("error", err))
+			slog.Debug("failed to decode release", slog.Any("error", err))
 			continue
 		}
 		rls.Labels = item.ObjectMeta.Labels
@@ -166,7 +165,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
 	// create a new configmap to hold the release
 	obj, err := newConfigMapsObject(key, rls, lbs)
 	if err != nil {
-		cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
+		slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
 		return err
 	}
 	// push the configmap object out into the kubiverse
@@ -175,7 +174,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
 			return ErrReleaseExists
 		}
 
-		cfgmaps.Log.Debug("failed to create release", slog.Any("error", err))
+		slog.Debug("failed to create release", slog.Any("error", err))
 		return err
 	}
 	return nil
@@ -194,13 +193,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
 	// create a new configmap object to hold the release
 	obj, err := newConfigMapsObject(key, rls, lbs)
 	if err != nil {
-		cfgmaps.Log.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
+		slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
 		return err
 	}
 	// push the configmap object out into the kubiverse
 	_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
 	if err != nil {
-		cfgmaps.Log.Debug("failed to update release", slog.Any("error", err))
slog.Debug("failed to update release", slog.Any("error", err)) return err } return nil diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index b5bf08bf4..54fda0542 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -19,8 +19,6 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver" import ( "context" "fmt" - "io" - "log/slog" "testing" sqlmock "github.com/DATA-DOG/go-sqlmock" @@ -266,6 +264,5 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) db: sqlxDB, namespace: "default", statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar), - Log: slog.New(slog.NewTextHandler(io.Discard, nil)), }, mock } diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index 5045774e6..a69f1ed65 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -44,7 +44,6 @@ const SecretsDriverName = "Secret" // SecretsInterface. type Secrets struct { impl corev1.SecretInterface - Log *slog.Logger } // NewSecrets initializes a new Secrets wrapping an implementation of @@ -96,7 +95,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err)) + slog.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err)) continue } @@ -135,7 +134,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Log.Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) + slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) continue } rls.Labels = item.ObjectMeta.Labels diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index 9f54de7f8..c3740b9a3 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -87,8 +87,6 @@ type SQL struct { db *sqlx.DB namespace string statementBuilder sq.StatementBuilderType - - Log *slog.Logger } // Name returns the name of the driver. @@ -109,13 +107,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect) migrate.SetDisableCreateTable(false) if err != nil { - s.Log.Debug("failed to get migration records", slog.Any("error", err)) + slog.Debug("failed to get migration records", slog.Any("error", err)) return false } for _, record := range records { if _, ok := migrationsIDs[record.Id]; ok { - s.Log.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt) + slog.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt) delete(migrationsIDs, record.Id) } } @@ -123,7 +121,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { // check if all migrations applied if len(migrationsIDs) != 0 { for id := range migrationsIDs { - s.Log.Debug("find unapplied migration", "id", id) + slog.Debug("find unapplied migration", "id", id) } return false } @@ -277,7 +275,7 @@ type SQLReleaseCustomLabelWrapper struct { } // NewSQL initializes a new sql driver. 
-func NewSQL(connectionString string, logger *slog.Logger, namespace string) (*SQL, error) {
+func NewSQL(connectionString string, namespace string) (*SQL, error) {
 	db, err := sqlx.Connect(postgreSQLDialect, connectionString)
 	if err != nil {
 		return nil, err
@@ -285,7 +283,6 @@ func NewSQL(connectionString string, logger *slog.Logger, namespace string) (*SQ
 
 	driver := &SQL{
 		db:               db,
-		Log:              logger,
 		statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
 	}
 
@@ -310,24 +307,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
 
 	query, args, err := qb.ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build query", slog.Any("error", err))
+		slog.Debug("failed to build query", slog.Any("error", err))
 		return nil, err
 	}
 
 	// Get will return an error if the result is empty
 	if err := s.db.Get(&record, query, args...); err != nil {
-		s.Log.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
+		slog.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
 		return nil, ErrReleaseNotFound
 	}
 
 	release, err := decodeRelease(record.Body)
 	if err != nil {
-		s.Log.Debug("failed to decode data", "key", key, slog.Any("error", err))
+		slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
 		return nil, err
 	}
 
 	if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
-		s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
+		slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
 		return nil, err
 	}
 
@@ -348,13 +345,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
 
 	query, args, err := sb.ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build query", slog.Any("error", err))
+		slog.Debug("failed to build query", slog.Any("error", err))
 		return nil, err
 	}
 
 	var records = []SQLReleaseWrapper{}
 	if err := s.db.Select(&records, query, args...); err != nil {
-		s.Log.Debug("failed to list", slog.Any("error", err))
+		slog.Debug("failed to list", slog.Any("error", err))
 		return nil, err
 	}
 
@@ -362,12 +359,12 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
 	for _, record := range records {
 		release, err := decodeRelease(record.Body)
 		if err != nil {
-			s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err))
+			slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
 			continue
 		}
 
 		if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
-			s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
+			slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
 			return nil, err
 		}
 		for k, v := range getReleaseSystemLabels(release) {
@@ -397,7 +394,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
 		if _, ok := labelMap[key]; ok {
 			sb = sb.Where(sq.Eq{key: labels[key]})
 		} else {
-			s.Log.Debug("unknown label", "key", key)
+			slog.Debug("unknown label", "key", key)
 			return nil, fmt.Errorf("unknown label %s", key)
 		}
 	}
@@ -410,13 +407,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
 	// Build our query
 	query, args, err := sb.ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build query", slog.Any("error", err))
+		slog.Debug("failed to build query", slog.Any("error", err))
 		return nil, err
 	}
 
 	var records = []SQLReleaseWrapper{}
 	if err := s.db.Select(&records, query, args...); err != nil {
-		s.Log.Debug("failed to query with labels", slog.Any("error", err))
+		slog.Debug("failed to query with labels", slog.Any("error", err))
 		return nil, err
 	}
 
@@ -428,12 +425,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
 	for _, record := range records {
 		release, err := decodeRelease(record.Body)
 		if err != nil {
-			s.Log.Debug("failed to decode release", "record", record, slog.Any("error", err))
+			slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
 			continue
 		}
 
 		if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
-			s.Log.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
+			slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
 			return nil, err
 		}
 
@@ -457,13 +454,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
 
 	body, err := encodeRelease(rls)
 	if err != nil {
-		s.Log.Debug("failed to encode release", slog.Any("error", err))
+		slog.Debug("failed to encode release", slog.Any("error", err))
 		return err
 	}
 
 	transaction, err := s.db.Beginx()
 	if err != nil {
-		s.Log.Debug("failed to start SQL transaction", slog.Any("error", err))
+		slog.Debug("failed to start SQL transaction", slog.Any("error", err))
 		return fmt.Errorf("error beginning transaction: %v", err)
 	}
 
@@ -492,7 +489,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
 		int(time.Now().Unix()),
 	).ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build insert query", slog.Any("error", err))
+		slog.Debug("failed to build insert query", slog.Any("error", err))
 		return err
 	}
 
@@ -506,17 +503,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
 			Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
 			ToSql()
 		if buildErr != nil {
-			s.Log.Debug("failed to build select query", "error", buildErr)
+			slog.Debug("failed to build select query", "error", buildErr)
 			return err
 		}
 
 		var record SQLReleaseWrapper
 		if err := transaction.Get(&record, selectQuery, args...); err == nil {
-			s.Log.Debug("release already exists", "key", key)
+			slog.Debug("release already exists", "key", key)
 			return ErrReleaseExists
 		}
 
-		s.Log.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
+		slog.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
 		return err
 	}
 
@@ -539,13 +536,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
 
 		if err != nil {
 			defer transaction.Rollback()
-			s.Log.Debug("failed to build insert query", slog.Any("error", err))
+			slog.Debug("failed to build insert query", slog.Any("error", err))
 			return err
 		}
 
 		if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
 			defer transaction.Rollback()
-			s.Log.Debug("failed to write Labels", slog.Any("error", err))
+			slog.Debug("failed to write Labels", slog.Any("error", err))
 			return err
 		}
 	}
@@ -564,7 +561,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
 
 	body, err := encodeRelease(rls)
 	if err != nil {
-		s.Log.Debug("failed to encode release", slog.Any("error", err))
+		slog.Debug("failed to encode release", slog.Any("error", err))
 		return err
 	}
 
@@ -581,12 +578,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
 		ToSql()
 
 	if err != nil {
-		s.Log.Debug("failed to build update query", slog.Any("error", err))
+		slog.Debug("failed to build update query", slog.Any("error", err))
 		return err
 	}
 
 	if _, err := s.db.Exec(query, args...); err != nil {
-		s.Log.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
+		slog.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
 		return err
 	}
 
@@ -597,7 +594,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
 	transaction, err := s.db.Beginx()
 	if err != nil {
-		s.Log.Debug("failed to start SQL transaction", slog.Any("error", err))
+		slog.Debug("failed to start SQL transaction", slog.Any("error", err))
 		return nil, fmt.Errorf("error beginning transaction: %v", err)
 	}
 
@@ -608,20 +605,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
 		Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
 		ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build select query", slog.Any("error", err))
+		slog.Debug("failed to build select query", slog.Any("error", err))
 		return nil, err
 	}
 
 	var record SQLReleaseWrapper
 	err = transaction.Get(&record, selectQuery, args...)
 	if err != nil {
-		s.Log.Debug("release not found", "key", key, slog.Any("error", err))
+		slog.Debug("release not found", "key", key, slog.Any("error", err))
 		return nil, ErrReleaseNotFound
 	}
 
 	release, err := decodeRelease(record.Body)
 	if err != nil {
-		s.Log.Debug("failed to decode release", "key", key, slog.Any("error", err))
+		slog.Debug("failed to decode release", "key", key, slog.Any("error", err))
 		transaction.Rollback()
 		return nil, err
 	}
@@ -633,18 +630,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
 		Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
 		ToSql()
 	if err != nil {
-		s.Log.Debug("failed to build delete query", slog.Any("error", err))
+		slog.Debug("failed to build delete query", slog.Any("error", err))
 		return nil, err
 	}
 
 	_, err = transaction.Exec(deleteQuery, args...)
 	if err != nil {
-		s.Log.Debug("failed perform delete query", slog.Any("error", err))
+		slog.Debug("failed perform delete query", slog.Any("error", err))
 		return release, err
 	}
 
 	if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
-		s.Log.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
+		slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
 		return nil, err
 	}
 
@@ -655,7 +652,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
 		ToSql()
 
 	if err != nil {
-		s.Log.Debug("failed to build delete Labels query", slog.Any("error", err))
+		slog.Debug("failed to build delete Labels query", slog.Any("error", err))
 		return nil, err
 	}
 	_, err = transaction.Exec(deleteCustomLabelsQuery, args...)
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index 5e8718ea0..f98daeba6 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -18,6 +18,7 @@ package storage // import "helm.sh/helm/v4/pkg/storage"
 
 import (
 	"fmt"
+	"log/slog"
 	"strings"
 
 	"github.com/pkg/errors"
@@ -42,15 +43,13 @@ type Storage struct {
 	// be retained, including the most recent release. Values of 0 or less are
 	// ignored (meaning no limits are imposed).
 	MaxHistory int
-
-	Log func(string, ...interface{})
 }
 
 // Get retrieves the release from storage. An error is returned
 // if the storage driver failed to fetch the release, or the
 // release identified by the key, version pair does not exist.
 func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
-	s.Log("getting release %q", makeKey(name, version))
+	slog.Debug("getting release", "key", makeKey(name, version))
 	return s.Driver.Get(makeKey(name, version))
 }
 
@@ -58,7 +57,7 @@ func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
 // error is returned if the storage driver fails to store the
 // release, or a release with an identical key already exists.
 func (s *Storage) Create(rls *rspb.Release) error {
-	s.Log("creating release %q", makeKey(rls.Name, rls.Version))
+	slog.Debug("creating release", "key", makeKey(rls.Name, rls.Version))
 	if s.MaxHistory > 0 {
 		// Want to make space for one more release.
 		if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
@@ -73,7 +72,7 @@ func (s *Storage) Create(rls *rspb.Release) error {
 // storage backend fails to update the release or if the release
 // does not exist.
 func (s *Storage) Update(rls *rspb.Release) error {
-	s.Log("updating release %q", makeKey(rls.Name, rls.Version))
+	slog.Debug("updating release", "key", makeKey(rls.Name, rls.Version))
 	return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
 }
 
@@ -81,21 +80,21 @@ func (s *Storage) Update(rls *rspb.Release) error {
 // the storage backend fails to delete the release or if the release
 // does not exist.
 func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
-	s.Log("deleting release %q", makeKey(name, version))
+	slog.Debug("deleting release", "key", makeKey(name, version))
 	return s.Driver.Delete(makeKey(name, version))
 }
 
 // ListReleases returns all releases from storage. An error is returned if the
 // storage backend fails to retrieve the releases.
 func (s *Storage) ListReleases() ([]*rspb.Release, error) {
-	s.Log("listing all releases in storage")
+	slog.Debug("listing all releases in storage")
 	return s.Driver.List(func(_ *rspb.Release) bool { return true })
 }
 
 // ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
 // if the storage backend fails to retrieve the releases.
 func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
-	s.Log("listing uninstalled releases in storage")
+	slog.Debug("listing uninstalled releases in storage")
 	return s.Driver.List(func(rls *rspb.Release) bool {
 		return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
 	})
@@ -104,7 +103,7 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
 // ListDeployed returns all releases with Status == DEPLOYED. An error is returned
 // if the storage backend fails to retrieve the releases.
 func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
-	s.Log("listing all deployed releases in storage")
+	slog.Debug("listing all deployed releases in storage")
 	return s.Driver.List(func(rls *rspb.Release) bool {
 		return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
 	})
@@ -132,7 +131,7 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) {
 // DeployedAll returns all deployed releases with the provided name, or
 // returns driver.NewErrNoDeployedReleases if not found.
 func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
-	s.Log("getting deployed releases from %q history", name)
+	slog.Debug("getting deployed releases", "name", name)
 
 	ls, err := s.Driver.Query(map[string]string{
 		"name": name,
@@ -151,7 +150,7 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
 // History returns the revision history for the release with the provided name, or
 // returns driver.ErrReleaseNotFound if no such release name exists.
 func (s *Storage) History(name string) ([]*rspb.Release, error) {
-	s.Log("getting release history for %q", name)
+	slog.Debug("getting release history", "name", name)
 
 	return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
 }
@@ -206,7 +205,7 @@ func (s *Storage) removeLeastRecent(name string, maximum int) error {
 		}
 	}
 
-	s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
+	slog.Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
 	switch c := len(errs); c {
 	case 0:
 		return nil
@@ -221,7 +220,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
 	key := makeKey(name, version)
 	_, err := s.Delete(name, version)
 	if err != nil {
-		s.Log("error pruning %s from release history: %s", key, err)
+		slog.Debug("error pruning release", "key", key, slog.Any("error", err))
 		return err
 	}
 	return nil
@@ -229,7 +228,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
 
 // Last fetches the last revision of the named release.
 func (s *Storage) Last(name string) (*rspb.Release, error) {
-	s.Log("getting last revision of %q", name)
+	slog.Debug("getting last revision", "name", name)
 	h, err := s.History(name)
 	if err != nil {
 		return nil, err
@@ -261,6 +260,5 @@ func Init(d driver.Driver) *Storage {
 	}
 	return &Storage{
 		Driver: d,
-		Log:    func(_ string, _ ...interface{}) {},
 	}
 }
diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go
index 1dadc9c93..f99d10214 100644
--- a/pkg/storage/storage_test.go
+++ b/pkg/storage/storage_test.go
@@ -312,7 +312,6 @@ func (d *MaxHistoryMockDriver) Name() string {
 func TestMaxHistoryErrorHandling(t *testing.T) {
 	//func TestStorageRemoveLeastRecentWithError(t *testing.T) {
 	storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
-	storage.Log = t.Logf
 
 	storage.MaxHistory = 1
 
@@ -338,7 +337,6 @@ func TestMaxHistoryErrorHandling(t *testing.T) {
 func TestStorageRemoveLeastRecent(t *testing.T) {
 	storage := Init(driver.NewMemory())
-	storage.Log = t.Logf
 
 	// Make sure that specifying this at the outset doesn't cause any bugs.
 	storage.MaxHistory = 10
@@ -395,7 +393,6 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
 
 func TestStorageDoNotDeleteDeployed(t *testing.T) {
 	storage := Init(driver.NewMemory())
-	storage.Log = t.Logf
 	storage.MaxHistory = 3
 
 	const name = "angry-bird"
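The storage diffs above capture the migration this series applies across the codebase: the printf-style `Log func(string, ...interface{})` field that every struct carried, and every constructor had to populate, is dropped in favor of the package-level structured logger. A minimal sketch of the before/after shape, using simplified stand-in types rather than Helm's real ones:

```go
package main

import "log/slog"

// Before: each component carried its own printf-style logger field.
type storageBefore struct {
	Log func(string, ...interface{})
}

func (s *storageBefore) get(key string) {
	s.Log("getting release %q", key) // panics if Log was never set
}

// After: call the process-wide slog default with key/value pairs.
type storageAfter struct{}

func (s *storageAfter) get(key string) {
	slog.Debug("getting release", "key", key) // safe with zero setup
}

func main() {
	(&storageAfter{}).get("sh.helm.release.v1.wordpress.v1")
	// (&storageBefore{}).get(...) would nil-panic without initialization.
}
```

Besides removing a field that callers had to remember to set, the package-level call cannot nil-panic, which is exactly the class of bug patch 165 below fixes.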
From e7eedae97cb8ea9050ec220a7cb33445c0b791f9 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 10 Apr 2025 16:01:12 +0200
Subject: [PATCH 163/192] Use the logger with proper handling of dynamic debug
 on 2 locations

Signed-off-by: Benoit Tigeot
---
 pkg/cli/logger.go => internal/logging/logging.go | 26 +++++++++++--------
 pkg/action/action.go                             |  2 --
 pkg/action/action_test.go                        | 20 +++-----------
 pkg/cmd/root.go                                  |  3 ++-
 4 files changed, 21 insertions(+), 30 deletions(-)
 rename pkg/cli/logger.go => internal/logging/logging.go (76%)

diff --git a/pkg/cli/logger.go b/internal/logging/logging.go
similarity index 76%
rename from pkg/cli/logger.go
rename to internal/logging/logging.go
index 03a69be24..946a211ef 100644
--- a/pkg/cli/logger.go
+++ b/internal/logging/logging.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package cli
+package logging
 
 import (
 	"context"
@@ -22,16 +22,20 @@ import (
 	"os"
 )
 
+// DebugEnabledFunc is a function type that determines if debug logging is enabled
+// We use a function because we want to check the setting at log time, not when the logger is created
+type DebugEnabledFunc func() bool
+
 // DebugCheckHandler checks settings.Debug at log time
 type DebugCheckHandler struct {
-	handler  slog.Handler
-	settings *EnvSettings
+	handler      slog.Handler
+	debugEnabled DebugEnabledFunc
 }
 
 // Enabled implements slog.Handler.Enabled
 func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
 	if level == slog.LevelDebug {
-		return h.settings.Debug // Check settings.Debug at log time
+		return h.debugEnabled()
 	}
 	return true // Always log other levels
 }
@@ -44,21 +48,21 @@ func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
 // WithAttrs implements slog.Handler.WithAttrs
 func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
 	return &DebugCheckHandler{
-		handler:  h.handler.WithAttrs(attrs),
-		settings: h.settings,
+		handler:      h.handler.WithAttrs(attrs),
+		debugEnabled: h.debugEnabled,
 	}
 }
 
 // WithGroup implements slog.Handler.WithGroup
 func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
 	return &DebugCheckHandler{
-		handler:  h.handler.WithGroup(name),
-		settings: h.settings,
+		handler:      h.handler.WithGroup(name),
+		debugEnabled: h.debugEnabled,
 	}
 }
 
 // NewLogger creates a new logger with dynamic debug checking
-func NewLogger(settings *EnvSettings) *slog.Logger {
+func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
 	// Create base handler that removes timestamps
 	baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
 		// Always use LevelDebug here to allow all messages through
@@ -75,8 +79,8 @@ func NewLogger(settings *EnvSettings) *slog.Logger {
 
 	// Wrap with our dynamic debug-checking handler
 	dynamicHandler := &DebugCheckHandler{
-		handler:  baseHandler,
-		settings: settings,
+		handler:      baseHandler,
+		debugEnabled: debugEnabled,
 	}
 
 	return slog.New(dynamicHandler)
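The handler above defers the debug decision to log time through `DebugEnabledFunc`. Since `internal/logging` is only importable from within the Helm module, here is a self-contained sketch of the same log-time gating; `debugGate` is a hypothetical stand-in for `DebugCheckHandler`, not the real type:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

// debugGate mirrors the pattern in the diff: the debug decision is
// re-evaluated on every record via a closure, not fixed at build time.
type debugGate struct {
	handler slog.Handler
	enabled func() bool
}

func (g *debugGate) Enabled(_ context.Context, level slog.Level) bool {
	if level == slog.LevelDebug {
		return g.enabled() // checked at log time
	}
	return true
}

func (g *debugGate) Handle(ctx context.Context, r slog.Record) error {
	return g.handler.Handle(ctx, r)
}

func (g *debugGate) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &debugGate{handler: g.handler.WithAttrs(attrs), enabled: g.enabled}
}

func (g *debugGate) WithGroup(name string) slog.Handler {
	return &debugGate{handler: g.handler.WithGroup(name), enabled: g.enabled}
}

func main() {
	debug := false
	base := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})
	slog.SetDefault(slog.New(&debugGate{handler: base, enabled: func() bool { return debug }}))

	slog.Debug("suppressed") // debug is false at log time
	debug = true
	slog.Debug("emitted") // same logger, new answer from the closure
}
```

This is why a plain `slog.HandlerOptions.Level` would not suffice here: flipping `--debug` after the logger is installed must still take effect.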
diff --git a/pkg/action/action.go b/pkg/action/action.go
index 09c1887bb..47a7da99c 100644
--- a/pkg/action/action.go
+++ b/pkg/action/action.go
@@ -378,8 +378,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
 		clientFn: kc.Factory.KubernetesClientSet,
 	}
 
-	// slog.SetDefault()
-
 	var store *storage.Storage
 	switch helmDriver {
 	case "secret", "secrets", "":
diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go
index f544d3281..4a0691afb 100644
--- a/pkg/action/action_test.go
+++ b/pkg/action/action_test.go
@@ -20,12 +20,12 @@ import (
 	"fmt"
 	"io"
 	"log/slog"
-	"os"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 
+	"helm.sh/helm/v4/internal/logging"
 	chart "helm.sh/helm/v4/pkg/chart/v2"
 	chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
 	kubefake "helm.sh/helm/v4/pkg/kube/fake"
@@ -41,21 +41,9 @@ var verbose = flag.Bool("test.log", false, "enable test logging (debug by defaul
 func actionConfigFixture(t *testing.T) *Configuration {
 	t.Helper()
 
-	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
-	if *verbose {
-		// Create a handler that removes timestamps
-		handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
-			Level: slog.LevelDebug,
-			ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
-				// Remove the time attribute
-				if a.Key == slog.TimeKey {
-					return slog.Attr{}
-				}
-				return a
-			},
-		})
-		logger = slog.New(handler)
-	}
+	logger := logging.NewLogger(func() bool {
+		return *verbose
+	})
 	slog.SetDefault(logger)
 
 	registryClient, err := registry.NewClient()
diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go
index e9305206a..ee22533f0 100644
--- a/pkg/cmd/root.go
+++ b/pkg/cmd/root.go
@@ -32,6 +32,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/clientcmd"
 
+	"helm.sh/helm/v4/internal/logging"
 	"helm.sh/helm/v4/internal/tlsutil"
 	"helm.sh/helm/v4/pkg/action"
 	"helm.sh/helm/v4/pkg/cli"
@@ -139,7 +140,7 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
 	settings.AddFlags(flags)
 	addKlogFlags(flags)
 
-	logger := cli.NewLogger(settings)
+	logger := logging.NewLogger(func() bool { return settings.Debug })
 	slog.SetDefault(logger)
 
 	// Setup shell completion for the namespace flag

From 7938662f959011d9fa6763f6124834ea446ae424 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 10 Apr 2025 16:12:52 +0200
Subject: [PATCH 164/192] No longer log call location using flag, it will need
 to be done in slog

Some ideas here:
https://www.reddit.com/r/golang/comments/15nwnkl/achieve_lshortfile_with_slog/

Signed-off-by: Benoit Tigeot
---
 cmd/helm/helm.go | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go
index 273ead226..eefce5158 100644
--- a/cmd/helm/helm.go
+++ b/cmd/helm/helm.go
@@ -17,7 +17,6 @@ limitations under the License.
 package main // import "helm.sh/helm/v4/cmd/helm"
 
 import (
-	"log"
 	"log/slog"
 	"os"
 
@@ -28,10 +27,6 @@ import (
 	"helm.sh/helm/v4/pkg/kube"
 )
 
-func init() {
-	log.SetFlags(log.Lshortfile)
-}
-
 func main() {
 	// Setting the name of the app for managedFields in the Kubernetes client.
 	// It is set here to the full name of "helm" so that renaming of helm to
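For anyone who wants the `log.Lshortfile` behavior back, `log/slog` ships a built-in way to attach call sites, which is a likely starting point for the slog-based replacement the commit message defers; a minimal sketch:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// AddSource attaches the caller's file, line and function to every
	// record, slog's counterpart to log.Lshortfile.
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{AddSource: true})
	slog.New(h).Info("call site restored")
}
```

Trimming the source attribute down to a bare `file:line` would take a `ReplaceAttr` function on the same `HandlerOptions`, which is the kind of approach discussed in the linked thread.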
From c05bcbd498d62fc5c5794aef9279582dab8c8285 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 10 Apr 2025 16:32:52 +0200
Subject: [PATCH 165/192] Fix nil pointer dereference in ready test

Signed-off-by: Benoit Tigeot
---
 pkg/kube/ready_test.go | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go
index 64cf68749..9d1dfd272 100644
--- a/pkg/kube/ready_test.go
+++ b/pkg/kube/ready_test.go
@@ -754,7 +754,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
 				t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
 			}
@@ -788,7 +788,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.replicaSetReady(tt.args.rs); got != tt.want {
 				t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
 			}
@@ -822,7 +822,7 @@ func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
 				t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
 			}
@@ -877,7 +877,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.daemonSetReady(tt.args.ds); got != tt.want {
 				t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
 			}
@@ -953,7 +953,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.statefulSetReady(tt.args.sts); got != tt.want {
 				t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
 			}
@@ -1012,7 +1012,7 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			for _, pod := range tt.existPods {
 				if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
 					t.Errorf("Failed to create Pod error: %v", err)
@@ -1091,7 +1091,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			got, err := c.jobReady(tt.args.job)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
@@ -1130,7 +1130,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			if got := c.volumeReady(tt.args.v); got != tt.want {
 				t.Errorf("volumeReady() = %v, want %v", got, tt.want)
 			}
@@ -1175,7 +1175,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			got := c.serviceReady(tt.args.service)
 			if got != tt.want {
 				t.Errorf("serviceReady() = %v, want %v", got, tt.want)
@@ -1244,7 +1244,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			got := c.crdBetaReady(tt.args.crdBeta)
 			if got != tt.want {
 				t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
@@ -1313,7 +1313,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewReadyChecker(fake.NewClientset(), nil)
+			c := NewReadyChecker(fake.NewClientset())
 			got := c.crdReady(tt.args.crdBeta)
 			if got != tt.want {
 				t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
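The nil pointer came from test call sites still passing `nil` for the logger parameter removed earlier in the series. With the logger gone entirely, the constructor takes the client plus functional options only; a sketch of the updated call, assuming the public `pkg/kube` API as it appears in these diffs:

```go
package main

import (
	"k8s.io/client-go/kubernetes/fake"

	"helm.sh/helm/v4/pkg/kube"
)

func main() {
	// Client plus functional options only; there is no logger argument
	// left to accidentally pass nil for.
	checker := kube.NewReadyChecker(
		fake.NewClientset(),
		kube.PausedAsReady(true),
		kube.CheckJobs(true),
	)
	_ = checker
}
```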
From 7938662f959011d9fa6763f6124834ea446ae424 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 10 Apr 2025 17:38:32 +0200
Subject: [PATCH 166/192] Prefer using slog.Any when displaying errors

Signed-off-by: Benoit Tigeot
---
 pkg/action/action.go    | 2 +-
 pkg/action/uninstall.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/action/action.go b/pkg/action/action.go
index 47a7da99c..4b90b2d5c 100644
--- a/pkg/action/action.go
+++ b/pkg/action/action.go
@@ -265,7 +265,7 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
 	apiVersions, err := GetVersionSet(dc)
 	if err != nil {
 		if discovery.IsGroupDiscoveryFailedError(err) {
-			slog.Warn("the kubernetes server has an orphaned API service", "errors", err)
+			slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
 			slog.Warn("to fix this, kubectl delete apiservice <service-name>")
 		} else {
 			return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go
index b842d9933..fa69d2a48 100644
--- a/pkg/action/uninstall.go
+++ b/pkg/action/uninstall.go
@@ -127,7 +127,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
 	deletedResources, kept, errs := u.deleteRelease(rel)
 
 	if errs != nil {
-		slog.Debug("uninstall: Failed to delete release", "errors", errs)
+		slog.Debug("uninstall: Failed to delete release", slog.Any("error", errs))
 		return nil, errors.Errorf("failed to delete release: %s", name)
 	}
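The change looks cosmetic but is not: `slog.Any` pins the conventional `error` key (the old call sites used an ad-hoc `errors` key) and leaves rendering of the value to the handler. A short sketch of the two styles side by side:

```go
package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("connection refused")

	// Old style: positional pair with an inconsistent "errors" key.
	slog.Warn("the kubernetes server has an orphaned API service", "errors", err)

	// New style: slog.Any fixes the "error" key and defers formatting
	// of the value to the handler.
	slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
}
```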
From 7938662f959011d9fa6763f6124834ea446ae424 Mon Sep 17 00:00:00 2001
From: Matt Farina
Date: Wed, 19 Mar 2025 13:54:00 -0400
Subject: [PATCH 167/192] Remove ValidName regex

This regex was already deprecated. Validation happens inside the
Metadata Validate function for the name instead of using this regex.

Signed-off-by: Matt Farina
---
 pkg/action/action.go | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/pkg/action/action.go b/pkg/action/action.go
index ea2dc0dd7..187df5412 100644
--- a/pkg/action/action.go
+++ b/pkg/action/action.go
@@ -23,7 +23,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"regexp"
 	"strings"
 
 	"github.com/pkg/errors"
@@ -63,21 +62,6 @@ var (
 	errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
 )
 
-// ValidName is a regular expression for resource names.
-//
-// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See
-// pkg/lint/rules.validateMetadataNameFunc for the replacement.
-//
-// According to the Kubernetes help text, the regular expression it uses is:
-//
-//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
-//
-// This follows the above regular expression (but requires a full string match, not partial).
-//
-// The Kubernetes documentation is here, though it is not entirely correct:
-// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
-
 // Configuration injects the dependencies that all actions share.
 type Configuration struct {
 	// RESTClientGetter is an interface that loads Kubernetes clients.

From ed005f5c320c2059da3528f967e556d5123496b3 Mon Sep 17 00:00:00 2001
From: Matt Farina
Date: Thu, 10 Apr 2025 13:45:02 -0400
Subject: [PATCH 168/192] Removing deprecation notice for this function.

While the constructor is not used by Helm itself, it is used by SDK
users and there is currently no alternative way to expose this.

Signed-off-by: Matt Farina
---
 pkg/engine/lookup_func.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go
index b7460850a..5b96dd386 100644
--- a/pkg/engine/lookup_func.go
+++ b/pkg/engine/lookup_func.go
@@ -35,9 +35,6 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam
 // NewLookupFunction returns a function for looking up objects in the cluster.
 //
 // If the resource does not exist, no error is raised.
-//
-// This function is considered deprecated, and will be renamed in Helm 4. It will no
-// longer be a public function.
 func NewLookupFunction(config *rest.Config) lookupFunc {
 	return newLookupFunction(clientProviderFromConfig{config: config})
 }

From 483789ac860985a183529bb4988bc2b7d01134c0 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 10:56:41 +0200
Subject: [PATCH 169/192] Bumps github.com/distribution/distribution/v3 from
 3.0.0-rc.3 to 3.0.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

```
» go mod download
go: module github.com/distribution/distribution/v3@v3.0.0 requires go >= 1.23.7; switching to go1.23.8
```

We need to update Go, because of
https://github.com/distribution/distribution/pull/4601

Signed-off-by: Benoit Tigeot
---
 go.mod | 10 ++++++----
 go.sum |  3 +++
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/go.mod b/go.mod
index aef4a656d..7d7447040 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
 module helm.sh/helm/v4
 
-go 1.23.0
+go 1.23.7
+
+toolchain go1.23.8
 
 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
@@ -12,7 +14,7 @@ require (
 	github.com/Masterminds/vcs v1.13.3
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
 	github.com/cyphar/filepath-securejoin v0.4.1
-	github.com/distribution/distribution/v3 v3.0.0-rc.3
+	github.com/distribution/distribution/v3 v3.0.0
 	github.com/evanphx/json-patch v5.9.11+incompatible
 	github.com/fluxcd/cli-utils v0.36.0-flux.12
 	github.com/foxcpp/go-mockdns v1.1.0
@@ -129,7 +131,7 @@ require (
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
 	github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
-	github.com/redis/go-redis/v9 v9.6.3 // indirect
+	github.com/redis/go-redis/v9 v9.7.3 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
@@ -163,7 +165,7 @@ require (
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	golang.org/x/mod v0.22.0 // indirect
 	golang.org/x/net v0.37.0 // indirect
-	golang.org/x/oauth2 v0.25.0 // indirect
+	golang.org/x/oauth2 v0.28.0 // indirect
 	golang.org/x/sync v0.13.0 // indirect
 	golang.org/x/sys v0.32.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
diff --git a/go.sum b/go.sum
index 456e1cfcf..4438b96ac 100644
--- a/go.sum
+++ b/go.sum
@@ -65,6 +65,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg=
 github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
+github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
@@ -291,6 +292,7 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnA
 github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
 github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0=
 github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
+github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4=
@@ -421,6 +423,7 @@ golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
 golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

From 00db8d6d96b788598603765600b9458d3a39c6e1 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 11:01:29 +0200
Subject: [PATCH 170/192] Go mod tidy

Signed-off-by: Benoit Tigeot
---
 go.sum | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/go.sum b/go.sum
index 4438b96ac..675bed1d8 100644
--- a/go.sum
+++ b/go.sum
@@ -63,8 +63,7 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg=
-github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
+github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
 github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
@@ -290,8 +289,7 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
 github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
-github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0=
-github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
+github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
 github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
@@ -421,8 +419,7 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
 golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

From 0d43534ab76e4c285f6a797e8644d4e23d200552 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 15:58:11 +0200
Subject: [PATCH 171/192] Testing without bumping the Go version

Signed-off-by: Benoit Tigeot
---
 go.mod | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 7d7447040..f198aa17d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
 module helm.sh/helm/v4
 
-go 1.23.7
-
-toolchain go1.23.8
+go 1.23.0
 
 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24

From 365340b09242cb2d2339043fbdc40dc8dc0a060c Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 16:04:50 +0200
Subject: [PATCH 172/192] Follow distribution package requirement

go: github.com/distribution/distribution/v3@v3.0.0 requires go >= 1.23.7; switching to go1.23.8

Signed-off-by: Benoit Tigeot
---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index f198aa17d..190fcff0f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module helm.sh/helm/v4
 
-go 1.23.0
+go 1.23.7
 
 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24

From 91ecb56355d1560ad6e6dbf0464042badca70ddb Mon Sep 17 00:00:00 2001
From: Matt Farina
Date: Fri, 11 Apr 2025 15:57:53 -0400
Subject: [PATCH 173/192] Removing the alpine test chart

A .gitignore was previously set up to ignore this file. When pkg/cmd was
set up, the .gitignore was not updated. This change adds the new location
so the file continues to be ignored.

Note: the previous location is still included in the .gitignore because
developers will have a file there and we do not want that accidentally
included in a commit.
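To double-check that the new rule matches, `git check-ignore -v` can be run
against the path (an illustrative sketch, not part of the original commit;
the .gitignore line number in the output depends on the final file):

```
» git check-ignore -v pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz
.gitignore:15:pkg/cmd/testdata/testcharts/issue-7233/charts/*	pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz
```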
Signed-off-by: Matt Farina --- .gitignore | 1 + .../issue-7233/charts/alpine-0.1.0.tgz | Bin 1167 -> 0 bytes 2 files changed, 1 insertion(+) delete mode 100644 pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz diff --git a/.gitignore b/.gitignore index 75698e993..7ea0717ed 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,5 @@ bin/ vendor/ # Ignores charts pulled for dependency build tests cmd/helm/testdata/testcharts/issue-7233/charts/* +pkg/cmd/testdata/testcharts/issue-7233/charts/* .pre-commit-config.yaml diff --git a/pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz b/pkg/cmd/testdata/testcharts/issue-7233/charts/alpine-0.1.0.tgz deleted file mode 100644 index afd021846ed6a2f7cc2cf023ed188a7cf2f9d189..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1167 zcmV;A1aSKwiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI-XZsRr+&b6LmpuJto@*y$Scfr34{EN1WMOrLS6h%Fj#ugEZ zR7uK;>-D=AJjjWaZc?CYrw7fAAd37rGn{W`DC89rH2hzI$|PGX`Nh|lG)>d1>C`>b zH0?gq@#W>kXgZrsXS2~by}C$8dd9SW<{}927eIliq6m!^& zBCM*zYdlHb#8FN-#>=q*)TZUJG5nq_e9f(O23qP~Ml=20O_nnPhsrRT$8LA*?K z;hvE|`^kq}q-Cu#((`C=n7n4DsFz75OE=#y+O)c)$tX#qmv+{_Py+uq$ZOIkN&wIC zKOLuC{?F2@p8w~N4~~}Qb`Y5P()#prUJ3j+R8|}f>7gGOR5Jf++29%ek0;Y{hyRz; zY0v+&NT>eaGLg^Wqs*g{4CZKX9s&5;9q)F@4RJzEiA@{({b09CKKaVw2jU2T4eE)i2~P@50=~5F94>Y)|7*hU=(Jz&=f2yz(~mhRPLG& z$^l``HY6Z(O)I=NVezWwu#yTeFPYHL6cQQ~#zJZ$XbLm|N_jIhAXKOf%W96w?PZ}9 z=}HRCmYghJ;ubw+!yF#C=6g~bmJxi0Uu$Uy_WP$@!Gty_GKwLSVnf1qT2SIGX5n!u#05lSibC4@A1;IB5RBM25u)q{(pdm$&DMDkNr=7`uRo5Y3GPTw5$WVLa zT`M0iJ+yGU9VGsmaeZhqA3BHW$5vyVGvm)0YK`llVB1)_4?ZwG@_ktP_ppr*OaR~I zI3te2HqsSkHe!Pwx{!^ALN->1T3gR+R#u!mLgHsNjC0^p-uj?}3bm$uz=WUW;4+tHl)A88ky z+)*+DyRAVcNVz;Q2nnV^W=Oe{VkNF^%JJ1`{)O1_r<%#KM4PsLiib-khME&q@$2|a znx^s3eMj@8g!+H;?)vR_?~*b<#U9V~|Cd*@ZvUT-`}_Y{H$l1UW(C~L@2fGduigL&u(Z@M?PosbL<7QghdA0+Uf?u^1;PV^Vx z+57)w(&7JfG`j5he-1IEjjh4{KY$B^fe(YfPmK0*Itl!@&ETo%|0nq0z5h>UlS$A2 hbI39OZ5Z_Q@1>VsdigKN?*RY+|NrPaN*Dkj008|nJ|6%8 From f5aec508f5dd9e73ad668930743edb6742a87eb8 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Sat, 12 Apr 2025 09:56:15 +0200 Subject: [PATCH 174/192] Add detailed debug logging for resource readiness states Signed-off-by: Benoit Tigeot --- pkg/kube/ready.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 9cbd89913..feda44d63 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -240,6 +240,7 @@ func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName()) return false, nil } + slog.Debug("Job is completed", "namespace", job.GetNamespace(), "name", job.GetName()) return true, nil } @@ -268,7 +269,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { return false } } - + slog.Debug("Service is ready", "namespace", s.GetNamespace(), "name", s.GetName(), "clusterIP", s.Spec.ClusterIP, "externalIPs", s.Spec.ExternalIPs) return true } @@ -277,6 +278,7 @@ func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName()) return false } + slog.Debug("PersistentVolumeClaim is bound", "namespace", v.GetNamespace(), "name", v.GetName(), "phase", v.Status.Phase) return true } @@ -296,6 +298,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy slog.Debug("Deployment does not have enough pods ready", "namespace", 
dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
 		return false
 	}
+	slog.Debug("Deployment is ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
 	return true
 }
 
@@ -329,6 +332,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
 		slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
 		return false
 	}
+	slog.Debug("DaemonSet is ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
 	return true
 }
 
@@ -425,7 +429,6 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
 		slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
 		return false
 	}
-	slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
 	return true
 }

From 18ed1cf720be9f00bc61c1925d642ad188d6a6d5 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 12:20:49 +0200
Subject: [PATCH 175/192] Migrate to the latest golangci-lint and golangci-lint-action

Closes the dependabot PR: https://github.com/helm/helm/pull/30706

Signed-off-by: Benoit Tigeot
---
 .github/workflows/golangci-lint.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 6fbbd2c53..a3f6ba359 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -21,6 +21,6 @@ jobs:
         go-version: '1.23'
         check-latest: true
     - name: golangci-lint
-      uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 #pin@6.5.2
+      uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd #pin@7.0.0
      with:
-        version: v1.62
+        version: v2.0.2

From d8785481680d8fd98419a529f11b3a89f17bbb85 Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Fri, 11 Apr 2025 12:54:12 +0200
Subject: [PATCH 176/192] Migrate golangci-lint config to v2

```
WARN The configuration comments are not migrated.
WARN Details about the migration: https://golangci-lint.run/product/migration-guide/
WARN The configuration `run.timeout` is ignored. By default, in v2, the timeout is disabled.
```

I've backported the comments and the timeout.

Signed-off-by: Benoit Tigeot
---
 .golangci.yml | 80 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 49 insertions(+), 31 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index ff0dad5f6..f0d45e5ea 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,45 +1,63 @@
+version: "2"
 run:
   timeout: 10m
-
 linters:
-  disable-all: true
+  default: none
   enable:
     - dupl
-    - gofmt
-    - goimports
-    - gosimple
     - govet
     - ineffassign
     - misspell
     - nakedret
     - revive
-    - unused
     - staticcheck
-
-linters-settings:
-  gofmt:
-    simplify: true
-  goimports:
-    local-prefixes: helm.sh/helm/v4
-  dupl:
-    threshold: 400
-issues:
-  exclude-rules:
+    - unused
+  settings:
+    dupl:
+      threshold: 400
+  exclusions:
     # Helm, and the Go source code itself, sometimes uses these names outside their built-in
     # functions. As the Go source code has re-used these names it's ok for Helm to do the same.
    # Linting will look for redefinition of built-in id's but we opt-in to the ones we choose to use.
-    - linters:
-        - revive
-      text: "redefines-builtin-id: redefinition of the built-in function append"
-    - linters:
-        - revive
-      text: "redefines-builtin-id: redefinition of the built-in function clear"
-    - linters:
-        - revive
-      text: "redefines-builtin-id: redefinition of the built-in function max"
-    - linters:
-        - revive
-      text: "redefines-builtin-id: redefinition of the built-in function min"
-    - linters:
-        - revive
-      text: "redefines-builtin-id: redefinition of the built-in function new"
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    rules:
+      - linters:
+          - revive
+        text: 'redefines-builtin-id: redefinition of the built-in function append'
+      - linters:
+          - revive
+        text: 'redefines-builtin-id: redefinition of the built-in function clear'
+      - linters:
+          - revive
+        text: 'redefines-builtin-id: redefinition of the built-in function max'
+      - linters:
+          - revive
+        text: 'redefines-builtin-id: redefinition of the built-in function min'
+      - linters:
+          - revive
+        text: 'redefines-builtin-id: redefinition of the built-in function new'
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gofmt:
+      simplify: true
+    goimports:
+      local-prefixes:
+        - helm.sh/helm/v4
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$

From 7fe554e7a870bd14bea8da5fe577663156ffcc7f Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Mon, 14 Apr 2025 10:34:10 +0200
Subject: [PATCH 177/192] Fix naked return errors

> internal/third_party/dep/fs/fs.go:175:3: naked return in func `copyFile` with 59 lines of code (nakedret)

Signed-off-by: Benoit Tigeot
---
 internal/third_party/dep/fs/fs.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/internal/third_party/dep/fs/fs.go b/internal/third_party/dep/fs/fs.go
index d29bb5f87..8202ee1d5 100644
--- a/internal/third_party/dep/fs/fs.go
+++ b/internal/third_party/dep/fs/fs.go
@@ -172,28 +172,28 @@ func copyFile(src, dst string) (err error) {
 
 	in, err := os.Open(src)
 	if err != nil {
-		return
+		return err
 	}
 	defer in.Close()
 
 	out, err := os.Create(dst)
 	if err != nil {
-		return
+		return err
 	}
 
 	if _, err = io.Copy(out, in); err != nil {
 		out.Close()
-		return
+		return err
 	}
 
 	// Check for write errors on Close
 	if err = out.Close(); err != nil {
-		return
+		return err
 	}
 
 	si, err := os.Stat(src)
 	if err != nil {
-		return
+		return err
 	}
 
 	// Temporary fix for Go < 1.9
@@ -205,7 +205,7 @@ func copyFile(src, dst string) (err error) {
 	}
 
 	err = os.Chmod(dst, si.Mode())
-	return
+	return err
 }
 
 // cloneSymlink will create a new symlink that points to the resolved path of sl.
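As background on the `nakedret` finding quoted above, a minimal standalone
sketch (not part of the patch; names are invented for illustration) of why
explicit returns read better than naked returns with named return values:

```go
package main

import "fmt"

// copyish mirrors the shape of copyFile above: it declares a named return
// value `err`. In a long function, a bare `return` silently hands back
// whatever `err` happens to hold at that point; nakedret asks for the
// explicit `return err` so the data flow stays readable.
func copyish(src string) (err error) {
	if src == "" {
		err = fmt.Errorf("empty source")
		return err // explicit return, the form the patch converts naked returns into
	}
	return nil
}

func main() {
	fmt.Println(copyish(""))
}
```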
From a23b96276a8a8fd12089f1333cbdcb03ca44709c Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:34:46 +0200 Subject: [PATCH 178/192] var mu is unused (unused) Signed-off-by: Benoit Tigeot --- internal/third_party/dep/fs/fs_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/internal/third_party/dep/fs/fs_test.go b/internal/third_party/dep/fs/fs_test.go index d42c3f110..909fa4d00 100644 --- a/internal/third_party/dep/fs/fs_test.go +++ b/internal/third_party/dep/fs/fs_test.go @@ -36,14 +36,9 @@ import ( "os/exec" "path/filepath" "runtime" - "sync" "testing" ) -var ( - mu sync.Mutex -) - func TestRenameWithFallback(t *testing.T) { dir := t.TempDir() From b2ac216763761d8d7b5af73a426da87c4264a232 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:35:32 +0200 Subject: [PATCH 179/192] func cleanUpDir is unused Signed-off-by: Benoit Tigeot --- internal/third_party/dep/fs/fs_test.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/internal/third_party/dep/fs/fs_test.go b/internal/third_party/dep/fs/fs_test.go index 909fa4d00..22c59868c 100644 --- a/internal/third_party/dep/fs/fs_test.go +++ b/internal/third_party/dep/fs/fs_test.go @@ -33,7 +33,6 @@ package fs import ( "os" - "os/exec" "path/filepath" "runtime" "testing" @@ -355,19 +354,6 @@ func TestCopyFile(t *testing.T) { } } -func cleanUpDir(dir string) { - // NOTE(mattn): It seems that sometimes git.exe is not dead - // when cleanUpDir() is called. But we do not know any way to wait for it. - if runtime.GOOS == "windows" { - mu.Lock() - exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run() - mu.Unlock() - } - if dir != "" { - os.RemoveAll(dir) - } -} - func TestCopyFileSymlink(t *testing.T) { tempdir := t.TempDir() From a9b77323671ffd93b7d5c423772676e839f4679c Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:39:09 +0200 Subject: [PATCH 180/192] could remove embedded field X from selector Signed-off-by: Benoit Tigeot --- pkg/action/install.go | 6 +++--- pkg/action/show.go | 4 ++-- pkg/action/upgrade.go | 4 ++-- pkg/chart/v2/util/save.go | 4 ++-- pkg/cmd/flags.go | 6 +++--- pkg/cmd/install.go | 4 ++-- pkg/cmd/show.go | 2 +- pkg/cmd/upgrade.go | 4 ++-- pkg/kube/ready.go | 20 ++++++++++---------- pkg/plugin/installer/http_installer.go | 2 +- pkg/repo/index_test.go | 8 ++++---- pkg/storage/driver/cfgmaps.go | 6 +++--- pkg/storage/driver/mock_test.go | 12 ++++++------ pkg/storage/driver/secrets.go | 6 +++--- pkg/storage/storage.go | 10 +++++----- pkg/time/time.go | 2 +- 16 files changed, 50 insertions(+), 50 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index 25c48c762..d05aae505 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -143,19 +143,19 @@ func NewInstall(cfg *Configuration) *Install { in := &Install{ cfg: cfg, } - in.ChartPathOptions.registryClient = cfg.RegistryClient + in.registryClient = cfg.RegistryClient return in } // SetRegistryClient sets the registry client for the install action func (i *Install) SetRegistryClient(registryClient *registry.Client) { - i.ChartPathOptions.registryClient = registryClient + i.registryClient = registryClient } // GetRegistryClient get the registry client. 
func (i *Install) GetRegistryClient() *registry.Client { - return i.ChartPathOptions.registryClient + return i.registryClient } func (i *Install) installCRDs(crds []chart.CRD) error { diff --git a/pkg/action/show.go b/pkg/action/show.go index 8f9da58e9..f9843941b 100644 --- a/pkg/action/show.go +++ b/pkg/action/show.go @@ -69,14 +69,14 @@ func NewShow(output ShowOutputFormat, cfg *Configuration) *Show { sh := &Show{ OutputFormat: output, } - sh.ChartPathOptions.registryClient = cfg.RegistryClient + sh.registryClient = cfg.RegistryClient return sh } // SetRegistryClient sets the registry client to use when pulling a chart from a registry. func (s *Show) SetRegistryClient(client *registry.Client) { - s.ChartPathOptions.registryClient = client + s.registryClient = client } // Run executes 'helm show' against the given release. diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index ea09c8ed0..b32bf256e 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -132,14 +132,14 @@ func NewUpgrade(cfg *Configuration) *Upgrade { up := &Upgrade{ cfg: cfg, } - up.ChartPathOptions.registryClient = cfg.RegistryClient + up.registryClient = cfg.RegistryClient return up } // SetRegistryClient sets the registry client to use when fetching charts. func (u *Upgrade) SetRegistryClient(client *registry.Client) { - u.ChartPathOptions.registryClient = client + u.registryClient = client } // Run executes the upgrade on the given release. diff --git a/pkg/chart/v2/util/save.go b/pkg/chart/v2/util/save.go index e1285ac88..dfa10915f 100644 --- a/pkg/chart/v2/util/save.go +++ b/pkg/chart/v2/util/save.go @@ -130,8 +130,8 @@ func Save(c *chart.Chart, outDir string) (string, error) { // Wrap in gzip writer zipper := gzip.NewWriter(f) - zipper.Header.Extra = headerBytes - zipper.Header.Comment = "Helm" + zipper.Extra = headerBytes + zipper.Comment = "Helm" // Wrap in tar writer twriter := tar.NewWriter(zipper) diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index eb829c21e..74c3c8352 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -260,7 +260,7 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect var versions []string if indexFile, err := repo.LoadIndexFile(path); err == nil { for _, details := range indexFile.Entries[chartName] { - appVersion := details.Metadata.AppVersion + appVersion := details.AppVersion appVersionDesc := "" if appVersion != "" { appVersionDesc = fmt.Sprintf("App: %s, ", appVersion) @@ -271,10 +271,10 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect createdDesc = fmt.Sprintf("Created: %s ", created) } deprecated := "" - if details.Metadata.Deprecated { + if details.Deprecated { deprecated = "(deprecated)" } - versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Metadata.Version, appVersionDesc, createdDesc, deprecated)) + versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Version, appVersionDesc, createdDesc, deprecated)) } } diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index ee018c88a..e35df7801 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -242,7 +242,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options } client.ReleaseName = name - cp, err := client.ChartPathOptions.LocateChart(chart, settings) + cp, err := client.LocateChart(chart, settings) if err != nil { return nil, err } @@ -279,7 +279,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options man := &downloader.Manager{ Out: out, ChartPath: cp, - 
Keyring: client.ChartPathOptions.Keyring, + Keyring: client.Keyring, SkipUpdate: false, Getters: p, RepositoryConfig: settings.RepositoryConfig, diff --git a/pkg/cmd/show.go b/pkg/cmd/show.go index 22d8bee49..1c7e7be44 100644 --- a/pkg/cmd/show.go +++ b/pkg/cmd/show.go @@ -218,7 +218,7 @@ func runShow(args []string, client *action.Show) (string, error) { client.Version = ">0.0.0-0" } - cp, err := client.ChartPathOptions.LocateChart(args[0], settings) + cp, err := client.LocateChart(args[0], settings) if err != nil { return "", err } diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 2e0f16212..d1f2ad8e3 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -178,7 +178,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { client.Version = ">0.0.0-0" } - chartPath, err := client.ChartPathOptions.LocateChart(args[1], settings) + chartPath, err := client.LocateChart(args[1], settings) if err != nil { return err } @@ -205,7 +205,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { man := &downloader.Manager{ Out: out, ChartPath: chartPath, - Keyring: client.ChartPathOptions.Keyring, + Keyring: client.Keyring, SkipUpdate: false, Getters: p, RepositoryConfig: settings.RepositoryConfig, diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index feda44d63..11df4371c 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -288,8 +288,8 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy return false } // Verify the generation observed by the deployment controller matches the spec generation - if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { - slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.ObjectMeta.Generation) + if dep.Status.ObservedGeneration != dep.Generation { + slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.Generation) return false } @@ -304,8 +304,8 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { // Verify the generation observed by the daemonSet controller matches the spec generation - if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { - slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.ObjectMeta.Generation) + if ds.Status.ObservedGeneration != ds.Generation { + slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.Generation) return false } @@ -381,8 +381,8 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { // Verify the generation observed by the statefulSet controller matches the spec generation - if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), 
"name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.ObjectMeta.Generation) + if sts.Status.ObservedGeneration != sts.Generation { + slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.Generation) return false } @@ -435,8 +435,8 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { // Verify the generation observed by the replicationController controller matches the spec generation - if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { - slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.ObjectMeta.Generation) + if rc.Status.ObservedGeneration != rc.Generation { + slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.Generation) return false } return true @@ -444,8 +444,8 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { // Verify the generation observed by the replicaSet controller matches the spec generation - if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { - slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.ObjectMeta.Generation) + if rs.Status.ObservedGeneration != rs.Generation { + slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.Generation) return false } return true diff --git a/pkg/plugin/installer/http_installer.go b/pkg/plugin/installer/http_installer.go index cc45787bf..36bbba651 100644 --- a/pkg/plugin/installer/http_installer.go +++ b/pkg/plugin/installer/http_installer.go @@ -157,7 +157,7 @@ func (i *HTTPInstaller) Update() error { // Path is overridden because we want to join on the plugin name not the file name func (i HTTPInstaller) Path() string { - if i.base.Source == "" { + if i.Source == "" { return "" } return helmpath.DataPath("plugins", i.PluginName) diff --git a/pkg/repo/index_test.go b/pkg/repo/index_test.go index f50c7e65e..2a33cd1a9 100644 --- a/pkg/repo/index_test.go +++ b/pkg/repo/index_test.go @@ -123,17 +123,17 @@ func TestIndexFile(t *testing.T) { } cv, err := i.Get("setter", "0.1.9") - if err == nil && !strings.Contains(cv.Metadata.Version, "0.1.9") { - t.Errorf("Unexpected version: %s", cv.Metadata.Version) + if err == nil && !strings.Contains(cv.Version, "0.1.9") { + t.Errorf("Unexpected version: %s", cv.Version) } cv, err = i.Get("setter", "0.1.9+alpha") - if err != nil || cv.Metadata.Version != "0.1.9+alpha" { + if err != nil || cv.Version != "0.1.9+alpha" { t.Errorf("Expected version: 0.1.9+alpha") } cv, err = i.Get("setter", "0.1.8") - if err != nil || cv.Metadata.Version != "0.1.8" { + if err != nil || cv.Version != 
"0.1.8" { t.Errorf("Expected version: 0.1.8") } } diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 3e4acfd81..ca9f0fa28 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -78,7 +78,7 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) { slog.Debug("failed to decode data", "key", key, slog.Any("error", err)) return nil, err } - r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) + r.Labels = filterSystemLabels(obj.Labels) // return the release object return r, nil } @@ -107,7 +107,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas continue } - rls.Labels = item.ObjectMeta.Labels + rls.Labels = item.Labels if filter(rls) { results = append(results, rls) @@ -146,7 +146,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err slog.Debug("failed to decode release", slog.Any("error", err)) continue } - rls.Labels = item.ObjectMeta.Labels + rls.Labels = item.Labels results = append(results, rls) } return results, nil diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index 54fda0542..1dda258bb 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -130,7 +130,7 @@ func (mock *MockConfigMapsInterface) List(_ context.Context, opts metav1.ListOpt } for _, cfgmap := range mock.objects { - if labelSelector.Matches(kblabels.Set(cfgmap.ObjectMeta.Labels)) { + if labelSelector.Matches(kblabels.Set(cfgmap.Labels)) { list.Items = append(list.Items, *cfgmap) } } @@ -139,7 +139,7 @@ func (mock *MockConfigMapsInterface) List(_ context.Context, opts metav1.ListOpt // Create creates a new ConfigMap. func (mock *MockConfigMapsInterface) Create(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.CreateOptions) (*v1.ConfigMap, error) { - name := cfgmap.ObjectMeta.Name + name := cfgmap.Name if object, ok := mock.objects[name]; ok { return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name) } @@ -149,7 +149,7 @@ func (mock *MockConfigMapsInterface) Create(_ context.Context, cfgmap *v1.Config // Update updates a ConfigMap. func (mock *MockConfigMapsInterface) Update(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.UpdateOptions) (*v1.ConfigMap, error) { - name := cfgmap.ObjectMeta.Name + name := cfgmap.Name if _, ok := mock.objects[name]; !ok { return nil, apierrors.NewNotFound(v1.Resource("tests"), name) } @@ -216,7 +216,7 @@ func (mock *MockSecretsInterface) List(_ context.Context, opts metav1.ListOption } for _, secret := range mock.objects { - if labelSelector.Matches(kblabels.Set(secret.ObjectMeta.Labels)) { + if labelSelector.Matches(kblabels.Set(secret.Labels)) { list.Items = append(list.Items, *secret) } } @@ -225,7 +225,7 @@ func (mock *MockSecretsInterface) List(_ context.Context, opts metav1.ListOption // Create creates a new Secret. func (mock *MockSecretsInterface) Create(_ context.Context, secret *v1.Secret, _ metav1.CreateOptions) (*v1.Secret, error) { - name := secret.ObjectMeta.Name + name := secret.Name if object, ok := mock.objects[name]; ok { return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name) } @@ -235,7 +235,7 @@ func (mock *MockSecretsInterface) Create(_ context.Context, secret *v1.Secret, _ // Update updates a Secret. 
func (mock *MockSecretsInterface) Update(_ context.Context, secret *v1.Secret, _ metav1.UpdateOptions) (*v1.Secret, error) { - name := secret.ObjectMeta.Name + name := secret.Name if _, ok := mock.objects[name]; !ok { return nil, apierrors.NewNotFound(v1.Resource("tests"), name) } diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index a69f1ed65..4af38a66e 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -72,7 +72,7 @@ func (secrets *Secrets) Get(key string) (*rspb.Release, error) { } // found the secret, decode the base64 data string r, err := decodeRelease(string(obj.Data["release"])) - r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) + r.Labels = filterSystemLabels(obj.Labels) return r, errors.Wrapf(err, "get: failed to decode data %q", key) } @@ -99,7 +99,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, continue } - rls.Labels = item.ObjectMeta.Labels + rls.Labels = item.Labels if filter(rls) { results = append(results, rls) @@ -137,7 +137,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) continue } - rls.Labels = item.ObjectMeta.Labels + rls.Labels = item.Labels results = append(results, rls) } return results, nil diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index f98daeba6..927b33c44 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -88,14 +88,14 @@ func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { // storage backend fails to retrieve the releases. func (s *Storage) ListReleases() ([]*rspb.Release, error) { slog.Debug("listing all releases in storage") - return s.Driver.List(func(_ *rspb.Release) bool { return true }) + return s.List(func(_ *rspb.Release) bool { return true }) } // ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned // if the storage backend fails to retrieve the releases. func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { slog.Debug("listing uninstalled releases in storage") - return s.Driver.List(func(rls *rspb.Release) bool { + return s.List(func(rls *rspb.Release) bool { return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) }) } @@ -104,7 +104,7 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { // if the storage backend fails to retrieve the releases. 
func (s *Storage) ListDeployed() ([]*rspb.Release, error) { slog.Debug("listing all deployed releases in storage") - return s.Driver.List(func(rls *rspb.Release) bool { + return s.List(func(rls *rspb.Release) bool { return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) }) } @@ -133,7 +133,7 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) { func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) { slog.Debug("getting deployed releases", "name", name) - ls, err := s.Driver.Query(map[string]string{ + ls, err := s.Query(map[string]string{ "name": name, "owner": "helm", "status": "deployed", @@ -152,7 +152,7 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) { func (s *Storage) History(name string) ([]*rspb.Release, error) { slog.Debug("getting release history", "name", name) - return s.Driver.Query(map[string]string{"name": name, "owner": "helm"}) + return s.Query(map[string]string{"name": name, "owner": "helm"}) } // removeLeastRecent removes items from history until the length number of releases diff --git a/pkg/time/time.go b/pkg/time/time.go index 13b1211e6..5b3a0ccdc 100644 --- a/pkg/time/time.go +++ b/pkg/time/time.go @@ -41,7 +41,7 @@ func Now() Time { } func (t Time) MarshalJSON() ([]byte, error) { - if t.Time.IsZero() { + if t.IsZero() { return []byte(emptyString), nil } From a6d0335bbb464f324816e2a001b9216d46481e5f Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:49:49 +0200 Subject: [PATCH 181/192] Use fmt.Fprintf(...) instead of ... Signed-off-by: Benoit Tigeot --- pkg/action/install.go | 2 +- pkg/cmd/template.go | 2 +- pkg/registry/utils_test.go | 6 ++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index d05aae505..5f159c382 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -628,7 +628,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er defer f.Close() - _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data)) + _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data) if err != nil { return err diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index 25ff31ade..f96b25e30 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -230,7 +230,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er defer f.Close() - _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data)) + _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data) if err != nil { return err diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go index 8e6943222..fe07c769a 100644 --- a/pkg/registry/utils_test.go +++ b/pkg/registry/utils_test.go @@ -184,9 +184,7 @@ func initCompromisedRegistryTestServer() string { w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json") w.WriteHeader(200) - // layers[0] is the blob []byte("a") - w.Write([]byte( - fmt.Sprintf(`{ "schemaVersion": 2, "config": { + fmt.Fprintf(w, `{ "schemaVersion": 2, "config": { "mediaType": "%s", "digest": "sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133", "size": 181 @@ -198,7 +196,7 @@ func initCompromisedRegistryTestServer() string { "size": 1 } ] -}`, ConfigMediaType, ChartLayerMediaType))) +}`, ConfigMediaType, ChartLayerMediaType) } else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133" { w.Header().Set("Content-Type", "application/json") 
w.WriteHeader(200) From 1654664b782e45e162f51a2abbdd7ca82ea82793 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:51:21 +0200 Subject: [PATCH 182/192] could use strings.ReplaceAll instead Signed-off-by: Benoit Tigeot --- internal/test/test.go | 2 +- pkg/cmd/docs.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/test/test.go b/internal/test/test.go index e6821282c..e071d3160 100644 --- a/internal/test/test.go +++ b/internal/test/test.go @@ -92,5 +92,5 @@ func update(filename string, in []byte) error { } func normalize(in []byte) []byte { - return bytes.Replace(in, []byte("\r\n"), []byte("\n"), -1) + return bytes.ReplaceAll(in, []byte("\r\n"), []byte("\n")) } diff --git a/pkg/cmd/docs.go b/pkg/cmd/docs.go index b3fd773f9..a22d96c4d 100644 --- a/pkg/cmd/docs.go +++ b/pkg/cmd/docs.go @@ -86,7 +86,7 @@ func (o *docsOptions) run(_ io.Writer) error { hdrFunc := func(filename string) string { base := filepath.Base(filename) name := strings.TrimSuffix(base, path.Ext(base)) - title := cases.Title(language.Und, cases.NoLower).String(strings.Replace(name, "_", " ", -1)) + title := cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(name, "_", " ")) return fmt.Sprintf("---\ntitle: \"%s\"\n---\n\n", title) } From 374805deb4f1cd43a3310c6d42117929a724eb3d Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:51:55 +0200 Subject: [PATCH 183/192] error strings should not be capitalized Signed-off-by: Benoit Tigeot --- pkg/cmd/repo_update.go | 2 +- pkg/cmd/repo_update_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/repo_update.go b/pkg/cmd/repo_update.go index 12de2bdaa..a905406cd 100644 --- a/pkg/cmd/repo_update.go +++ b/pkg/cmd/repo_update.go @@ -137,7 +137,7 @@ func updateCharts(repos []*repo.ChartRepository, out io.Writer) error { } if len(repoFailList) > 0 { - return fmt.Errorf("Failed to update the following repositories: %s", + return fmt.Errorf("failed to update the following repositories: %s", repoFailList) } diff --git a/pkg/cmd/repo_update_test.go b/pkg/cmd/repo_update_test.go index aa8f52beb..b0deff1ae 100644 --- a/pkg/cmd/repo_update_test.go +++ b/pkg/cmd/repo_update_test.go @@ -193,7 +193,7 @@ func TestUpdateChartsFailWithError(t *testing.T) { t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set") return } - var expectedErr = "Failed to update the following repositories" + var expectedErr = "failed to update the following repositories" var receivedErr = err.Error() if !strings.Contains(receivedErr, expectedErr) { t.Errorf("Expected error (%s) but got (%s) instead", expectedErr, receivedErr) From eb65ce280bdd5c809b2ed2a6d9c44a721a4444f6 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:53:46 +0200 Subject: [PATCH 184/192] could apply De Morgan's law Signed-off-by: Benoit Tigeot --- pkg/cmd/repo_list.go | 2 +- pkg/kube/ready.go | 4 ++-- pkg/lint/rules/template.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cmd/repo_list.go b/pkg/cmd/repo_list.go index 5b6113a13..71324dc85 100644 --- a/pkg/cmd/repo_list.go +++ b/pkg/cmd/repo_list.go @@ -39,7 +39,7 @@ func newRepoListCmd(out io.Writer) *cobra.Command { ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { f, _ := repo.LoadFile(settings.RepositoryConfig) - if len(f.Repositories) == 0 && !(outfmt == output.JSON || outfmt == output.YAML) { + if len(f.Repositories) == 0 && outfmt != 
output.JSON && outfmt != output.YAML { return errors.New("no repositories to show") } diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 11df4371c..7a06c72f9 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -294,7 +294,7 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy } expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) - if !(rs.Status.ReadyReplicas >= expectedReady) { + if rs.Status.ReadyReplicas < expectedReady { slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady) return false } @@ -328,7 +328,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { } expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable - if !(int(ds.Status.NumberReady) >= expectedReady) { + if int(ds.Status.NumberReady) < expectedReady { slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady) return false } diff --git a/pkg/lint/rules/template.go b/pkg/lint/rules/template.go index 4d421f5bf..81a18b411 100644 --- a/pkg/lint/rules/template.go +++ b/pkg/lint/rules/template.go @@ -287,7 +287,7 @@ func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error { switch yamlStruct.Kind { case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet": // verify that matchLabels or matchExpressions is present - if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) { + if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") { return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name) } } From a1c2f47c08b643812fa80464fd49de6d4d927d66 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 14 Apr 2025 10:55:42 +0200 Subject: [PATCH 185/192] avoid using reflect.DeepEqual with errors Signed-off-by: Benoit Tigeot --- pkg/storage/driver/cfgmaps_test.go | 5 ++--- pkg/storage/driver/secrets_test.go | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/storage/driver/cfgmaps_test.go b/pkg/storage/driver/cfgmaps_test.go index 8ba6832fa..a563eb7d9 100644 --- a/pkg/storage/driver/cfgmaps_test.go +++ b/pkg/storage/driver/cfgmaps_test.go @@ -16,6 +16,7 @@ package driver import ( "encoding/base64" "encoding/json" + "errors" "reflect" "testing" @@ -242,10 +243,8 @@ func TestConfigMapDelete(t *testing.T) { if !reflect.DeepEqual(rel, rls) { t.Errorf("Expected {%v}, got {%v}", rel, rls) } - - // fetch the deleted release _, err = cfgmaps.Get(key) - if !reflect.DeepEqual(ErrReleaseNotFound, err) { + if !errors.Is(err, ErrReleaseNotFound) { t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err) } } diff --git a/pkg/storage/driver/secrets_test.go b/pkg/storage/driver/secrets_test.go index 7affc81ab..9e45bae67 100644 --- a/pkg/storage/driver/secrets_test.go +++ b/pkg/storage/driver/secrets_test.go @@ -16,6 +16,7 @@ package driver import ( "encoding/base64" "encoding/json" + "errors" "reflect" "testing" @@ -242,10 +243,8 @@ func TestSecretDelete(t *testing.T) { if !reflect.DeepEqual(rel, rls) { t.Errorf("Expected {%v}, got {%v}", rel, rls) } - - // fetch the deleted release _, err = secrets.Get(key) - if !reflect.DeepEqual(ErrReleaseNotFound, err) { + if !errors.Is(err, ErrReleaseNotFound) { t.Errorf("Expected 
{%v}, got {%v}", ErrReleaseNotFound, err) } } From 0dffc580b0aec957b686fc08be681d3f74707749 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Mon, 14 Apr 2025 19:28:47 -0400 Subject: [PATCH 186/192] Simpligy the JSON Schema checking A new library was introduced that provides JSON Schema checking for newer versions of the schema. In Helm v4, there is no need to have two packages doing the JSON schema validation. The message output can have breaking changes. This change moves everything to the newer library. It also uses a wrapper error to enable a clean Helm only interface for the public Go API validation functions. This would enable the replacement of the Schema validation library, if needed, without breaking the Go API contract. Signed-off-by: Matt Farina --- Makefile | 2 +- go.mod | 3 - go.sum | 7 -- pkg/chart/v2/util/jsonschema.go | 74 +++++++------------ pkg/chart/v2/util/jsonschema_test.go | 12 +-- .../testdata/output/schema-negative-cli.txt | 2 +- pkg/cmd/testdata/output/schema-negative.txt | 4 +- .../output/subchart-schema-cli-negative.txt | 2 +- .../output/subchart-schema-negative.txt | 4 +- pkg/lint/rules/values_test.go | 4 +- 10 files changed, 41 insertions(+), 73 deletions(-) diff --git a/Makefile b/Makefile index 21144cf5a..0785fdb2e 100644 --- a/Makefile +++ b/Makefile @@ -156,7 +156,7 @@ format: $(GOIMPORTS) # Generate golden files used in unit tests .PHONY: gen-test-golden gen-test-golden: -gen-test-golden: PKG = ./cmd/helm ./pkg/action +gen-test-golden: PKG = ./pkg/cmd ./pkg/action gen-test-golden: TESTFLAGS = -update gen-test-golden: test-unit diff --git a/go.mod b/go.mod index ad119b6b8..36455fdba 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,6 @@ require ( github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 - github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.37.0 golang.org/x/term v0.31.0 golang.org/x/text v0.24.0 @@ -136,8 +135,6 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect diff --git a/go.sum b/go.sum index 4fcd483a4..2cf584745 100644 --- a/go.sum +++ b/go.sum @@ -326,13 +326,6 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= 
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go index 66ab42542..a8baef0f6 100644 --- a/pkg/chart/v2/util/jsonschema.go +++ b/pkg/chart/v2/util/jsonschema.go @@ -18,14 +18,11 @@ package util import ( "bytes" - "encoding/json" "fmt" "strings" "github.com/pkg/errors" "github.com/santhosh-tekuri/jsonschema/v6" - "github.com/xeipuuv/gojsonschema" - "sigs.k8s.io/yaml" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -64,69 +61,50 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error } }() - valuesData, err := yaml.Marshal(values) + // This unmarshal function leverages UseNumber() for number precision. The parser + // used for values does this as well. + schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON)) if err != nil { return err } - valuesJSON, err := yaml.YAMLToJSON(valuesData) + + compiler := jsonschema.NewCompiler() + err = compiler.AddResource("file:///values.schema.json", schema) if err != nil { return err } - if bytes.Equal(valuesJSON, []byte("null")) { - valuesJSON = []byte("{}") - } - - if schemaIs2020(schemaJSON) { - return validateUsingNewValidator(valuesJSON, schemaJSON) - } - - schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) - valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) - result, err := gojsonschema.Validate(schemaLoader, valuesLoader) + validator, err := compiler.Compile("file:///values.schema.json") if err != nil { return err } - if !result.Valid() { - var sb strings.Builder - for _, desc := range result.Errors() { - sb.WriteString(fmt.Sprintf("- %s\n", desc)) - } - return errors.New(sb.String()) + err = validator.Validate(values.AsMap()) + if err != nil { + return JSONSchemaValidationError{err} } return nil } -func validateUsingNewValidator(valuesJSON, schemaJSON []byte) error { - schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON)) - if err != nil { - return err - } - values, err := jsonschema.UnmarshalJSON(bytes.NewReader(valuesJSON)) - if err != nil { - return err - } +// Note, JSONSchemaValidationError is used to wrap the error from the underlying +// validation package so that Helm has a clean interface and the validation package +// could be replaced without changing the Helm SDK API. - compiler := jsonschema.NewCompiler() - err = compiler.AddResource("file:///values.schema.json", schema) - if err != nil { - return err - } +// JSONSchemaValidationError is the error returned when there is a schema validation +// error. +type JSONSchemaValidationError struct { + embeddedErr error +} - validator, err := compiler.Compile("file:///values.schema.json") - if err != nil { - return err - } +// Error prints the error message +func (e JSONSchemaValidationError) Error() string { + errStr := e.embeddedErr.Error() - return validator.Validate(values) -} + // This string prefixes all of our error details. Further up the stack of helm error message + // building more detail is provided to users. This is removed. 
+ errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n") -func schemaIs2020(schemaJSON []byte) bool { - var partialSchema struct { - Schema string `json:"$schema"` - } - _ = json.Unmarshal(schemaJSON, &partialSchema) - return partialSchema.Schema == "https://json-schema.org/draft/2020-12/schema" + // The extra new line is needed for when there are sub-charts. + return errStr + "\n" } diff --git a/pkg/chart/v2/util/jsonschema_test.go b/pkg/chart/v2/util/jsonschema_test.go index 6337ab259..d781aa4be 100644 --- a/pkg/chart/v2/util/jsonschema_test.go +++ b/pkg/chart/v2/util/jsonschema_test.go @@ -69,7 +69,7 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) { } schema, err := os.ReadFile("./testdata/test-values.schema.json") if err != nil { - t.Fatalf("Error reading YAML file: %s", err) + t.Fatalf("Error reading JSON file: %s", err) } var errString string @@ -79,8 +79,8 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) { errString = err.Error() } - expectedErrString := `- (root): employmentInfo is required -- age: Must be greater than or equal to 0 + expectedErrString := `- at '': missing property 'employmentInfo' +- at '/age': minimum: got -5, want 0 ` if errString != expectedErrString { t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) @@ -174,7 +174,7 @@ func TestValidateAgainstSchemaNegative(t *testing.T) { } expectedErrString := `subchart: -- (root): age is required +- at '': missing property 'age' ` if errString != expectedErrString { t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) @@ -238,9 +238,9 @@ func TestValidateAgainstSchema2020Negative(t *testing.T) { } expectedErrString := `subchart: -jsonschema validation failed with 'file:///values.schema.json#' - at '/data': no items match contains schema - - at '/data/0': got number, want string` + - at '/data/0': got number, want string +` if errString != expectedErrString { t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) } diff --git a/pkg/cmd/testdata/output/schema-negative-cli.txt b/pkg/cmd/testdata/output/schema-negative-cli.txt index c4a5cc516..12bcc5103 100644 --- a/pkg/cmd/testdata/output/schema-negative-cli.txt +++ b/pkg/cmd/testdata/output/schema-negative-cli.txt @@ -1,4 +1,4 @@ Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s): empty: -- age: Must be greater than or equal to 0 +- at '/age': minimum: got -5, want 0 diff --git a/pkg/cmd/testdata/output/schema-negative.txt b/pkg/cmd/testdata/output/schema-negative.txt index 929af5518..daf132635 100644 --- a/pkg/cmd/testdata/output/schema-negative.txt +++ b/pkg/cmd/testdata/output/schema-negative.txt @@ -1,5 +1,5 @@ Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s): empty: -- (root): employmentInfo is required -- age: Must be greater than or equal to 0 +- at '': missing property 'employmentInfo' +- at '/age': minimum: got -5, want 0 diff --git a/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt b/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt index 7396b4bfe..179550f69 100644 --- a/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt +++ b/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt @@ -1,4 +1,4 @@ Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s): subchart-with-schema: -- 
age: Must be greater than or equal to 0 +- at '/age': minimum: got -25, want 0 diff --git a/pkg/cmd/testdata/output/subchart-schema-negative.txt b/pkg/cmd/testdata/output/subchart-schema-negative.txt index 7b1f654a2..7522ef3e4 100644 --- a/pkg/cmd/testdata/output/subchart-schema-negative.txt +++ b/pkg/cmd/testdata/output/subchart-schema-negative.txt @@ -1,6 +1,6 @@ Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s): chart-without-schema: -- (root): lastname is required +- at '': missing property 'lastname' subchart-with-schema: -- (root): age is required +- at '': missing property 'age' diff --git a/pkg/lint/rules/values_test.go b/pkg/lint/rules/values_test.go index 8a2556a60..348695785 100644 --- a/pkg/lint/rules/values_test.go +++ b/pkg/lint/rules/values_test.go @@ -96,7 +96,7 @@ func TestValidateValuesFileSchemaFailure(t *testing.T) { t.Fatal("expected values file to fail parsing") } - assert.Contains(t, err.Error(), "Expected: string, given: integer", "integer should be caught by schema") + assert.Contains(t, err.Error(), "- at '/username': got number, want string") } func TestValidateValuesFileSchemaOverrides(t *testing.T) { @@ -129,7 +129,7 @@ func TestValidateValuesFile(t *testing.T) { name: "value not overridden", yaml: "username: admin\npassword:", overrides: map[string]interface{}{"username": "anotherUser"}, - errorMessage: "Expected: string, given: null", + errorMessage: "- at '/password': got null, want string", }, { name: "value overridden", From 28742b170625447819d5860cc0bb7f0030760c50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Apr 2025 21:59:47 +0000 Subject: [PATCH 187/192] build(deps): bump github.com/rubenv/sql-migrate from 1.7.1 to 1.7.2 Bumps [github.com/rubenv/sql-migrate](https://github.com/rubenv/sql-migrate) from 1.7.1 to 1.7.2. - [Commits](https://github.com/rubenv/sql-migrate/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: github.com/rubenv/sql-migrate dependency-version: 1.7.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ad119b6b8..ee64f502d 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/opencontainers/image-spec v1.1.1 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 github.com/pkg/errors v0.9.1 - github.com/rubenv/sql-migrate v1.7.1 + github.com/rubenv/sql-migrate v1.7.2 github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 diff --git a/go.sum b/go.sum index 4fcd483a4..8dbba067a 100644 --- a/go.sum +++ b/go.sum @@ -295,8 +295,8 @@ github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0 github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4= -github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4= +github.com/rubenv/sql-migrate v1.7.2 h1:HOjuq5BmSVQHX14s/U3iS4I3YhP+h89Lg6QawwUFvyc= +github.com/rubenv/sql-migrate v1.7.2/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= From 3102c28804c7973178876c5ae65656c83975dacc Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Wed, 2 Apr 2025 11:00:56 +0300 Subject: [PATCH 188/192] fix: allow signing multiple charts with passphrase from stdin. Cache the signing key passphrase. When signing multiple charts with the passphrase from stdin, this allows signing all charts instead of all but the first failing with an error about stdin already having been closed. Signed-off-by: Krisztian Litkey --- pkg/action/package.go | 48 +++++++++++++++++++++++++++----------- pkg/action/package_test.go | 39 ++++++++++++++++++++++++++++--- 2 files changed, 71 insertions(+), 16 deletions(-) diff --git a/pkg/action/package.go b/pkg/action/package.go index 9ffe1722e..8f37779e6 100644 --- a/pkg/action/package.go +++ b/pkg/action/package.go @@ -39,6 +39,7 @@ type Package struct { Key string Keyring string PassphraseFile string + cachedPassphrase []byte Version string AppVersion string Destination string @@ -55,6 +56,10 @@ type Package struct { InsecureSkipTLSverify bool } +const ( + passPhraseFileStdin = "-" +) + // NewPackage creates a new Package object with the given configuration. 
 func NewPackage() *Package {
 	return &Package{}
@@ -128,7 +133,7 @@ func (p *Package) Clearsign(filename string) error {
 	passphraseFetcher := promptUser
 	if p.PassphraseFile != "" {
-		passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+		passphraseFetcher, err = p.passphraseFileFetcher(p.PassphraseFile, os.Stdin)
 		if err != nil {
 			return err
 		}
@@ -156,25 +161,42 @@ func promptUser(name string) ([]byte, error) {
 	return pw, err
 }

-func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
-	file, err := openPassphraseFile(passphraseFile, stdin)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
+func (p *Package) passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+	// When reading from stdin we cache the passphrase here. If we are
+	// packaging multiple charts, we reuse the cached passphrase. This
+	// allows giving the passphrase once on stdin without failing with
+	// complaints about stdin already being closed.
+	//
+	// An alternative to this would be to omit file.Close() for stdin
+	// below and require the user to provide the same passphrase once
+	// per chart on stdin, but that does not seem very user-friendly.
+
+	if p.cachedPassphrase == nil {
+		file, err := openPassphraseFile(passphraseFile, stdin)
+		if err != nil {
+			return nil, err
+		}
+		defer file.Close()

-	reader := bufio.NewReader(file)
-	passphrase, _, err := reader.ReadLine()
-	if err != nil {
-		return nil, err
+		reader := bufio.NewReader(file)
+		passphrase, _, err := reader.ReadLine()
+		if err != nil {
+			return nil, err
+		}
+		p.cachedPassphrase = passphrase
+
+		return func(_ string) ([]byte, error) {
+			return passphrase, nil
+		}, nil
 	}
+
 	return func(_ string) ([]byte, error) {
-		return passphrase, nil
+		return p.cachedPassphrase, nil
 	}, nil
 }

 func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
-	if passphraseFile == "-" {
+	if passphraseFile == passPhraseFileStdin {
 		stat, err := stdin.Stat()
 		if err != nil {
 			return nil, err
diff --git a/pkg/action/package_test.go b/pkg/action/package_test.go
index 26eeb1a2b..12bea10dd 100644
--- a/pkg/action/package_test.go
+++ b/pkg/action/package_test.go
@@ -29,8 +29,9 @@ import (
 func TestPassphraseFileFetcher(t *testing.T) {
 	secret := "secret"
 	directory := ensure.TempFile(t, "passphrase-file", []byte(secret))
+	testPkg := NewPackage()

-	fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+	fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
 	if err != nil {
 		t.Fatal("Unable to create passphraseFileFetcher", err)
 	}
@@ -48,8 +49,9 @@ func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
 	secret := "secret"
 	directory := ensure.TempFile(t, "passphrase-file", []byte(secret+"\n\n."))
+	testPkg := NewPackage()

-	fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+	fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
 	if err != nil {
 		t.Fatal("Unable to create passphraseFileFetcher", err)
 	}
@@ -66,17 +68,48 @@ func TestPassphraseFileFetcher_WithInvalidStdin(t *testing.T) {
 	directory := t.TempDir()
+	testPkg := NewPackage()

 	stdin, err := os.CreateTemp(directory, "non-existing")
 	if err != nil {
 		t.Fatal("Unable to create test file", err)
 	}

-	if _, err := passphraseFileFetcher("-", stdin); err == nil {
+	if _, err := testPkg.passphraseFileFetcher("-", stdin); err == nil {
 		t.Error("Expected passphraseFileFetcher returning an error")
 	}
 }

+func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) {
+	testPkg := NewPackage()
+	stdin, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal("Unable to create pipe", err)
+	}
+
+	passphrase := "secret-from-stdin"
+
+	go func() {
+		w.Write([]byte(passphrase + "\n"))
+	}()
+
+	for i := 0; i < 4; i++ {
+		fetcher, err := testPkg.passphraseFileFetcher("-", stdin)
+		if err != nil {
+			t.Errorf("Expected passphraseFileFetcher to not return an error, but got %v", err)
+		}
+
+		pass, err := fetcher("key")
+		if err != nil {
+			t.Errorf("Expected passphraseFileFetcher invocation to succeed, failed with %v", err)
+		}
+
+		if string(pass) != string(passphrase) {
+			t.Errorf("Expected multiple passphrase fetch to return %q, got %q", passphrase, pass)
+		}
+	}
+}
+
 func TestValidateVersion(t *testing.T) {
 	type args struct {
 		ver string

From 9b9ff12c6d367551c910f7da6adc2080dc5b436c Mon Sep 17 00:00:00 2001
From: Robert Sirchia
Date: Wed, 16 Apr 2025 11:36:05 -0400
Subject: [PATCH 189/192] adding slog debug to a few points

Signed-off-by: Robert Sirchia
---
 pkg/chart/v2/util/jsonschema.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go
index a8baef0f6..a473ab3b3 100644
--- a/pkg/chart/v2/util/jsonschema.go
+++ b/pkg/chart/v2/util/jsonschema.go
@@ -19,6 +19,7 @@ package util
 import (
 	"bytes"
 	"fmt"
+	"log/slog"
 	"strings"

 	"github.com/pkg/errors"
@@ -31,13 +32,14 @@ import (
 func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error {
 	var sb strings.Builder
 	if chrt.Schema != nil {
+		slog.Debug("chart name", "chart-name", chrt.Name)
 		err := ValidateAgainstSingleSchema(values, chrt.Schema)
 		if err != nil {
 			sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
 			sb.WriteString(err.Error())
 		}
 	}
-
+	slog.Debug("number of dependencies in the chart", "dependencies", len(chrt.Dependencies()))
 	// For each dependency, recursively call this function with the coalesced values
 	for _, subchart := range chrt.Dependencies() {
 		subchartValues := values[subchart.Name()].(map[string]interface{})
@@ -67,6 +69,7 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error
 	if err != nil {
 		return err
 	}
+	slog.Debug("unmarshalled JSON schema", "schema", schema)

 	compiler := jsonschema.NewCompiler()
 	err = compiler.AddResource("file:///values.schema.json", schema)
@@ -78,6 +81,7 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error
 	if err != nil {
 		return err
 	}
+	slog.Debug("validated JSON schema", "validator", validator)

 	err = validator.Validate(values.AsMap())
 	if err != nil {

From ef0361de2146c2b119c93709954c35cf6b7e10c7 Mon Sep 17 00:00:00 2001
From: Robert Sirchia
Date: Wed, 16 Apr 2025 15:26:44 -0400
Subject: [PATCH 190/192] fixing as per review

Signed-off-by: Robert Sirchia
---
 pkg/chart/v2/util/jsonschema.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go
index a473ab3b3..3de941e0b 100644
--- a/pkg/chart/v2/util/jsonschema.go
+++ b/pkg/chart/v2/util/jsonschema.go
@@ -32,7 +32,7 @@ func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error {
 	var sb strings.Builder
 	if chrt.Schema != nil {
-		slog.Debug("chart name", "chart-name", chrt.Name)
+		slog.Debug("chart name", "chart-name", chrt.Name())
 		err := ValidateAgainstSingleSchema(values, chrt.Schema)
 		if err != nil {
 			sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
@@ -69,7 +69,7 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error
 	if err != nil {
 		return err
 	}
-	slog.Debug("unmarshalled JSON schema", "schema", schema)
+	slog.Debug("unmarshalled JSON schema", "schema", schemaJSON)

 	compiler := jsonschema.NewCompiler()
 	err = compiler.AddResource("file:///values.schema.json", schema)
@@ -81,7 +81,6 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error
 	if err != nil {
 		return err
 	}
-	slog.Debug("validated JSON schema", "validator", validator)

 	err = validator.Validate(values.AsMap())
 	if err != nil {

From 0c200aca73157b9525dbdef4b958c7842872c408 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 16 Apr 2025 19:51:50 +0000
Subject: [PATCH 191/192] build(deps): bump golang.org/x/net from 0.37.0 to 0.38.0

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.37.0 to 0.38.0.
- [Commits](https://github.com/golang/net/compare/v0.37.0...v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.38.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 26635c337..2fc57d0bb 100644
--- a/go.mod
+++ b/go.mod
@@ -160,7 +160,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.34.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	golang.org/x/mod v0.22.0 // indirect
-	golang.org/x/net v0.37.0 // indirect
+	golang.org/x/net v0.38.0 // indirect
 	golang.org/x/oauth2 v0.28.0 // indirect
 	golang.org/x/sync v0.13.0 // indirect
 	golang.org/x/sys v0.32.0 // indirect
diff --git a/go.sum b/go.sum
index 56f3528ce..f32f7a530 100644
--- a/go.sum
+++ b/go.sum
@@ -414,8 +414,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
 golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

From d33e2896f076c30f97e5508479e3ce2423a9230e Mon Sep 17 00:00:00 2001
From: Benoit Tigeot
Date: Thu, 17 Apr 2025 12:32:51 +0200
Subject: [PATCH 192/192] Prevent failures from method signature changes on hooks for the wait strategy.

This PR also fixes linting and tests.
Signed-off-by: Benoit Tigeot
---
 pkg/action/hooks.go      |  6 +++---
 pkg/action/hooks_test.go | 26 ++++++++++++++++----------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 37e134c48..dbacb123e 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -112,7 +112,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		// If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks
 		// should be deleted under succeeded condition.
-		if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, timeout); err != nil {
+		if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil {
 			return err
 		}

@@ -178,9 +178,9 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo
 }

 // deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
-func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error {
+func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
 	for _, h := range hooks {
-		if err := cfg.deleteHookByPolicy(h, policy, timeout); err != nil {
+		if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil {
 			return err
 		}
 	}
diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go
index 68379add8..9ca42ec6a 100644
--- a/pkg/action/hooks_test.go
+++ b/pkg/action/hooks_test.go
@@ -228,6 +228,11 @@ type HookFailingKubeClient struct {
 	deleteRecord []resource.Info
 }

+type HookFailingKubeWaiter struct {
+	*kubefake.PrintingKubeWaiter
+	failOn resource.Info
+}
+
 func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
 	configMap := &v1.ConfigMap{}

 	err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
 	if err != nil {
 		return kube.ResourceList{}, err
 	}

 	return kube.ResourceList{{
 		Name:      configMap.Name,
 		Namespace: configMap.Namespace,
 	}}, nil
 }

-func (h *HookFailingKubeClient) WatchUntilReady(resources kube.ResourceList, duration time.Duration) error {
+func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
 	for _, res := range resources {
 		if res.Name == h.failOn.Name && res.Namespace == h.failOn.Namespace {
 			return &HookFailedError{}
 		}
 	}
-
-	return h.PrintingKubeClient.WatchUntilReady(resources, duration)
+	return nil
 }

 func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
@@ -264,6 +268,14 @@ func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Resul
 	return h.PrintingKubeClient.Delete(resources)
 }

+func (h *HookFailingKubeClient) GetWaiter(strategy kube.WaitStrategy) (kube.Waiter, error) {
+	waiter, _ := h.PrintingKubeClient.GetWaiter(strategy)
+	return &HookFailingKubeWaiter{
+		PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter),
+		failOn:             h.failOn,
+	}, nil
+}
+
 func TestHooksCleanUp(t *testing.T) {
 	hookEvent := release.HookPreInstall

 			Releases:     storage.Init(driver.NewMemory()),
 			KubeClient:   kubeClient,
 			Capabilities: chartutil.DefaultCapabilities,
-			Log: func(format string, v ...interface{}) {
-				t.Helper()
-				if *verbose {
-					t.Logf(format, v...)
-				}
-			},
 		}

-		err := configuration.execHook(&tc.inputRelease, hookEvent, 600)
+		err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600)

 		if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
 			t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord)
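
A note for reviewers of PATCH 188: the caching idea is easier to see outside Helm's types. Below is a minimal standalone sketch, not Helm's actual implementation; the signer type, its fetcher method, and the hard-coded "secret" input are illustrative stand-ins, and only the read-once-then-serve-from-cache flow mirrors the cachedPassphrase logic in the diff above.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

type signer struct {
	cached []byte // filled on the first successful read, reused afterwards
}

// fetcher reads the passphrase from src only while the cache is empty.
// Every function it returns serves the cached value, so a one-shot
// source such as stdin is consumed exactly once across many signings.
func (s *signer) fetcher(src *bufio.Reader) (func() ([]byte, error), error) {
	if s.cached == nil {
		line, _, err := src.ReadLine()
		if err != nil {
			return nil, err
		}
		s.cached = line
	}
	return func() ([]byte, error) { return s.cached, nil }, nil
}

func main() {
	s := &signer{}
	src := bufio.NewReader(strings.NewReader("secret\n")) // stands in for stdin
	for i := 0; i < 3; i++ { // "sign" three charts
		f, err := s.fetcher(src)
		if err != nil {
			panic(err)
		}
		pw, _ := f()
		fmt.Println(string(pw)) // prints "secret" all three times
	}
}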
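
Likewise for PATCH 192: the signature churn exists because readiness waiting moved off the kube client and onto a waiter selected by a wait strategy (see GetWaiter in the test diff above). What follows is a reduced sketch of that shape, using local stand-in types rather than Helm's real kube.Waiter and kube.WaitStrategy definitions, so test doubles can wrap the waiter instead of overriding client methods.

package main

import (
	"fmt"
	"time"
)

// WaitStrategy names a waiting behavior; the client maps it to a Waiter.
type WaitStrategy string

// Waiter owns the readiness logic, mirroring the role kube.Waiter plays
// in the diffs above (only the WatchUntilReady shape is sketched here).
type Waiter interface {
	WatchUntilReady(names []string, timeout time.Duration) error
}

type readyWaiter struct{}

func (readyWaiter) WatchUntilReady(_ []string, _ time.Duration) error {
	return nil // pretend every resource became ready immediately
}

// Client hands out a Waiter for a given strategy instead of exposing
// WatchUntilReady itself.
type Client struct{}

func (Client) GetWaiter(_ WaitStrategy) (Waiter, error) {
	return readyWaiter{}, nil
}

func main() {
	var c Client
	w, err := c.GetWaiter("statusWatcher")
	if err != nil {
		panic(err)
	}
	fmt.Println(w.WatchUntilReady([]string{"hook-job"}, time.Minute))
}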