Merge branch 'main' into update-golang-version

Signed-off-by: dongjiang <dongjiang2010@gmail.com>
commit cd19e2a015

@@ -22,7 +22,7 @@ jobs:
- name: Add variables to environment file
run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0
with:
go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true

@@ -17,11 +17,11 @@ jobs:
- name: Add variables to environment file
run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0
with:
go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true
- name: golangci-lint
uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea #pin@6.5.1
uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd #pin@7.0.0
with:
version: ${{ env.GOLANGCI_LINT_VERSION }}

@@ -16,7 +16,7 @@ jobs:
- name: Add variables to environment file
run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0
with:
go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true

@@ -28,7 +28,7 @@ jobs:
run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0
with:
go-version: '${{ env.GOLANG_VERSION }}'
- name: Run unit tests
@@ -85,7 +85,7 @@ jobs:
run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # pin@5.4.0
with:
go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true

@@ -55,7 +55,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: SARIF file
path: results.sarif

.gitignore

@@ -12,4 +12,5 @@ bin/
vendor/
# Ignores charts pulled for dependency build tests
cmd/helm/testdata/testcharts/issue-7233/charts/*
pkg/cmd/testdata/testcharts/issue-7233/charts/*
.pre-commit-config.yaml

@@ -1,45 +1,63 @@
version: "2"
run:
timeout: 10m
linters:
disable-all: true
default: none
enable:
- dupl
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- revive
- unused
- staticcheck
linters-settings:
gofmt:
simplify: true
goimports:
local-prefixes: helm.sh/helm/v4
dupl:
threshold: 400
issues:
exclude-rules:
- unused
settings:
dupl:
threshold: 400
exclusions:
# Helm, and the Go source code itself, sometimes use these names outside their built-in
# functions. As the Go source code has re-used these names, it's ok for Helm to do the same.
# Linting will look for redefinition of built-in ids, but we opt in to the ones we choose to use.
- linters:
- revive
text: "redefines-builtin-id: redefinition of the built-in function append"
- linters:
- revive
text: "redefines-builtin-id: redefinition of the built-in function clear"
- linters:
- revive
text: "redefines-builtin-id: redefinition of the built-in function max"
- linters:
- revive
text: "redefines-builtin-id: redefinition of the built-in function min"
- linters:
- revive
text: "redefines-builtin-id: redefinition of the built-in function new"
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- revive
text: 'redefines-builtin-id: redefinition of the built-in function append'
- linters:
- revive
text: 'redefines-builtin-id: redefinition of the built-in function clear'
- linters:
- revive
text: 'redefines-builtin-id: redefinition of the built-in function max'
- linters:
- revive
text: 'redefines-builtin-id: redefinition of the built-in function min'
- linters:
- revive
text: 'redefines-builtin-id: redefinition of the built-in function new'
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
settings:
gofmt:
simplify: true
goimports:
local-prefixes:
- helm.sh/helm/v4
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

@@ -156,7 +156,7 @@ format: $(GOIMPORTS)
# Generate golden files used in unit tests
.PHONY: gen-test-golden
gen-test-golden:
gen-test-golden: PKG = ./cmd/helm ./pkg/action
gen-test-golden: PKG = ./pkg/cmd ./pkg/action
gen-test-golden: TESTFLAGS = -update
gen-test-golden: test-unit

@@ -17,7 +17,7 @@ limitations under the License.
package main // import "helm.sh/helm/v4/cmd/helm"
import (
"log"
"log/slog"
"os"
// Import to initialize client auth plugins.
@@ -27,10 +27,6 @@ import (
"helm.sh/helm/v4/pkg/kube"
)
func init() {
log.SetFlags(log.Lshortfile)
}
func main() {
// Setting the name of the app for managedFields in the Kubernetes client.
// It is set here to the full name of "helm" so that renaming of helm to
@@ -40,12 +36,12 @@ func main() {
cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:])
if err != nil {
helmcmd.Warning("%+v", err)
slog.Warn("command failed", slog.Any("error", err))
os.Exit(1)
}
if err := cmd.Execute(); err != nil {
helmcmd.Debug("%+v", err)
slog.Debug("error", slog.Any("error", err))
switch e := err.(type) {
case helmcmd.PluginError:
os.Exit(e.Code)
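
The slog calls above replace Helm's printf-style logging helpers (helmcmd.Warning, helmcmd.Debug) with structured logging from the standard library. A minimal, self-contained sketch of the same pattern (standard library only; the error message is illustrative):

package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("chart not found") // illustrative error
	// slog.Any attaches the error as a structured attribute; the default
	// text handler renders it as: level=WARN msg="command failed" error="chart not found"
	slog.Warn("command failed", slog.Any("error", err))
}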

@@ -11,10 +11,10 @@ require (
github.com/Masterminds/squirrel v1.5.4
github.com/Masterminds/vcs v1.13.3
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
github.com/containerd/containerd v1.7.26
github.com/cyphar/filepath-securejoin v0.4.1
github.com/distribution/distribution/v3 v3.0.0-rc.3
github.com/distribution/distribution/v3 v3.0.0
github.com/evanphx/json-patch v5.9.11+incompatible
github.com/fluxcd/cli-utils v0.36.0-flux.12
github.com/foxcpp/go-mockdns v1.1.0
github.com/gobwas/glob v0.2.3
github.com/gofrs/flock v0.12.1
@@ -28,14 +28,14 @@ require (
github.com/opencontainers/image-spec v1.1.1
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
github.com/pkg/errors v0.9.1
github.com/rubenv/sql-migrate v1.7.1
github.com/rubenv/sql-migrate v1.7.2
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
github.com/xeipuuv/gojsonschema v1.2.0
golang.org/x/crypto v0.36.0
golang.org/x/term v0.30.0
golang.org/x/text v0.23.0
golang.org/x/crypto v0.37.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.3
k8s.io/apiextensions-apiserver v0.32.3
@@ -46,6 +46,7 @@ require (
k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.32.3
oras.land/oras-go/v2 v2.5.0
sigs.k8s.io/controller-runtime v0.20.4
sigs.k8s.io/yaml v1.4.0
)
@@ -60,9 +61,6 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -71,29 +69,30 @@ require (
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -107,7 +106,7 @@ require (
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
@@ -120,28 +119,28 @@ require (
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/gomega v1.36.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.60.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
github.com/redis/go-redis/v9 v9.1.0 // indirect
github.com/redis/go-redis/v9 v9.7.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
go.opentelemetry.io/otel v1.32.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
@@ -154,31 +153,31 @@ require (
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
go.opentelemetry.io/otel/log v0.8.0 // indirect
go.opentelemetry.io/otel/metric v1.32.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.32.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
go.opentelemetry.io/otel/trace v1.32.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.28.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
google.golang.org/grpc v1.68.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect
google.golang.org/protobuf v1.36.4 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/component-base v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
)

go.sum

@@ -37,10 +37,11 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0=
github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -48,19 +49,10 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
github.com/containerd/containerd v1.7.26 h1:3cs8K2RHlMQaPifLqgRyI4VBkoldNdEw62cb7qQga7k=
github.com/containerd/containerd v1.7.26/go.mod h1:m4JU0E+h0ebbo9yXD7Hyt+sWnc8tChm7MudCjj4jRvQ=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
@@ -71,34 +63,40 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg=
github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/cli-utils v0.36.0-flux.12 h1:8cD6SmaKa/lGo0KCu0XWiGrXJMLMBQwSsnoP0cG+Gjw=
github.com/fluxcd/cli-utils v0.36.0-flux.12/go.mod h1:Nb/zMqsJAzjz4/HIsEc2LTqxC6eC0rV26t4hkJT/F9o=
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -109,12 +107,12 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
@@ -135,10 +133,10 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -146,8 +144,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf h1:BvBLUD2hkvLI3dJTJMiopAq8/wp43AAZKTP7qdpptbU=
github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -156,8 +154,8 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
@@ -193,11 +191,8 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -210,8 +205,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -253,10 +248,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -284,8 +279,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -296,14 +291,16 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4=
github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/rubenv/sql-migrate v1.7.2 h1:HOjuq5BmSVQHX14s/U3iS4I3YhP+h89Lg6QawwUFvyc=
github.com/rubenv/sql-migrate v1.7.2/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
@@ -319,41 +316,31 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -378,20 +365,24 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -400,16 +391,16 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -423,10 +414,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -437,8 +428,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -460,8 +451,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -469,8 +460,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -478,10 +469,10 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -490,8 +481,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -502,8 +493,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -534,21 +525,23 @@ k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg=
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas=
k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk=
sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

@@ -0,0 +1,87 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"context"
"log/slog"
"os"
)
// DebugEnabledFunc is a function type that determines if debug logging is enabled
// We use a function because we want to check the setting at log time, not when the logger is created
type DebugEnabledFunc func() bool
// DebugCheckHandler checks settings.Debug at log time
type DebugCheckHandler struct {
handler slog.Handler
debugEnabled DebugEnabledFunc
}
// Enabled implements slog.Handler.Enabled
func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
if level == slog.LevelDebug {
return h.debugEnabled()
}
return true // Always log other levels
}
// Handle implements slog.Handler.Handle
func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
return h.handler.Handle(ctx, r)
}
// WithAttrs implements slog.Handler.WithAttrs
func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
return &DebugCheckHandler{
handler: h.handler.WithAttrs(attrs),
debugEnabled: h.debugEnabled,
}
}
// WithGroup implements slog.Handler.WithGroup
func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
return &DebugCheckHandler{
handler: h.handler.WithGroup(name),
debugEnabled: h.debugEnabled,
}
}
// NewLogger creates a new logger with dynamic debug checking
func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
// Create base handler that removes timestamps
baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
// Always use LevelDebug here to allow all messages through
// Our custom handler will do the filtering
Level: slog.LevelDebug,
ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
// Remove the time attribute
if a.Key == slog.TimeKey {
return slog.Attr{}
}
return a
},
})
// Wrap with our dynamic debug-checking handler
dynamicHandler := &DebugCheckHandler{
handler: baseHandler,
debugEnabled: debugEnabled,
}
return slog.New(dynamicHandler)
}
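
A minimal usage sketch for the handler above. The debug flag here is illustrative; in Helm it would come from the CLI settings, and internal/logging is importable only from within the helm.sh/helm/v4 module:

package main

import (
	"log/slog"

	"helm.sh/helm/v4/internal/logging" // internal: only importable inside the module
)

func main() {
	debug := false
	logger := logging.NewLogger(func() bool { return debug })
	slog.SetDefault(logger)

	slog.Debug("not emitted") // Enabled() consults the flag at log time
	debug = true
	slog.Debug("emitted now") // same logger; no re-creation needed
	slog.Info("always shown") // non-debug levels always pass Enabled
}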

@@ -29,9 +29,6 @@ type Client struct {
// The base URL for requests
BaseURL string
// The internal logger to use
Log func(string, ...interface{})
}
// New creates a new client
@@ -44,12 +41,9 @@ func New(u string) (*Client, error) {
return &Client{
BaseURL: u,
Log: nopLogger,
}, nil
}
var nopLogger = func(_ string, _ ...interface{}) {}
// Validate if the base URL for monocular is valid.
func validate(u string) error {

@@ -0,0 +1,121 @@
/*
Copyright The Helm Authors.
This file was initially copied and modified from
https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"context"
"fmt"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/object"
)
type customJobStatusReader struct {
genericStatusReader engine.StatusReader
}
func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader {
genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions)
return &customJobStatusReader{
genericStatusReader: genericStatusReader,
}
}
func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool {
return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind()
}
func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatus(ctx, reader, resource)
}
func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
}
// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go
// Modified to return Current status only when the Job has completed as opposed to when it's in progress.
func jobConditions(u *unstructured.Unstructured) (*status.Result, error) {
obj := u.UnstructuredContent()
parallelism := status.GetIntField(obj, ".spec.parallelism", 1)
completions := status.GetIntField(obj, ".spec.completions", parallelism)
succeeded := status.GetIntField(obj, ".status.succeeded", 0)
failed := status.GetIntField(obj, ".status.failed", 0)
// Conditions
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
objc, err := status.GetObjectWithConditions(obj)
if err != nil {
return nil, err
}
for _, c := range objc.Status.Conditions {
switch c.Type {
case "Complete":
if c.Status == corev1.ConditionTrue {
message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
return &status.Result{
Status: status.CurrentStatus,
Message: message,
Conditions: []status.Condition{},
}, nil
}
case "Failed":
message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
if c.Status == corev1.ConditionTrue {
return &status.Result{
Status: status.FailedStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Reason: "JobFailed",
Message: message,
},
},
}, nil
}
}
}
message := "Job in progress"
return &status.Result{
Status: status.InProgressStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionReconciling,
Status: corev1.ConditionTrue,
Reason: "JobInProgress",
Message: message,
},
},
}, nil
}
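For illustration, a hedged sketch of calling jobConditions with a completed Job expressed directly as unstructured content (the field values are assumptions; the test file below exercises the same path through the typed API):
u := &unstructured.Unstructured{Object: map[string]interface{}{
	"apiVersion": "batch/v1",
	"kind":       "Job",
	"metadata":   map[string]interface{}{"name": "demo"},
	"spec":       map[string]interface{}{"completions": int64(1)},
	"status": map[string]interface{}{
		"succeeded": int64(1),
		"conditions": []interface{}{
			map[string]interface{}{"type": "Complete", "status": "True"},
		},
	},
}}
if res, err := jobConditions(u); err == nil {
	fmt.Println(res.Status) // CurrentStatus
}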

@ -0,0 +1,116 @@
/*
Copyright The Helm Authors.
This file was initially copied and modified from
https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"testing"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
)
func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) {
t.Helper()
// If the incoming object is already unstructured, perform a deep copy first;
// otherwise DefaultUnstructuredConverter ends up returning the inner map
// without making a copy.
if _, ok := obj.(runtime.Unstructured); ok {
obj = obj.DeepCopyObject()
}
rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, err
}
return &unstructured.Unstructured{Object: rawMap}, nil
}
func TestJobConditions(t *testing.T) {
t.Parallel()
tests := []struct {
name string
job *batchv1.Job
expectedStatus status.Status
}{
{
name: "job without Complete condition returns InProgress status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-no-condition",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{},
},
expectedStatus: status.InProgressStatus,
},
{
name: "job with Complete condition as True returns Current status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-complete",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{
{
Type: batchv1.JobComplete,
Status: corev1.ConditionTrue,
},
},
},
},
expectedStatus: status.CurrentStatus,
},
{
name: "job with Failed condition as True returns Failed status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-failed",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{
{
Type: batchv1.JobFailed,
Status: corev1.ConditionTrue,
},
},
},
},
expectedStatus: status.FailedStatus,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
us, err := toUnstructured(t, tc.job)
assert.NoError(t, err)
result, err := jobConditions(us)
assert.NoError(t, err)
assert.Equal(t, tc.expectedStatus, result.Status)
})
}
}

@ -0,0 +1,104 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/object"
)
type customPodStatusReader struct {
genericStatusReader engine.StatusReader
}
func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader {
genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions)
return &customPodStatusReader{
genericStatusReader: genericStatusReader,
}
}
func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool {
return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind()
}
func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatus(ctx, reader, resource)
}
func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
}
func podConditions(u *unstructured.Unstructured) (*status.Result, error) {
obj := u.UnstructuredContent()
phase := status.GetStringField(obj, ".status.phase", "")
switch corev1.PodPhase(phase) {
case corev1.PodSucceeded:
message := fmt.Sprintf("pod %s succeeded", u.GetName())
return &status.Result{
Status: status.CurrentStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Message: message,
},
},
}, nil
case corev1.PodFailed:
message := fmt.Sprintf("pod %s failed", u.GetName())
return &status.Result{
Status: status.FailedStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Reason: "PodFailed",
Message: message,
},
},
}, nil
}
message := "Pod in progress"
return &status.Result{
Status: status.InProgressStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionReconciling,
Status: corev1.ConditionTrue,
Reason: "PodInProgress",
Message: message,
},
},
}, nil
}

@ -0,0 +1,111 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
)
func TestPodConditions(t *testing.T) {
tests := []struct {
name string
pod *v1.Pod
expectedStatus status.Status
}{
{
name: "pod without status returns in progress",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod succeeded returns current status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
},
expectedStatus: status.CurrentStatus,
},
{
name: "pod failed returns failed status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodFailed,
},
},
expectedStatus: status.FailedStatus,
},
{
name: "pod pending returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod running returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-running"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod with unknown phase returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodUnknown,
},
},
expectedStatus: status.InProgressStatus,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
us, err := toUnstructured(t, tc.pod)
assert.NoError(t, err)
result, err := podConditions(us)
assert.NoError(t, err)
assert.Equal(t, tc.expectedStatus, result.Status)
})
}
}

@ -21,7 +21,7 @@ limitations under the License.
package sympath
import (
"log"
"log/slog"
"os"
"path/filepath"
"sort"
@ -72,7 +72,7 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
return errors.Wrapf(err, "error evaluating symlink %s", path)
}
// This log message highlights a symlink being used within a chart; symlinks can be used for nefarious reasons.
log.Printf("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved)
slog.Info("found symbolic link in path. Contents of linked file included and used", "path", path, "resolved", resolved)
if info, err = os.Lstat(resolved); err != nil {
return err
}

@ -92,5 +92,5 @@ func update(filename string, in []byte) error {
}
func normalize(in []byte) []byte {
return bytes.Replace(in, []byte("\r\n"), []byte("\n"), -1)
return bytes.ReplaceAll(in, []byte("\r\n"), []byte("\n"))
}

@ -172,28 +172,28 @@ func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
return err
}
if _, err = io.Copy(out, in); err != nil {
out.Close()
return
return err
}
// Check for write errors on Close
if err = out.Close(); err != nil {
return
return err
}
si, err := os.Stat(src)
if err != nil {
return
return err
}
// Temporary fix for Go < 1.9
@ -205,7 +205,7 @@ func copyFile(src, dst string) (err error) {
}
err = os.Chmod(dst, si.Mode())
return
return err
}
// cloneSymlink will create a new symlink that points to the resolved path of sl.

@ -33,17 +33,11 @@ package fs
import (
"os"
"os/exec"
"path/filepath"
"runtime"
"sync"
"testing"
)
var (
mu sync.Mutex
)
func TestRenameWithFallback(t *testing.T) {
dir := t.TempDir()
@ -360,19 +354,6 @@ func TestCopyFile(t *testing.T) {
}
}
func cleanUpDir(dir string) {
// NOTE(mattn): It seems that sometimes git.exe is not dead
// when cleanUpDir() is called. But we do not know any way to wait for it.
if runtime.GOOS == "windows" {
mu.Lock()
exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run()
mu.Unlock()
}
if dir != "" {
os.RemoveAll(dir)
}
}
func TestCopyFileSymlink(t *testing.T) {
tempdir := t.TempDir()

@ -20,10 +20,10 @@ import (
"bytes"
"fmt"
"io"
"log/slog"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
@ -63,21 +63,6 @@ var (
errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
)
// ValidName is a regular expression for resource names.
//
// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See
// pkg/lint/rules.validateMetadataNameFunc for the replacement.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
//
// This follows the above regular expression (but requires a full string match, not partial).
//
// The Kubernetes documentation is here, though it is not entirely correct:
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
// Configuration injects the dependencies that all actions share.
type Configuration struct {
// RESTClientGetter is an interface that loads Kubernetes clients.
@ -95,8 +80,6 @@ type Configuration struct {
// Capabilities describes the capabilities of the Kubernetes cluster.
Capabilities *chartutil.Capabilities
Log func(string, ...interface{})
// HookOutputFunc is called with the namespace, pod, and container name, and returns a writer that will receive the log output.
HookOutputFunc func(namespace, pod, container string) io.Writer
}
@ -243,9 +226,6 @@ type RESTClientGetter interface {
ToRESTMapper() (meta.RESTMapper, error)
}
// DebugLog sets the logger that writes debug strings
type DebugLog func(format string, v ...interface{})
// capabilities builds a Capabilities from discovery information.
func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
if cfg.Capabilities != nil {
@ -269,8 +249,8 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
apiVersions, err := GetVersionSet(dc)
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
cfg.Log("WARNING: To fix this, kubectl delete apiservice <service-name>")
slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
slog.Warn("to fix this, kubectl delete apiservice <service-name>")
} else {
return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
}
@ -369,14 +349,13 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
// recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) {
if err := cfg.Releases.Update(r); err != nil {
cfg.Log("warning: Failed to update release %s: %s", r.Name, err)
slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err))
}
}
// Init initializes the action configuration
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
kc := kube.New(getter)
kc.Log = log
lazyClient := &lazyClient{
namespace: namespace,
@ -387,11 +366,9 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
switch helmDriver {
case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "memory":
var d *driver.Memory
@ -411,7 +388,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
case "sql":
d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
log,
namespace,
)
if err != nil {
@ -425,7 +401,6 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
cfg.RESTClientGetter = getter
cfg.KubeClient = kc
cfg.Releases = store
cfg.Log = log
cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
return nil
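With the logger parameter gone, callers configure slog once and pass one argument fewer to Init. A hedged sketch of the new call site (the cli.New() settings helper is an assumption about the caller's side, not part of this diff):
settings := cli.New()
cfg := new(action.Configuration)
if err := cfg.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER")); err != nil {
	log.Fatal(err)
}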

@ -19,11 +19,13 @@ import (
"flag"
"fmt"
"io"
"log/slog"
"testing"
"github.com/stretchr/testify/assert"
fakeclientset "k8s.io/client-go/kubernetes/fake"
"helm.sh/helm/v4/internal/logging"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
@ -34,11 +36,16 @@ import (
"helm.sh/helm/v4/pkg/time"
)
var verbose = flag.Bool("test.log", false, "enable test logging")
var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)")
func actionConfigFixture(t *testing.T) *Configuration {
t.Helper()
logger := logging.NewLogger(func() bool {
return *verbose
})
slog.SetDefault(logger)
registryClient, err := registry.NewClient()
if err != nil {
t.Fatal(err)
@ -49,12 +56,6 @@ func actionConfigFixture(t *testing.T) *Configuration {
KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
Capabilities: chartutil.DefaultCapabilities,
RegistryClient: registryClient,
Log: func(format string, v ...interface{}) {
t.Helper()
if *verbose {
t.Logf(format, v...)
}
},
}
}
@ -334,7 +335,7 @@ func TestConfiguration_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
cfg := &Configuration{}
actualErr := cfg.Init(nil, "default", tt.helmDriver, nil)
actualErr := cfg.Init(nil, "default", tt.helmDriver)
if tt.expectErr {
assert.Error(t, actualErr)
assert.Contains(t, actualErr.Error(), tt.errMsg)
@ -347,7 +348,7 @@ func TestConfiguration_Init(t *testing.T) {
}
func TestGetVersionSet(t *testing.T) {
client := fakeclientset.NewSimpleClientset()
client := fakeclientset.NewClientset()
vs, err := GetVersionSet(client.Discovery())
if err != nil {

@ -17,6 +17,8 @@ limitations under the License.
package action
import (
"log/slog"
"github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@ -53,6 +55,6 @@ func (h *History) Run(name string) ([]*release.Release, error) {
return nil, errors.Errorf("release name is invalid: %s", name)
}
h.cfg.Log("getting history for release %s", name)
slog.Debug("getting history for release", "release", name)
return h.cfg.Releases.History(name)
}

@ -35,7 +35,7 @@ import (
)
// execHook executes all of the hooks for the given hook event.
func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
executingHooks := []*release.Hook{}
for _, h := range rl.Hooks {
@ -49,7 +49,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// hooks are pre-ordered by kind, so keep order stable
sort.Stable(hookByWeight(executingHooks))
for _, h := range executingHooks {
for i, h := range executingHooks {
// Set default delete policy to before-hook-creation
if len(h.DeletePolicies) == 0 {
// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
@ -59,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
}
if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil {
if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil {
return err
}
@ -87,8 +87,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
}
waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
if err != nil {
return errors.Wrapf(err, "unable to get waiter")
}
// Watch hook resources until they have completed
err = cfg.KubeClient.WatchUntilReady(resources, timeout)
err = waiter.WatchUntilReady(resources, timeout)
// Note the time of success/failure
h.LastRun.CompletedAt = helmtime.Now()
// Mark hook as succeeded or failed
@ -101,10 +105,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
}
// If a hook failed, check its annotations to determine whether it should be deleted
// under the failed condition. If so, clear the corresponding resource object in the hook
if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil {
if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil {
// We log the error here as we want to propagate the hook failure upwards to the release object.
log.Printf("error deleting the hook resource on hook failure: %v", errDeleting)
}
// If a hook failed, check the annotations of the previously successful hooks to determine
// whether they should be deleted under the succeeded condition.
if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil {
return err
}
return err
}
h.LastRun.Phase = release.HookPhaseSucceeded
@ -118,7 +129,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// We log here as we still want to attempt hook resource deletion even if output logging fails.
log.Printf("error outputting logs for hook failure: %v", err)
}
if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil {
if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil {
return err
}
}
@ -139,7 +150,7 @@ func (x hookByWeight) Less(i, j int) bool {
}
// deleteHookByPolicy deletes a hook if the hook policy instructs it to
func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error {
func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
// Never delete CustomResourceDefinitions; this could cause lots of
// cascading garbage collection.
if h.Kind == "CustomResourceDefinition" {
@ -155,16 +166,28 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo
return errors.New(joinErrors(errs))
}
// wait for resources until they are deleted to avoid conflicts
if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok {
if err := kubeClient.WaitForDelete(resources, timeout); err != nil {
return err
}
waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
if err != nil {
return err
}
if err := waiter.WaitForDelete(resources, timeout); err != nil {
return err
}
}
return nil
}
// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
for _, h := range hooks {
if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil {
return err
}
}
return nil
}
// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion policies
// supported by helm. If so, mark the hook as one that should be deleted.
func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {

@ -20,13 +20,22 @@ import (
"bytes"
"fmt"
"io"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/cli-runtime/pkg/resource"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage"
"helm.sh/helm/v4/pkg/storage/driver"
)
func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string {
@ -206,3 +215,187 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str
is.Equal(expectedOutput, outBuffer.String())
is.Equal(release.StatusFailed, res.Info.Status)
}
type HookFailedError struct{}
func (e *HookFailedError) Error() string {
return "Hook failed!"
}
type HookFailingKubeClient struct {
kubefake.PrintingKubeClient
failOn resource.Info
deleteRecord []resource.Info
}
type HookFailingKubeWaiter struct {
*kubefake.PrintingKubeWaiter
failOn resource.Info
}
func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
configMap := &v1.ConfigMap{}
err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
if err != nil {
return kube.ResourceList{}, err
}
return kube.ResourceList{{
Name: configMap.Name,
Namespace: configMap.Namespace,
}}, nil
}
func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
for _, res := range resources {
if res.Name == h.failOn.Name && res.Namespace == h.failOn.Namespace {
return &HookFailedError{}
}
}
return nil
}
func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
for _, res := range resources {
h.deleteRecord = append(h.deleteRecord, resource.Info{
Name: res.Name,
Namespace: res.Namespace,
})
}
return h.PrintingKubeClient.Delete(resources)
}
func (h *HookFailingKubeClient) GetWaiter(strategy kube.WaitStrategy) (kube.Waiter, error) {
waiter, _ := h.PrintingKubeClient.GetWaiter(strategy)
return &HookFailingKubeWaiter{
PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter),
failOn: h.failOn,
}, nil
}
func TestHooksCleanUp(t *testing.T) {
hookEvent := release.HookPreInstall
testCases := []struct {
name string
inputRelease release.Release
failOn resource.Info
expectedDeleteRecord []resource.Info
expectError bool
}{
{
"Deletion hook runs for previously successful hook on failure of a heavier weight hook",
release.Release{
Name: "test-release",
Namespace: "test",
Hooks: []*release.Hook{
{
Name: "hook-1",
Kind: "ConfigMap",
Path: "templates/service_account.yaml",
Manifest: `apiVersion: v1
kind: ConfigMap
metadata:
name: build-config-1
namespace: test
data:
foo: bar
`,
Weight: -5,
Events: []release.HookEvent{
hookEvent,
},
DeletePolicies: []release.HookDeletePolicy{
release.HookBeforeHookCreation,
release.HookSucceeded,
release.HookFailed,
},
LastRun: release.HookExecution{
Phase: release.HookPhaseSucceeded,
},
},
{
Name: "hook-2",
Kind: "ConfigMap",
Path: "templates/job.yaml",
Manifest: `apiVersion: v1
kind: ConfigMap
metadata:
name: build-config-2
namespace: test
data:
foo: bar
`,
Weight: 0,
Events: []release.HookEvent{
hookEvent,
},
DeletePolicies: []release.HookDeletePolicy{
release.HookBeforeHookCreation,
release.HookSucceeded,
release.HookFailed,
},
LastRun: release.HookExecution{
Phase: release.HookPhaseFailed,
},
},
},
}, resource.Info{
Name: "build-config-2",
Namespace: "test",
}, []resource.Info{
{
// This should be in the record for `before-hook-creation`
Name: "build-config-1",
Namespace: "test",
},
{
// This should be in the record for `before-hook-creation`
Name: "build-config-2",
Namespace: "test",
},
{
// This should be in the record for cleaning up (the failure first)
Name: "build-config-2",
Namespace: "test",
},
{
// This should be in the record for cleaning up (then the previously successful)
Name: "build-config-1",
Namespace: "test",
},
}, true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
kubeClient := &HookFailingKubeClient{
kubefake.PrintingKubeClient{Out: io.Discard}, tc.failOn, []resource.Info{},
}
configuration := &Configuration{
Releases: storage.Init(driver.NewMemory()),
KubeClient: kubeClient,
Capabilities: chartutil.DefaultCapabilities,
}
err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600)
if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", tc.expectedDeleteRecord, kubeClient.deleteRecord)
}
if err != nil && !tc.expectError {
t.Fatalf("Got an unexpected error: %v", err)
}
if err == nil && tc.expectError {
t.Fatalf("Expected an error but did not get it.")
}
})
}
}

@ -21,6 +21,7 @@ import (
"context"
"fmt"
"io"
"log/slog"
"net/url"
"os"
"path"
@ -79,7 +80,7 @@ type Install struct {
HideSecret bool
DisableHooks bool
Replace bool
Wait bool
WaitStrategy kube.WaitStrategy
WaitForJobs bool
Devel bool
DependencyUpdate bool
@ -142,19 +143,19 @@ func NewInstall(cfg *Configuration) *Install {
in := &Install{
cfg: cfg,
}
in.ChartPathOptions.registryClient = cfg.RegistryClient
in.registryClient = cfg.RegistryClient
return in
}
// SetRegistryClient sets the registry client for the install action
func (i *Install) SetRegistryClient(registryClient *registry.Client) {
i.ChartPathOptions.registryClient = registryClient
i.registryClient = registryClient
}
// GetRegistryClient get the registry client.
func (i *Install) GetRegistryClient() *registry.Client {
return i.ChartPathOptions.registryClient
return i.registryClient
}
func (i *Install) installCRDs(crds []chart.CRD) error {
@ -172,7 +173,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
// If the error is CRD already exists, continue.
if apierrors.IsAlreadyExists(err) {
crdName := res[0].Name
i.cfg.Log("CRD %s is already present. Skipping.", crdName)
slog.Debug("CRD is already present. Skipping", "crd", crdName)
continue
}
return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
@ -180,8 +181,12 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
totalItems = append(totalItems, res...)
}
if len(totalItems) > 0 {
waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
if err != nil {
return errors.Wrapf(err, "unable to get waiter")
}
// Give time for the CRD to be recognized.
if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil {
if err := waiter.Wait(totalItems, 60*time.Second); err != nil {
return err
}
@ -196,7 +201,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
i.cfg.Log("Clearing discovery cache")
slog.Debug("clearing discovery cache")
discoveryClient.Invalidate()
_, _ = discoveryClient.ServerGroups()
@ -209,7 +214,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
i.cfg.Log("Clearing REST mapper cache")
slog.Debug("clearing REST mapper cache")
resettable.Reset()
}
}
@ -233,24 +238,24 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
if !i.ClientOnly {
if err := i.cfg.KubeClient.IsReachable(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Cluster reachability check failed: %v", err))
slog.Error(fmt.Sprintf("cluster reachability check failed: %v", err))
return nil, errors.Wrap(err, "cluster reachability check failed")
}
}
// HideSecret must be used with dry run. Otherwise, return an error.
if !i.isDryRun() && i.HideSecret {
i.cfg.Log("ERROR: Hiding Kubernetes secrets requires a dry-run mode")
slog.Error("hiding Kubernetes secrets requires a dry-run mode")
return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
}
if err := i.availableName(); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Release name check failed: %v", err))
slog.Error("release name check failed", slog.Any("error", err))
return nil, errors.Wrap(err, "release name check failed")
}
if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
i.cfg.Log(fmt.Sprintf("ERROR: Processing chart dependencies failed: %v", err))
slog.Error("chart dependencies processing failed", slog.Any("error", err))
return nil, errors.Wrap(err, "chart dependencies processing failed")
}
@ -264,7 +269,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
// On dry run, bail here
if i.isDryRun() {
i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
} else if err := i.installCRDs(crds); err != nil {
return nil, err
}
@ -284,12 +289,14 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
mem.SetNamespace(i.Namespace)
i.cfg.Releases = storage.Init(mem)
} else if !i.ClientOnly && len(i.APIVersions) > 0 {
i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
slog.Debug("API Version list given outside of client only mode, this list will be ignored")
}
// Make sure if Atomic is set, that wait is set as well. This makes it so
// the user doesn't have to specify both
i.Wait = i.Wait || i.Atomic
if i.WaitStrategy == kube.HookOnlyStrategy && i.Atomic {
i.WaitStrategy = kube.StatusWatcherStrategy
}
caps, err := i.cfg.getCapabilities()
if err != nil {
@ -448,7 +455,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
var err error
// pre-install hooks
if !i.DisableHooks {
if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout); err != nil {
return rel, fmt.Errorf("failed pre-install: %s", err)
}
}
@ -465,19 +472,22 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
return rel, err
}
if i.Wait {
if i.WaitForJobs {
err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout)
} else {
err = i.cfg.KubeClient.Wait(resources, i.Timeout)
}
if err != nil {
return rel, err
}
waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
if err != nil {
return rel, fmt.Errorf("failed to get waiter: %w", err)
}
if i.WaitForJobs {
err = waiter.WaitWithJobs(resources, i.Timeout)
} else {
err = waiter.Wait(resources, i.Timeout)
}
if err != nil {
return rel, err
}
if !i.DisableHooks {
if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout); err != nil {
return rel, fmt.Errorf("failed post-install: %s", err)
}
}
@ -496,7 +506,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// One possible strategy would be to do a timed retry to see if we can get
// this stored in the future.
if err := i.recordRelease(rel); err != nil {
i.cfg.Log("failed to record the release: %s", err)
slog.Error("failed to record the release", slog.Any("error", err))
}
return rel, nil
@ -505,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
if i.Atomic {
i.cfg.Log("Install failed and atomic is set, uninstalling release")
slog.Debug("install failed, uninstalling release", "release", i.ReleaseName)
uninstall := NewUninstall(i.cfg)
uninstall.DisableHooks = i.DisableHooks
uninstall.KeepHistory = false
@ -618,7 +628,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
_, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
if err != nil {
return err

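Since waiting is now selected by strategy rather than by the old Wait/WaitForJobs booleans alone, callers set the strategy on the action. A minimal sketch (the field values are illustrative only, assuming an initialized *action.Configuration named cfg):
inst := action.NewInstall(cfg)
inst.ReleaseName = "demo"
inst.WaitStrategy = kube.StatusWatcherStrategy // previously Wait = true
inst.WaitForJobs = true
inst.Timeout = 5 * time.Minute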
@ -35,6 +35,7 @@ import (
"helm.sh/helm/v4/internal/test"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage/driver"
@ -411,7 +412,7 @@ func TestInstallRelease_Wait(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer
instAction.Wait = true
instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
goroutines := runtime.NumGoroutine()
@ -430,7 +431,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second
instAction.cfg.KubeClient = failer
instAction.Wait = true
instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
ctx, cancel := context.WithCancel(context.Background())
@ -453,7 +454,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer
instAction.Wait = true
instAction.WaitStrategy = kube.StatusWatcherStrategy
instAction.WaitForJobs = true
vals := map[string]interface{}{}
@ -520,6 +521,8 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
time.AfterFunc(time.Second, cancel)
goroutines := runtime.NumGoroutine()
res, err := instAction.RunWithContext(ctx, buildChart(), vals)
is.Error(err)
is.Contains(err.Error(), "context canceled")
@ -530,6 +533,9 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
_, err = instAction.cfg.Releases.Get(res.Name, res.Version)
is.Error(err)
is.Equal(err, driver.ErrReleaseNotFound)
is.Equal(goroutines+1, runtime.NumGoroutine()) // installation goroutine is still running in the background
time.Sleep(10 * time.Second) // wait for goroutine to finish
is.Equal(goroutines, runtime.NumGoroutine())
}
func TestNameTemplate(t *testing.T) {

@ -39,6 +39,7 @@ type Package struct {
Key string
Keyring string
PassphraseFile string
cachedPassphrase []byte
Version string
AppVersion string
Destination string
@ -55,6 +56,10 @@ type Package struct {
InsecureSkipTLSverify bool
}
const (
passPhraseFileStdin = "-"
)
// NewPackage creates a new Package object with the given configuration.
func NewPackage() *Package {
return &Package{}
@ -128,7 +133,7 @@ func (p *Package) Clearsign(filename string) error {
passphraseFetcher := promptUser
if p.PassphraseFile != "" {
passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
passphraseFetcher, err = p.passphraseFileFetcher(p.PassphraseFile, os.Stdin)
if err != nil {
return err
}
@ -156,25 +161,42 @@ func promptUser(name string) ([]byte, error) {
return pw, err
}
func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
file, err := openPassphraseFile(passphraseFile, stdin)
if err != nil {
return nil, err
}
defer file.Close()
func (p *Package) passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
// When reading from stdin we cache the passphrase here. If we are
// packaging multiple charts, we reuse the cached passphrase. This
// allows giving the passphrase once on stdin without failing with
// complaints about stdin already being closed.
//
// An alternative to this would be to omit file.Close() for stdin
// below and require the user to provide the same passphrase once
// per chart on stdin, but that does not seem very user-friendly.
if p.cachedPassphrase == nil {
file, err := openPassphraseFile(passphraseFile, stdin)
if err != nil {
return nil, err
}
defer file.Close()
reader := bufio.NewReader(file)
passphrase, _, err := reader.ReadLine()
if err != nil {
return nil, err
reader := bufio.NewReader(file)
passphrase, _, err := reader.ReadLine()
if err != nil {
return nil, err
}
p.cachedPassphrase = passphrase
return func(_ string) ([]byte, error) {
return passphrase, nil
}, nil
}
return func(_ string) ([]byte, error) {
return passphrase, nil
return p.cachedPassphrase, nil
}, nil
}
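A hedged sketch of the caching behaviour described above, assuming stdin has been redirected from a pipe; the multi-fetch test later in this diff demonstrates the same flow:
p := NewPackage()
fetch1, _ := p.passphraseFileFetcher(passPhraseFileStdin, os.Stdin) // reads stdin once and caches the bytes
first, _ := fetch1("chart-a")
fetch2, _ := p.passphraseFileFetcher(passPhraseFileStdin, os.Stdin) // served from the cache
second, _ := fetch2("chart-b")
fmt.Println(bytes.Equal(first, second)) // true: stdin was read only once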
func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
if passphraseFile == "-" {
if passphraseFile == passPhraseFileStdin {
stat, err := stdin.Stat()
if err != nil {
return nil, err

@ -29,8 +29,9 @@ import (
func TestPassphraseFileFetcher(t *testing.T) {
secret := "secret"
directory := ensure.TempFile(t, "passphrase-file", []byte(secret))
testPkg := NewPackage()
fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
if err != nil {
t.Fatal("Unable to create passphraseFileFetcher", err)
}
@ -48,8 +49,9 @@ func TestPassphraseFileFetcher(t *testing.T) {
func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
secret := "secret"
directory := ensure.TempFile(t, "passphrase-file", []byte(secret+"\n\n."))
testPkg := NewPackage()
fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
if err != nil {
t.Fatal("Unable to create passphraseFileFetcher", err)
}
@ -66,17 +68,48 @@ func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
func TestPassphraseFileFetcher_WithInvalidStdin(t *testing.T) {
directory := t.TempDir()
testPkg := NewPackage()
stdin, err := os.CreateTemp(directory, "non-existing")
if err != nil {
t.Fatal("Unable to create test file", err)
}
if _, err := passphraseFileFetcher("-", stdin); err == nil {
if _, err := testPkg.passphraseFileFetcher("-", stdin); err == nil {
t.Error("Expected passphraseFileFetcher returning an error")
}
}
func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) {
testPkg := NewPackage()
stdin, w, err := os.Pipe()
if err != nil {
t.Fatal("Unable to create pipe", err)
}
passphrase := "secret-from-stdin"
go func() {
w.Write([]byte(passphrase + "\n"))
}()
for i := 0; i < 4; i++ {
fetcher, err := testPkg.passphraseFileFetcher("-", stdin)
if err != nil {
t.Errorf("Expected passphraseFileFetcher to not return an error, but got %v", err)
}
pass, err := fetcher("key")
if err != nil {
t.Errorf("Expected passphraseFileFetcher invocation to succeed, failed with %v", err)
}
if string(pass) != passphrase {
t.Errorf("Expected multiple passphrase fetch to return %q, got %q", passphrase, pass)
}
}
}
func TestValidateVersion(t *testing.T) {
type args struct {
ver string

@ -28,6 +28,7 @@ import (
v1 "k8s.io/api/core/v1"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1"
)
@ -96,7 +97,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
rel.Hooks = executingHooks
}
if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout); err != nil {
rel.Hooks = append(skippedHooks, rel.Hooks...)
r.cfg.Releases.Update(rel)
return rel, err

@ -19,12 +19,14 @@ package action
import (
"bytes"
"fmt"
"log/slog"
"strings"
"time"
"github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1"
helmtime "helm.sh/helm/v4/pkg/time"
)
@ -37,7 +39,7 @@ type Rollback struct {
Version int
Timeout time.Duration
Wait bool
WaitStrategy kube.WaitStrategy
WaitForJobs bool
DisableHooks bool
DryRun bool
@ -62,26 +64,26 @@ func (r *Rollback) Run(name string) error {
r.cfg.Releases.MaxHistory = r.MaxHistory
r.cfg.Log("preparing rollback of %s", name)
slog.Debug("preparing rollback", "name", name)
currentRelease, targetRelease, err := r.prepareRollback(name)
if err != nil {
return err
}
if !r.DryRun {
r.cfg.Log("creating rolled back release for %s", name)
slog.Debug("creating rolled back release", "name", name)
if err := r.cfg.Releases.Create(targetRelease); err != nil {
return err
}
}
r.cfg.Log("performing rollback of %s", name)
slog.Debug("performing rollback", "name", name)
if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
return err
}
if !r.DryRun {
r.cfg.Log("updating status for rolled back release for %s", name)
slog.Debug("updating status for rolled back release", "name", name)
if err := r.cfg.Releases.Update(targetRelease); err != nil {
return err
}
@ -128,7 +130,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
return nil, nil, errors.Errorf("release has no %d version", previousVersion)
}
r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion)
slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
if err != nil {
@ -161,7 +163,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
if r.DryRun {
r.cfg.Log("dry run for %s", targetRelease.Name)
slog.Debug("dry run", "name", targetRelease.Name)
return targetRelease, nil
}
@ -176,11 +178,11 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// pre-rollback hooks
if !r.DisableHooks {
if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout); err != nil {
return targetRelease, err
}
} else {
r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
slog.Debug("rollback hooks disabled", "name", targetRelease.Name)
}
// It is safe to use "force" here because these are resources currently rendered by the chart.
@ -192,14 +194,14 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
if err != nil {
msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
r.cfg.Log("warning: %s", msg)
slog.Warn(msg)
currentRelease.Info.Status = release.StatusSuperseded
targetRelease.Info.Status = release.StatusFailed
targetRelease.Info.Description = msg
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
if r.CleanupOnFail {
r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created))
slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
_, errs := r.cfg.KubeClient.Delete(results.Created)
if errs != nil {
var errorList []string
@ -208,7 +210,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
}
return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
}
r.cfg.Log("Resource cleanup complete")
slog.Debug("resource cleanup complete")
}
return targetRelease, err
}
@ -219,31 +221,32 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own
if err := recreate(r.cfg, results.Updated); err != nil {
r.cfg.Log(err.Error())
slog.Error(err.Error())
}
}
if r.Wait {
if r.WaitForJobs {
if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil {
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
}
} else {
if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil {
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
}
waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
if err != nil {
return nil, errors.Wrap(err, "unable to set metadata visitor from target release")
}
if r.WaitForJobs {
if err := waiter.WaitWithJobs(target, r.Timeout); err != nil {
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
}
} else {
if err := waiter.Wait(target, r.Timeout); err != nil {
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
}
}
// post-rollback hooks
if !r.DisableHooks {
if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout); err != nil {
return targetRelease, err
}
}
@ -254,7 +257,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
}
// Supersede all previous deployments, see issue #2941.
for _, rel := range deployed {
r.cfg.Log("superseding previous deployment %d", rel.Version)
slog.Debug("superseding previous deployment", "version", rel.Version)
rel.Info.Status = release.StatusSuperseded
r.cfg.recordRelease(rel)
}

@ -69,14 +69,14 @@ func NewShow(output ShowOutputFormat, cfg *Configuration) *Show {
sh := &Show{
OutputFormat: output,
}
sh.ChartPathOptions.registryClient = cfg.RegistryClient
sh.registryClient = cfg.RegistryClient
return sh
}
// SetRegistryClient sets the registry client to use when pulling a chart from a registry.
func (s *Show) SetRegistryClient(client *registry.Client) {
s.ChartPathOptions.registryClient = client
s.registryClient = client
}
// Run executes 'helm show' against the given release.

@ -17,6 +17,7 @@ limitations under the License.
package action
import (
"log/slog"
"strings"
"time"
@ -41,7 +42,7 @@ type Uninstall struct {
DryRun bool
IgnoreNotFound bool
KeepHistory bool
Wait bool
WaitStrategy kube.WaitStrategy
DeletionPropagation string
Timeout time.Duration
Description string
@ -60,6 +61,11 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, err
}
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
if err != nil {
return nil, err
}
if u.DryRun {
// In the dry run case, just see if the release exists
r, err := u.cfg.releaseContent(name, 0)
@ -99,29 +105,29 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, errors.Errorf("the release named %q is already deleted", name)
}
u.cfg.Log("uninstall: Deleting %s", name)
slog.Debug("uninstall: deleting release", "name", name)
rel.Info.Status = release.StatusUninstalling
rel.Info.Deleted = helmtime.Now()
rel.Info.Description = "Deletion in progress (or silently failed)"
res := &release.UninstallReleaseResponse{Release: rel}
if !u.DisableHooks {
if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout); err != nil {
return res, err
}
} else {
u.cfg.Log("delete hooks disabled for %s", name)
slog.Debug("delete hooks disabled", "release", name)
}
// From here on out, the release is currently considered to be in StatusUninstalling
// state.
if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err)
slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
deletedResources, kept, errs := u.deleteRelease(rel)
if errs != nil {
u.cfg.Log("uninstall: Failed to delete release: %s", errs)
slog.Debug("uninstall: Failed to delete release", slog.Any("error", errs))
return nil, errors.Errorf("failed to delete release: %s", name)
}
@ -130,16 +136,12 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
res.Info = kept
if u.Wait {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok {
if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil {
errs = append(errs, err)
}
}
if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil {
errs = append(errs, err)
}
if !u.DisableHooks {
if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout); err != nil {
errs = append(errs, err)
}
}
@ -152,7 +154,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if !u.KeepHistory {
u.cfg.Log("purge requested for %s", name)
slog.Debug("purge requested", "release", name)
err := u.purgeReleases(rels...)
if err != nil {
errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
@ -167,7 +169,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if err := u.cfg.Releases.Update(rel); err != nil {
u.cfg.Log("uninstall: Failed to store updated release: %s", err)
slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
if len(errs) > 0 {
@ -224,7 +226,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
}
if len(resources) > 0 {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok {
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation))
_, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation))
return resources, kept, errs
}
_, errs = u.cfg.KubeClient.Delete(resources)
@ -232,7 +234,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
return resources, kept, errs
}
func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation {
func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
switch cascadingFlag {
case "orphan":
return v1.DeletePropagationOrphan
@ -241,7 +243,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro
case "background":
return v1.DeletePropagationBackground
default:
cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag)
slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
return v1.DeletePropagationBackground
}
}

@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
)
@ -82,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
unAction := uninstallAction(t)
unAction.DisableHooks = true
unAction.DryRun = false
unAction.Wait = true
unAction.WaitStrategy = kube.StatusWatcherStrategy
rel := releaseStub()
rel.Name = "come-fail-away"
@ -99,7 +100,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
}`
unAction.cfg.Releases.Create(rel)
failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("U timed out")
failer.WaitForDeleteError = fmt.Errorf("U timed out")
unAction.cfg.KubeClient = failer
res, err := unAction.Run(rel.Name)
is.Error(err)
@ -113,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) {
unAction := uninstallAction(t)
unAction.DisableHooks = true
unAction.DryRun = false
unAction.Wait = false
unAction.WaitStrategy = kube.HookOnlyStrategy
unAction.DeletionPropagation = "foreground"
rel := releaseStub()

@ -20,6 +20,7 @@ import (
"bytes"
"context"
"fmt"
"log/slog"
"strings"
"sync"
"time"
@ -64,8 +65,8 @@ type Upgrade struct {
SkipCRDs bool
// Timeout is the timeout for this operation
Timeout time.Duration
// Wait determines whether the wait operation should be performed after the upgrade is requested.
Wait bool
// WaitStrategy determines what type of waiting should be done
WaitStrategy kube.WaitStrategy
// WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
WaitForJobs bool
// DisableHooks disables hook processing if set to true.
@ -131,14 +132,14 @@ func NewUpgrade(cfg *Configuration) *Upgrade {
up := &Upgrade{
cfg: cfg,
}
up.ChartPathOptions.registryClient = cfg.RegistryClient
up.registryClient = cfg.RegistryClient
return up
}
// SetRegistryClient sets the registry client to use when fetching charts.
func (u *Upgrade) SetRegistryClient(client *registry.Client) {
u.ChartPathOptions.registryClient = client
u.registryClient = client
}
// Run executes the upgrade on the given release.
@ -155,13 +156,15 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// If Atomic is set, make sure a wait strategy is set as well, so the
// user doesn't have to specify both.
u.Wait = u.Wait || u.Atomic
if u.WaitStrategy == kube.HookOnlyStrategy && u.Atomic {
u.WaitStrategy = kube.StatusWatcherStrategy
}
if err := chartutil.ValidateReleaseName(name); err != nil {
return nil, errors.Errorf("release name is invalid: %s", name)
}
u.cfg.Log("preparing upgrade for %s", name)
slog.Debug("preparing upgrade", "name", name)
currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
if err != nil {
return nil, err
@ -169,7 +172,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
u.cfg.Releases.MaxHistory = u.MaxHistory
u.cfg.Log("performing update for %s", name)
slog.Debug("performing update", "name", name)
res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
if err != nil {
return res, err
@ -177,7 +180,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// Do not update for dry runs
if !u.isDryRun() {
u.cfg.Log("updating status for upgraded release for %s", name)
slog.Debug("updating status for upgraded release", "name", name)
if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
return res, err
}
@ -363,7 +366,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
// Run if it is a dry run
if u.isDryRun() {
u.cfg.Log("dry run for %s", upgradedRelease.Name)
slog.Debug("dry run for release", "name", upgradedRelease.Name)
if len(u.Description) > 0 {
upgradedRelease.Info.Description = u.Description
} else {
@ -372,7 +375,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
return upgradedRelease, nil
}
u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
slog.Debug("creating upgraded release", "name", upgradedRelease.Name)
if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
return nil, err
}
@ -418,12 +421,12 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// pre-upgrade hooks
if !u.DisableHooks {
if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
return
}
} else {
u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
}
results, err := u.cfg.KubeClient.Update(current, target, u.Force)
@ -439,32 +442,32 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// levels, we should make these error level logs so users are notified
// that they'll need to go do the cleanup on their own
if err := recreate(u.cfg, results.Updated); err != nil {
u.cfg.Log(err.Error())
slog.Error(err.Error())
}
}
if u.Wait {
u.cfg.Log(
"waiting for release %s resources (created: %d updated: %d deleted: %d)",
upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted))
if u.WaitForJobs {
if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
} else {
if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
if err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
if u.WaitForJobs {
if err := waiter.WaitWithJobs(target, u.Timeout); err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
} else {
if err := waiter.Wait(target, u.Timeout); err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
}
// post-upgrade hooks
if !u.DisableHooks {
if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
return
}
@ -484,13 +487,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
u.cfg.Log("warning: %s", msg)
slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err))
rel.Info.Status = release.StatusFailed
rel.Info.Description = msg
u.cfg.recordRelease(rel)
if u.CleanupOnFail && len(created) > 0 {
u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
_, errs := u.cfg.KubeClient.Delete(created)
if errs != nil {
var errorList []string
@ -499,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
}
return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
}
u.cfg.Log("Resource cleanup complete")
slog.Debug("resource cleanup complete")
}
if u.Atomic {
u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")
slog.Debug("upgrade failed and atomic is set, rolling back to last successful release")
// As a protection, get the last successful release before rollback.
// If there are no successful releases, bail out
@ -526,7 +529,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
rollin := NewRollback(u.cfg)
rollin.Version = filteredHistory[0].Version
rollin.Wait = true
if u.WaitStrategy == kube.HookOnlyStrategy {
rollin.WaitStrategy = kube.StatusWatcherStrategy
}
rollin.WaitForJobs = u.WaitForJobs
rollin.DisableHooks = u.DisableHooks
rollin.Recreate = u.Recreate
@ -552,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
if u.ResetValues {
// If ResetValues is set, we completely ignore current.Config.
u.cfg.Log("resetting values to the chart's original version")
slog.Debug("resetting values to the chart's original version")
return newVals, nil
}
// If the ReuseValues flag is set, we always copy the old values over the new config's values.
if u.ReuseValues {
u.cfg.Log("reusing the old release's values")
slog.Debug("reusing the old release's values")
// We have to regenerate the old coalesced values:
oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
@ -575,7 +580,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
// If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
if u.ResetThenReuseValues {
u.cfg.Log("merging values from old release to new values")
slog.Debug("merging values from old release to new values")
newVals = chartutil.CoalesceTables(newVals, current.Config)
@ -583,7 +588,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
}
if len(newVals) == 0 && len(current.Config) > 0 {
u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version)
slog.Debug("copying values from old release", "name", current.Name, "version", current.Version)
newVals = current.Config
}
return newVals, nil

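The wait path above no longer calls Wait directly on the kube client; it first resolves a waiter for the configured strategy via GetWaiter and then waits through it. A minimal sketch of that pattern, assuming KubeClient satisfies kube.Interface with the GetWaiter method shown in this diff:

package waitdemo

import (
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// waitForResources resolves a waiter for the chosen strategy, then blocks
// until the resources are ready (or, with jobs, complete) or the timeout
// elapses. It mirrors the branch structure in releasingUpgrade above.
func waitForResources(client kube.Interface, strategy kube.WaitStrategy, resources kube.ResourceList, timeout time.Duration, withJobs bool) error {
	waiter, err := client.GetWaiter(strategy)
	if err != nil {
		return err
	}
	if withJobs {
		return waiter.WaitWithJobs(resources, timeout)
	}
	return waiter.Wait(resources, timeout)
}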
@ -24,6 +24,7 @@ import (
"time"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/kube"
"helm.sh/helm/v4/pkg/storage/driver"
"github.com/stretchr/testify/assert"
@ -52,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) {
rel.Info.Status = release.StatusDeployed
req.NoError(upAction.cfg.Releases.Create(rel))
upAction.Wait = true
upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
ctx, done := context.WithCancel(context.Background())
@ -82,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer
upAction.Wait = true
upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
res, err := upAction.Run(rel.Name, buildChart(), vals)
@ -104,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer
upAction.Wait = true
upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.WaitForJobs = true
vals := map[string]interface{}{}
@ -128,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) {
failer.WaitError = fmt.Errorf("I timed out")
failer.DeleteError = fmt.Errorf("I tried to delete nil")
upAction.cfg.KubeClient = failer
upAction.Wait = true
upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.CleanupOnFail = true
vals := map[string]interface{}{}
@ -395,7 +396,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second
upAction.cfg.KubeClient = failer
upAction.Wait = true
upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
ctx := context.Background()

@ -33,6 +33,15 @@ import (
chart "helm.sh/helm/v4/pkg/chart/v2"
)
// MaxDecompressedChartSize is the maximum size of a chart archive that will be
// decompressed. This is the decompressed size of all the files.
// The default value is 100 MiB.
var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB
// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load.
// The size of the file is the decompressed version of it when it is stored in an archive.
var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB
var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)
// FileLoader loads a chart from a file
@ -119,6 +128,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
files := []*BufferedFile{}
tr := tar.NewReader(unzipped)
remainingSize := MaxDecompressedChartSize
for {
b := bytes.NewBuffer(nil)
hd, err := tr.Next()
@ -178,10 +188,30 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
return nil, errors.New("chart yaml not in base directory")
}
if _, err := io.Copy(b, tr); err != nil {
if hd.Size > remainingSize {
return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
}
if hd.Size > MaxDecompressedFileSize {
return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize)
}
limitedReader := io.LimitReader(tr, remainingSize)
bytesWritten, err := io.Copy(b, limitedReader)
if err != nil {
return nil, err
}
remainingSize -= bytesWritten
// When bytesWritten is less than the file size, the limit reader stopped
// copying early, and we report that as an error. This matters when the last
// file extracted is the one that pushes the total over the limit. Like many
// applications, this assumes the Size stored in the tar header is correct.
if bytesWritten < hd.Size || remainingSize <= 0 {
return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
}
data := bytes.TrimPrefix(b.Bytes(), utf8bom)
files = append(files, &BufferedFile{Name: n, Data: data})

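The loader above enforces two budgets: a per-file cap checked against the tar header, and a whole-archive cap enforced with io.LimitReader so that a header lying about its size still cannot overrun the budget. A standalone sketch of the same guard (names here are illustrative, not Helm's):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

const (
	maxArchiveSize int64 = 100 * 1024 * 1024 // total decompressed budget
	maxFileSize    int64 = 5 * 1024 * 1024   // per-file budget
)

// readLimited copies one tar entry into a buffer while charging it against
// the remaining archive budget, returning the body and the updated budget.
func readLimited(tr *tar.Reader, hd *tar.Header, remaining int64) ([]byte, int64, error) {
	if hd.Size > remaining {
		return nil, remaining, fmt.Errorf("archive exceeds maximum size %d", maxArchiveSize)
	}
	if hd.Size > maxFileSize {
		return nil, remaining, fmt.Errorf("file %q exceeds maximum file size %d", hd.Name, maxFileSize)
	}
	var b bytes.Buffer
	n, err := io.Copy(&b, io.LimitReader(tr, remaining))
	if err != nil {
		return nil, remaining, err
	}
	remaining -= n
	// If fewer bytes arrived than the header promised, the limit reader cut
	// the copy short: the archive as a whole is over budget.
	if n < hd.Size || remaining <= 0 {
		return nil, remaining, fmt.Errorf("archive exceeds maximum size %d", maxArchiveSize)
	}
	return b.Bytes(), remaining, nil
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	data := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "chart/Chart.yaml", Mode: 0o644, Size: int64(len(data))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(data); err != nil {
		panic(err)
	}
	tw.Close()

	tr := tar.NewReader(&buf)
	hd, err := tr.Next()
	if err != nil {
		panic(err)
	}
	body, remaining, err := readLimited(tr, hd, maxArchiveSize)
	fmt.Println(string(body), remaining, err)
}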
@ -101,6 +101,10 @@ func LoadDir(dir string) (*chart.Chart, error) {
return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
}
if fi.Size() > MaxDecompressedFileSize {
return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize)
}
data, err := os.ReadFile(name)
if err != nil {
return errors.Wrapf(err, "error reading %s", n)

@ -16,7 +16,7 @@ limitations under the License.
package util
import (
"log"
"log/slog"
"strings"
"github.com/mitchellh/copystructure"
@ -48,10 +48,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
r.Enabled = bv
break
}
log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name)
slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
} else if _, ok := err.(ErrNoValue); !ok {
// this is a real error
log.Printf("Warning: PathValue returned error %v", err)
slog.Warn("the method PathValue returned error", slog.Any("error", err))
}
}
}
@ -79,7 +79,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) {
hasFalse = true
}
} else {
log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name)
slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)
}
}
}
@ -254,7 +254,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
// get child table
vv, err := cvals.Table(r.Name + "." + child)
if err != nil {
log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err)
slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err))
continue
}
// create value map from child to be merged into parent
@ -271,7 +271,7 @@ func processImportValues(c *chart.Chart, merge bool) error {
})
vm, err := cvals.Table(r.Name + "." + child)
if err != nil {
log.Printf("Warning: ImportValues missing table: %v", err)
slog.Warn("ImportValues missing table", slog.Any("error", err))
continue
}
if merge {

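These conversions follow one pattern used throughout the diff: the printf verb disappears, each value becomes a key/value attribute, and errors are wrapped with slog.Any. A minimal before-and-after for illustration:

package main

import (
	"errors"
	"log"
	"log/slog"
)

func main() {
	err := errors.New("boom")
	chart := "mychart"

	// Before: positional formatting, one opaque string.
	log.Printf("Warning: ImportValues missing table from chart %s: %v", chart, err)

	// After: structured attributes that log handlers can filter and index.
	slog.Warn("ImportValues missing table from chart", "chart", chart, slog.Any("error", err))
}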
@ -19,11 +19,11 @@ package util
import (
"bytes"
"fmt"
"log/slog"
"strings"
"github.com/pkg/errors"
"github.com/xeipuuv/gojsonschema"
"sigs.k8s.io/yaml"
"github.com/santhosh-tekuri/jsonschema/v6"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@ -32,13 +32,14 @@ import (
func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error {
var sb strings.Builder
if chrt.Schema != nil {
slog.Debug("chart name", "chart-name", chrt.Name())
err := ValidateAgainstSingleSchema(values, chrt.Schema)
if err != nil {
sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
sb.WriteString(err.Error())
}
}
slog.Debug("number of dependencies in the chart", "dependencies", len(chrt.Dependencies()))
// For each dependency, recursively call this function with the coalesced values
for _, subchart := range chrt.Dependencies() {
subchartValues := values[subchart.Name()].(map[string]interface{})
@ -62,32 +63,51 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error
}
}()
valuesData, err := yaml.Marshal(values)
// This unmarshal function leverages UseNumber() for number precision. The parser
// used for values does this as well.
schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON))
if err != nil {
return err
}
valuesJSON, err := yaml.YAMLToJSON(valuesData)
slog.Debug("unmarshalled JSON schema", "schema", schemaJSON)
compiler := jsonschema.NewCompiler()
err = compiler.AddResource("file:///values.schema.json", schema)
if err != nil {
return err
}
if bytes.Equal(valuesJSON, []byte("null")) {
valuesJSON = []byte("{}")
}
schemaLoader := gojsonschema.NewBytesLoader(schemaJSON)
valuesLoader := gojsonschema.NewBytesLoader(valuesJSON)
result, err := gojsonschema.Validate(schemaLoader, valuesLoader)
validator, err := compiler.Compile("file:///values.schema.json")
if err != nil {
return err
}
if !result.Valid() {
var sb strings.Builder
for _, desc := range result.Errors() {
sb.WriteString(fmt.Sprintf("- %s\n", desc))
}
return errors.New(sb.String())
err = validator.Validate(values.AsMap())
if err != nil {
return JSONSchemaValidationError{err}
}
return nil
}
// Note: JSONSchemaValidationError wraps the error from the underlying
// validation package so that Helm presents a clean interface and the
// validation package can be replaced without changing the Helm SDK API.
// JSONSchemaValidationError is the error returned when there is a schema validation
// error.
type JSONSchemaValidationError struct {
embeddedErr error
}
// Error prints the error message
func (e JSONSchemaValidationError) Error() string {
errStr := e.embeddedErr.Error()
// This string prefixes all of our error details. Further up the stack, Helm's
// error-message building adds more detail for users, so the prefix is stripped here.
errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n")
// The extra newline is needed when there are sub-charts.
return errStr + "\n"
}

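The rewritten validator swaps gojsonschema for github.com/santhosh-tekuri/jsonschema/v6: the schema is unmarshalled with number precision preserved, registered as a compiler resource under a synthetic URL, compiled, and run against the decoded values directly, with no YAML-to-JSON round trip. A condensed standalone sketch of that flow (the schema and values here are illustrative):

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	schemaJSON := []byte(`{
		"type": "object",
		"properties": {"age": {"type": "integer", "minimum": 0}},
		"required": ["age"]
	}`)

	// UnmarshalJSON decodes with number precision preserved (json.Number),
	// matching how Helm parses values.
	doc, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON))
	if err != nil {
		panic(err)
	}

	c := jsonschema.NewCompiler()
	if err := c.AddResource("file:///values.schema.json", doc); err != nil {
		panic(err)
	}
	schema, err := c.Compile("file:///values.schema.json")
	if err != nil {
		panic(err)
	}

	// Validate takes a decoded document, not raw bytes; decode the instance
	// the same way the schema was decoded.
	values, err := jsonschema.UnmarshalJSON(strings.NewReader(`{"age": -5}`))
	if err != nil {
		panic(err)
	}
	if err := schema.Validate(values); err != nil {
		fmt.Println(err) // reports something like: at '/age': minimum: got -5, want 0
	}
}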
@ -69,7 +69,7 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) {
}
schema, err := os.ReadFile("./testdata/test-values.schema.json")
if err != nil {
t.Fatalf("Error reading YAML file: %s", err)
t.Fatalf("Error reading JSON file: %s", err)
}
var errString string
@ -79,8 +79,8 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) {
errString = err.Error()
}
expectedErrString := `- (root): employmentInfo is required
- age: Must be greater than or equal to 0
expectedErrString := `- at '': missing property 'employmentInfo'
- at '/age': minimum: got -5, want 0
`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
@ -104,6 +104,21 @@ const subchartSchema = `{
}
`
const subchartSchema2020 = `{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Values",
"type": "object",
"properties": {
"data": {
"type": "array",
"contains": { "type": "string" },
"unevaluatedItems": { "type": "number" }
}
},
"required": ["data"]
}
`
func TestValidateAgainstSchema(t *testing.T) {
subchartJSON := []byte(subchartSchema)
subchart := &chart.Chart{
@ -159,7 +174,72 @@ func TestValidateAgainstSchemaNegative(t *testing.T) {
}
expectedErrString := `subchart:
- (root): age is required
- at '': missing property 'age'
`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
}
}
func TestValidateAgainstSchema2020(t *testing.T) {
subchartJSON := []byte(subchartSchema2020)
subchart := &chart.Chart{
Metadata: &chart.Metadata{
Name: "subchart",
},
Schema: subchartJSON,
}
chrt := &chart.Chart{
Metadata: &chart.Metadata{
Name: "chrt",
},
}
chrt.AddDependency(subchart)
vals := map[string]interface{}{
"name": "John",
"subchart": map[string]interface{}{
"data": []any{"hello", 12},
},
}
if err := ValidateAgainstSchema(chrt, vals); err != nil {
t.Errorf("Error validating Values against Schema: %s", err)
}
}
func TestValidateAgainstSchema2020Negative(t *testing.T) {
subchartJSON := []byte(subchartSchema2020)
subchart := &chart.Chart{
Metadata: &chart.Metadata{
Name: "subchart",
},
Schema: subchartJSON,
}
chrt := &chart.Chart{
Metadata: &chart.Metadata{
Name: "chrt",
},
}
chrt.AddDependency(subchart)
vals := map[string]interface{}{
"name": "John",
"subchart": map[string]interface{}{
"data": []any{12},
},
}
var errString string
if err := ValidateAgainstSchema(chrt, vals); err == nil {
t.Fatalf("Expected an error, but got nil")
} else {
errString = err.Error()
}
expectedErrString := `subchart:
- at '/data': no items match contains schema
- at '/data/0': got number, want string
`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)

@ -130,8 +130,8 @@ func Save(c *chart.Chart, outDir string) (string, error) {
// Wrap in gzip writer
zipper := gzip.NewWriter(f)
zipper.Header.Extra = headerBytes
zipper.Header.Comment = "Helm"
zipper.Extra = headerBytes
zipper.Comment = "Helm"
// Wrap in tar writer
twriter := tar.NewWriter(zipper)

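The save change above relies on field promotion: gzip.Writer embeds gzip.Header, so zipper.Extra and zipper.Header.Extra name the same field. A small standalone example setting the same kind of archive metadata:

package main

import (
	"compress/gzip"
	"os"
)

func main() {
	f, err := os.Create("example.tgz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zw := gzip.NewWriter(f)
	// gzip.Writer embeds gzip.Header, so Extra and Comment are reachable
	// directly on the writer itself.
	zw.Extra = []byte{0x48, 0x4d} // illustrative extra bytes
	zw.Comment = "Helm"
	if _, err := zw.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
}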
@ -86,7 +86,7 @@ func (o *docsOptions) run(_ io.Writer) error {
hdrFunc := func(filename string) string {
base := filepath.Base(filename)
name := strings.TrimSuffix(base, path.Ext(base))
title := cases.Title(language.Und, cases.NoLower).String(strings.Replace(name, "_", " ", -1))
title := cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(name, "_", " "))
return fmt.Sprintf("---\ntitle: \"%s\"\n---\n\n", title)
}

@ -20,6 +20,7 @@ import (
"flag"
"fmt"
"log"
"log/slog"
"path/filepath"
"sort"
"strings"
@ -32,6 +33,7 @@ import (
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cli/values"
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/kube"
"helm.sh/helm/v4/pkg/postrender"
"helm.sh/helm/v4/pkg/repo"
)
@ -51,6 +53,52 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) {
f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line")
}
func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) {
cmd.Flags().Var(
newWaitValue(kube.HookOnlyStrategy, wait),
"wait",
"if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'",
)
// Sets the strategy to use the watcher strategy if `--wait` is used without an argument
cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy)
}
type waitValue kube.WaitStrategy
func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue {
*ws = defaultValue
return (*waitValue)(ws)
}
func (ws *waitValue) String() string {
if ws == nil {
return ""
}
return string(*ws)
}
func (ws *waitValue) Set(s string) error {
switch s {
case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy):
*ws = waitValue(s)
return nil
case "true":
slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
*ws = waitValue(kube.StatusWatcherStrategy)
return nil
case "false":
slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
*ws = waitValue(kube.HookOnlyStrategy)
return nil
default:
return fmt.Errorf("invalid wait input %q. Valid inputs are %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy)
}
}
func (ws *waitValue) Type() string {
return "WaitStrategy"
}
func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) {
f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")
@ -212,7 +260,7 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect
var versions []string
if indexFile, err := repo.LoadIndexFile(path); err == nil {
for _, details := range indexFile.Entries[chartName] {
appVersion := details.Metadata.AppVersion
appVersion := details.AppVersion
appVersionDesc := ""
if appVersion != "" {
appVersionDesc = fmt.Sprintf("App: %s, ", appVersion)
@ -223,10 +271,10 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect
createdDesc = fmt.Sprintf("Created: %s ", created)
}
deprecated := ""
if details.Metadata.Deprecated {
if details.Deprecated {
deprecated = "(deprecated)"
}
versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Metadata.Version, appVersionDesc, createdDesc, deprecated))
versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Version, appVersionDesc, createdDesc, deprecated))
}
}

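Because NoOptDefVal is set, a bare --wait resolves to the watcher strategy, while --wait=watcher, --wait=legacy, and the deprecated boolean spellings all go through Set above. A sketch of wiring the flag onto a command, assuming the package's import path is helm.sh/helm/v4/pkg/cmd as the repository layout suggests:

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	helmcmd "helm.sh/helm/v4/pkg/cmd" // assumed import path for this package
	"helm.sh/helm/v4/pkg/kube"
)

func main() {
	var strategy kube.WaitStrategy
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("wait strategy:", strategy)
			return nil
		},
	}
	helmcmd.AddWaitFlag(cmd, &strategy)

	// A bare --wait resolves to the watcher strategy via NoOptDefVal;
	// --wait=legacy selects the legacy waiter; omitting the flag keeps the
	// hook-only default installed by newWaitValue.
	cmd.SetArgs([]string{"--wait=legacy"})
	_ = cmd.Execute()
}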
@ -92,7 +92,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities,
Log: func(_ string, _ ...interface{}) {},
}
root, err := newRootCmdWithConfig(actionConfig, buf, args)

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"os"
"os/signal"
"syscall"
@ -194,7 +195,6 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install")
f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)")
f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release")
@ -202,7 +202,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used")
f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
@ -212,6 +212,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources")
addValueOptionsFlags(f, valueOpts)
addChartPathOptionsFlags(f, &client.ChartPathOptions)
AddWaitFlag(cmd, &client.WaitStrategy)
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
requiredArgs := 2
@ -229,9 +230,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
}
func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
Debug("Original chart version: %q", client.Version)
slog.Debug("Original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
@ -241,12 +242,12 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
}
client.ReleaseName = name
cp, err := client.ChartPathOptions.LocateChart(chart, settings)
cp, err := client.LocateChart(chart, settings)
if err != nil {
return nil, err
}
Debug("CHART PATH: %s\n", cp)
slog.Debug("Chart path", "path", cp)
p := getter.All(settings)
vals, err := valueOpts.MergeValues(p)
@ -265,7 +266,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
}
if chartRequested.Metadata.Deprecated {
Warning("This chart is deprecated")
slog.Warn("this chart is deprecated")
}
if req := chartRequested.Metadata.Dependencies; req != nil {
@ -278,7 +279,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
man := &downloader.Manager{
Out: out,
ChartPath: cp,
Keyring: client.ChartPathOptions.Keyring,
Keyring: client.Keyring,
SkipUpdate: false,
Getters: p,
RepositoryConfig: settings.RepositoryConfig,

@ -71,7 +71,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(cmd *cobra.Command, _ []string) error {
if client.AllNamespaces {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Debug); err != nil {
if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
return err
}
}

@ -17,6 +17,7 @@ package cmd
import (
"io"
"log/slog"
"os"
"os/exec"
@ -66,7 +67,7 @@ func runHook(p *plugin.Plugin, event string) error {
prog := exec.Command(main, argv...)
Debug("running %s hook: %s", event, prog)
slog.Debug("running hook", "event", event, "program", prog)
prog.Stdout, prog.Stderr = os.Stdout, os.Stderr
if err := prog.Run(); err != nil {

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@ -79,7 +80,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error {
return err
}
Debug("loading plugin from %s", i.Path())
slog.Debug("loading plugin", "path", i.Path())
p, err := plugin.LoadDir(i.Path())
if err != nil {
return errors.Wrap(err, "plugin is installed but unusable")

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"github.com/gosuri/uitable"
"github.com/spf13/cobra"
@ -32,7 +33,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command {
Short: "list installed Helm plugins",
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error {
Debug("pluginDirs: %s", settings.PluginsDirectory)
slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"os"
"strings"
@ -60,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error {
}
func (o *pluginUninstallOptions) run(out io.Writer) error {
Debug("loading installed plugins from %s", settings.PluginsDirectory)
slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err

@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"path/filepath"
"strings"
@ -62,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error {
func (o *pluginUpdateOptions) run(out io.Writer) error {
installer.Debug = settings.Debug
Debug("loading installed plugins from %s", settings.PluginsDirectory)
slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
return err
@ -104,7 +105,7 @@ func updatePlugin(p *plugin.Plugin) error {
return err
}
Debug("loading plugin from %s", i.Path())
slog.Debug("loading plugin", "path", i.Path())
updatedPlugin, err := plugin.LoadDir(i.Path())
if err != nil {
return err

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"github.com/spf13/cobra"
@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
RunE: func(_ *cobra.Command, args []string) error {
client.Settings = settings
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"os"
"strings"
@ -122,7 +123,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
}
}
} else {
Warning("Using --password via the CLI is insecure. Use --password-stdin.")
slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
}
return username, password, nil

@ -39,7 +39,7 @@ func newRepoListCmd(out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error {
f, _ := repo.LoadFile(settings.RepositoryConfig)
if len(f.Repositories) == 0 && !(outfmt == output.JSON || outfmt == output.YAML) {
if len(f.Repositories) == 0 && outfmt != output.JSON && outfmt != output.YAML {
return errors.New("no repositories to show")
}

@ -42,11 +42,10 @@ To update all the repositories, use 'helm repo update'.
var errNoRepositories = errors.New("no repositories found. You must add one before updating")
type repoUpdateOptions struct {
update func([]*repo.ChartRepository, io.Writer, bool) error
repoFile string
repoCache string
names []string
failOnRepoUpdateFail bool
update func([]*repo.ChartRepository, io.Writer) error
repoFile string
repoCache string
names []string
}
func newRepoUpdateCmd(out io.Writer) *cobra.Command {
@ -69,12 +68,6 @@ func newRepoUpdateCmd(out io.Writer) *cobra.Command {
},
}
f := cmd.Flags()
// Adding this flag for Helm 3 as stop gap functionality for https://github.com/helm/helm/issues/10016.
// This should be deprecated in Helm 4 by update to the behaviour of `helm repo update` command.
f.BoolVar(&o.failOnRepoUpdateFail, "fail-on-repo-update-fail", false, "update fails if any of the repository updates fail")
return cmd
}
@ -112,29 +105,39 @@ func (o *repoUpdateOptions) run(out io.Writer) error {
}
}
return o.update(repos, out, o.failOnRepoUpdateFail)
return o.update(repos, out)
}
func updateCharts(repos []*repo.ChartRepository, out io.Writer, failOnRepoUpdateFail bool) error {
func updateCharts(repos []*repo.ChartRepository, out io.Writer) error {
fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...")
var wg sync.WaitGroup
var repoFailList []string
failRepoURLChan := make(chan string, len(repos))
for _, re := range repos {
wg.Add(1)
go func(re *repo.ChartRepository) {
defer wg.Done()
if _, err := re.DownloadIndexFile(); err != nil {
fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err)
repoFailList = append(repoFailList, re.Config.URL)
failRepoURLChan <- re.Config.URL
} else {
fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name)
}
}(re)
}
wg.Wait()
if len(repoFailList) > 0 && failOnRepoUpdateFail {
return fmt.Errorf("Failed to update the following repositories: %s",
go func() {
wg.Wait()
close(failRepoURLChan)
}()
var repoFailList []string
for url := range failRepoURLChan {
repoFailList = append(repoFailList, url)
}
if len(repoFailList) > 0 {
return fmt.Errorf("failed to update the following repositories: %s",
repoFailList)
}

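The rewrite above also removes a data race: previously multiple goroutines appended to repoFailList concurrently, whereas now each failure is sent into a channel buffered to len(repos) so senders never block, and a separate goroutine closes the channel once the WaitGroup drains so the receiving range terminates. The same fan-in pattern in isolation:

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	repos := []string{"https://a.example", "https://b.example", "https://c.example"}

	var wg sync.WaitGroup
	failures := make(chan string, len(repos)) // buffered: senders never block

	for _, u := range repos {
		wg.Add(1)
		go func(u string) {
			defer wg.Done()
			if strings.HasSuffix(u, "b.example") { // stand-in for a failed index download
				failures <- u
			}
		}(u)
	}

	// Close the channel only after every sender has finished, so the
	// receiving range below knows when to stop.
	go func() {
		wg.Wait()
		close(failures)
	}()

	var failed []string
	for u := range failures {
		failed = append(failed, u)
	}
	if len(failed) > 0 {
		fmt.Printf("failed to update the following repositories: %s\n", failed)
	}
}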
@ -34,7 +34,7 @@ func TestUpdateCmd(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@ -59,7 +59,7 @@ func TestUpdateCmdMultiple(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@ -85,7 +85,7 @@ func TestUpdateCmdInvalid(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@ -145,7 +145,7 @@ func TestUpdateCharts(t *testing.T) {
}
b := bytes.NewBuffer(nil)
updateCharts([]*repo.ChartRepository{r}, b, false)
updateCharts([]*repo.ChartRepository{r}, b)
got := b.String()
if strings.Contains(got, "Unable to get an update") {
@ -161,7 +161,7 @@ func TestRepoUpdateFileCompletion(t *testing.T) {
checkFileCompletion(t, "repo update repo1", false)
}
func TestUpdateChartsFail(t *testing.T) {
func TestUpdateChartsFailWithError(t *testing.T) {
defer resetEnv()()
ensure.HelmHome(t)
@ -172,40 +172,14 @@ func TestUpdateChartsFail(t *testing.T) {
defer ts.Stop()
var invalidURL = ts.URL() + "55"
r, err := repo.NewChartRepository(&repo.Entry{
r1, err := repo.NewChartRepository(&repo.Entry{
Name: "charts",
URL: invalidURL,
}, getter.All(settings))
if err != nil {
t.Error(err)
}
b := bytes.NewBuffer(nil)
if err := updateCharts([]*repo.ChartRepository{r}, b, false); err != nil {
t.Error("Repo update should not return error if update of repository fails")
}
got := b.String()
if !strings.Contains(got, "Unable to get an update") {
t.Errorf("Repo should have failed update but instead got: %q", got)
}
if !strings.Contains(got, "Update Complete.") {
t.Error("Update was not successful")
}
}
func TestUpdateChartsFailWithError(t *testing.T) {
defer resetEnv()()
ensure.HelmHome(t)
ts := repotest.NewTempServer(
t,
repotest.WithChartSourceGlob("testdata/testserver/*.*"),
)
defer ts.Stop()
var invalidURL = ts.URL() + "55"
r, err := repo.NewChartRepository(&repo.Entry{
r2, err := repo.NewChartRepository(&repo.Entry{
Name: "charts",
URL: invalidURL,
}, getter.All(settings))
@ -214,12 +188,12 @@ func TestUpdateChartsFailWithError(t *testing.T) {
}
b := bytes.NewBuffer(nil)
err = updateCharts([]*repo.ChartRepository{r}, b, true)
err = updateCharts([]*repo.ChartRepository{r1, r2}, b)
if err == nil {
t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set")
return
}
var expectedErr = "Failed to update the following repositories"
var expectedErr = "failed to update the following repositories"
var receivedErr = err.Error()
if !strings.Contains(receivedErr, expectedErr) {
t.Errorf("Expected error (%s) but got (%s) instead", expectedErr, receivedErr)

@ -81,10 +81,10 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
AddWaitFlag(cmd, &client.WaitStrategy)
return cmd
}

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"net/http"
"os"
"strings"
@ -31,6 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"helm.sh/helm/v4/internal/logging"
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli"
@ -96,16 +98,6 @@ By default, the default directories depend on the Operating System. The defaults
var settings = cli.New()
func Debug(format string, v ...interface{}) {
if settings.Debug {
log.Output(2, fmt.Sprintf("[debug] "+format+"\n", v...))
}
}
func Warning(format string, v ...interface{}) {
fmt.Fprintf(os.Stderr, "WARNING: "+format+"\n", v...)
}
func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
actionConfig := new(action.Configuration)
cmd, err := newRootCmdWithConfig(actionConfig, out, args)
@ -114,7 +106,7 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
}
cobra.OnInitialize(func() {
helmDriver := os.Getenv("HELM_DRIVER")
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Debug); err != nil {
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
log.Fatal(err)
}
if helmDriver == "memory" {
@ -148,6 +140,9 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
settings.AddFlags(flags)
addKlogFlags(flags)
logger := logging.NewLogger(func() bool { return settings.Debug })
slog.SetDefault(logger)
// Setup shell completion for the namespace flag
err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
if client, err := actionConfig.KubernetesClientSet(); err == nil {

@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"io"
"log/slog"
"strings"
"github.com/gosuri/uitable"
@ -89,7 +90,7 @@ func (o *searchHubOptions) run(out io.Writer, args []string) error {
q := strings.Join(args, " ")
results, err := c.Search(q)
if err != nil {
Debug("%s", err)
slog.Debug("search failed", slog.Any("error", err))
return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
}

@ -21,6 +21,7 @@ import (
"bytes"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@ -130,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error {
}
func (o *searchRepoOptions) setupSearchedVersion() {
Debug("Original chart version: %q", o.version)
slog.Debug("original chart version", "version", o.version)
if o.version != "" {
return
}
if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
o.version = ">0.0.0-0"
} else { // search only for stable releases, prerelease versions will be skipped
Debug("setting version to >0.0.0")
slog.Debug("setting version to >0.0.0")
o.version = ">0.0.0"
}
}
@ -189,8 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
ind, err := repo.LoadIndexFile(f)
if err != nil {
Warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n)
Warning("%s", err)
slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err))
continue
}

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"github.com/spf13/cobra"
@ -211,13 +212,13 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) {
}
func runShow(args []string, client *action.Show) (string, error) {
Debug("Original chart version: %q", client.Version)
slog.Debug("original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
cp, err := client.ChartPathOptions.LocateChart(args[0], settings)
cp, err := client.LocateChart(args[0], settings)
if err != nil {
return "", err
}

@ -230,7 +230,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
_, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
if err != nil {
return err

@ -1,4 +1,4 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
empty:
- age: Must be greater than or equal to 0
- at '/age': minimum: got -5, want 0

@ -1,5 +1,5 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
empty:
- (root): employmentInfo is required
- age: Must be greater than or equal to 0
- at '': missing property 'employmentInfo'
- at '/age': minimum: got -5, want 0

@ -1,4 +1,4 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
subchart-with-schema:
- age: Must be greater than or equal to 0
- at '/age': minimum: got -25, want 0

@ -1,6 +1,6 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
chart-without-schema:
- (root): lastname is required
- at '': missing property 'lastname'
subchart-with-schema:
- (root): age is required
- at '': missing property 'age'

@ -76,10 +76,10 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout")
f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.StringVar(&client.Description, "description", "", "add a custom description")
AddWaitFlag(cmd, &client.WaitStrategy)
return cmd
}

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log"
"log/slog"
"os"
"os/signal"
"syscall"
@ -136,7 +137,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
instClient.DisableHooks = client.DisableHooks
instClient.SkipCRDs = client.SkipCRDs
instClient.Timeout = client.Timeout
instClient.Wait = client.Wait
instClient.WaitStrategy = client.WaitStrategy
instClient.WaitForJobs = client.WaitForJobs
instClient.Devel = client.Devel
instClient.Namespace = client.Namespace
@ -173,11 +174,11 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if client.Version == "" && client.Devel {
Debug("setting version to >0.0.0-0")
slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
chartPath, err := client.ChartPathOptions.LocateChart(args[1], settings)
chartPath, err := client.LocateChart(args[1], settings)
if err != nil {
return err
}
@ -204,7 +205,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
man := &downloader.Manager{
Out: out,
ChartPath: chartPath,
Keyring: client.ChartPathOptions.Keyring,
Keyring: client.Keyring,
SkipUpdate: false,
Getters: p,
RepositoryConfig: settings.RepositoryConfig,
@ -225,7 +226,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if ch.Metadata.Deprecated {
Warning("This chart is deprecated")
slog.Warn("this chart is deprecated")
}
// Create context and prepare the handle of SIGTERM
@ -278,9 +279,8 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored")
f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used")
f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
@ -295,6 +295,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
addValueOptionsFlags(f, valueOpts)
bindOutputFlag(cmd, &outfmt)
bindPostRenderFlag(cmd, &client.PostRenderer)
AddWaitFlag(cmd, &client.WaitStrategy)
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 2 {

@ -18,7 +18,7 @@ package engine
import (
"fmt"
"log"
"log/slog"
"path"
"path/filepath"
"regexp"
@ -203,7 +203,7 @@ func (e Engine) initFunMap(t *template.Template) {
if val == nil {
if e.LintMode {
// Don't fail on missing required values when linting
log.Printf("[INFO] Missing required value: %s", warn)
slog.Warn("missing required value", "message", warn)
return "", nil
}
return val, errors.New(warnWrap(warn))
@ -211,7 +211,7 @@ func (e Engine) initFunMap(t *template.Template) {
if val == "" {
if e.LintMode {
// Don't fail on missing required values when linting
log.Printf("[INFO] Missing required value: %s", warn)
slog.Warn("missing required values", "message", warn)
return "", nil
}
return val, errors.New(warnWrap(warn))
@ -224,7 +224,7 @@ func (e Engine) initFunMap(t *template.Template) {
funcMap["fail"] = func(msg string) (string, error) {
if e.LintMode {
// Don't fail when linting
log.Printf("[INFO] Fail: %s", msg)
slog.Info("funcMap fail", "message", msg)
return "", nil
}
return "", errors.New(warnWrap(msg))

@ -18,7 +18,7 @@ package engine
import (
"context"
"log"
"log/slog"
"strings"
"github.com/pkg/errors"
@ -35,9 +35,6 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam
// NewLookupFunction returns a function for looking up objects in the cluster.
//
// If the resource does not exist, no error is raised.
//
// This function is considered deprecated, and will be renamed in Helm 4. It will no
// longer be a public function.
func NewLookupFunction(config *rest.Config) lookupFunc {
return newLookupFunction(clientProviderFromConfig{config: config})
}
@ -101,7 +98,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
gvk := schema.FromAPIVersionAndKind(apiversion, kind)
apiRes, err := getAPIResourceForGVK(gvk, config)
if err != nil {
log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err)
slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err))
return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
}
gvr := schema.GroupVersionResource{
@ -111,7 +108,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
}
intf, err := dynamic.NewForConfig(config)
if err != nil {
log.Printf("[ERROR] unable to get dynamic client %s", err)
slog.Error("unable to get dynamic client", slog.Any("error", err))
return nil, false, err
}
res := intf.Resource(gvr)
@ -122,12 +119,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met
res := metav1.APIResource{}
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
log.Printf("[ERROR] unable to create discovery client %s", err)
slog.Error("unable to create discovery client", slog.Any("error", err))
return res, err
}
resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
if err != nil {
log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err)
slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err))
return res, err
}
for _, resource := range resList.APIResources {

@ -26,7 +26,6 @@ import (
"github.com/pkg/errors"
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/internal/urlutil"
"helm.sh/helm/v4/internal/version"
)
@ -137,12 +136,6 @@ func (g *HTTPGetter) httpClient() (*http.Client, error) {
return nil, errors.Wrap(err, "can't create TLS config for client")
}
sni, err := urlutil.ExtractHostname(g.opts.url)
if err != nil {
return nil, err
}
tlsConf.ServerName = sni
g.transport.TLSClientConfig = tlsConf
}

@ -358,6 +358,131 @@ func TestDownloadTLS(t *testing.T) {
}
}
func TestDownloadTLSWithRedirect(t *testing.T) {
cd := "../../testdata"
srv2Resp := "hello"
insecureSkipTLSverify := false
// Server 2 that will actually fulfil the request.
ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "localhost-crt.pem"), filepath.Join(cd, "key.pem")
tlsConf, err := tlsutil.NewTLSConfig(
tlsutil.WithCAFile(ca),
tlsutil.WithCertKeyPairFiles(pub, priv),
tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify),
)
if err != nil {
t.Fatal(errors.Wrap(err, "can't create TLS config for client"))
}
tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
rw.Header().Set("Content-Type", "text/plain")
rw.Write([]byte(srv2Resp))
}))
tlsSrv2.TLS = tlsConf
tlsSrv2.StartTLS()
defer tlsSrv2.Close()
// Server 1 responds with a redirect to Server 2.
ca, pub, priv = filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem")
tlsConf, err = tlsutil.NewTLSConfig(
tlsutil.WithCAFile(ca),
tlsutil.WithCertKeyPairFiles(pub, priv),
tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify),
)
if err != nil {
t.Fatal(errors.Wrap(err, "can't create TLS config for client"))
}
tlsSrv1 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
u, _ := url.ParseRequestURI(tlsSrv2.URL)
// Make the request using the hostname 'localhost' (to which 'localhost-crt.pem' is issued)
// to verify that a successful TLS connection is made even if the client doesn't specify
// the hostname (SNI) in `tls.Config.ServerName`. By default the hostname is derived from the
// request URL for every request (including redirects). Setting `tls.Config.ServerName` on the
// client just overrides the remote endpoint's hostname.
// See https://github.com/golang/go/blob/3979fb9/src/net/http/transport.go#L1505-L1513.
u.Host = fmt.Sprintf("localhost:%s", u.Port())
http.Redirect(rw, r, u.String(), http.StatusTemporaryRedirect)
}))
tlsSrv1.TLS = tlsConf
tlsSrv1.StartTLS()
defer tlsSrv1.Close()
u, _ := url.ParseRequestURI(tlsSrv1.URL)
t.Run("Test with TLS", func(t *testing.T) {
g, err := NewHTTPGetter(
WithURL(u.String()),
WithTLSClientConfig(pub, priv, ca),
)
if err != nil {
t.Fatal(err)
}
buf, err := g.Get(u.String())
if err != nil {
t.Error(err)
}
b, err := io.ReadAll(buf)
if err != nil {
t.Error(err)
}
if string(b) != srv2Resp {
t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
}
})
t.Run("Test with TLS config being passed along in .Get (see #6635)", func(t *testing.T) {
g, err := NewHTTPGetter()
if err != nil {
t.Fatal(err)
}
buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig(pub, priv, ca))
if err != nil {
t.Error(err)
}
b, err := io.ReadAll(buf)
if err != nil {
t.Error(err)
}
if string(b) != srv2Resp {
t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
}
})
t.Run("Test with only the CA file (see also #6635)", func(t *testing.T) {
g, err := NewHTTPGetter()
if err != nil {
t.Fatal(err)
}
buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig("", "", ca))
if err != nil {
t.Error(err)
}
b, err := io.ReadAll(buf)
if err != nil {
t.Error(err)
}
if string(b) != srv2Resp {
t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
}
})
}
func TestDownloadInsecureSkipTLSVerify(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
defer ts.Close()
@ -450,9 +575,6 @@ func TestHttpClientInsecureSkipVerify(t *testing.T) {
if len(transport.TLSClientConfig.Certificates) <= 0 {
t.Fatal("transport.TLSClientConfig.Certificates is not present")
}
if transport.TLSClientConfig.ServerName == "" {
t.Fatal("TLSClientConfig.ServerName is blank")
}
}
func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expectedValue bool) *http.Transport {

@ -20,7 +20,7 @@ import (
"bufio"
"bytes"
"io"
"log"
"log/slog"
"os"
"path/filepath"
"strings"
@ -102,7 +102,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool {
}
for _, p := range r.patterns {
if p.match == nil {
log.Printf("ignore: no matcher supplied for %q", p.raw)
slog.Info("this will be ignored no matcher supplied", "patterns", p.raw)
return false
}
@ -177,7 +177,7 @@ func (r *Rules) parseRule(rule string) error {
rule = strings.TrimPrefix(rule, "/")
ok, err := filepath.Match(rule, n)
if err != nil {
log.Printf("Failed to compile %q: %s", rule, err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@ -187,7 +187,7 @@ func (r *Rules) parseRule(rule string) error {
p.match = func(n string, _ os.FileInfo) bool {
ok, err := filepath.Match(rule, n)
if err != nil {
log.Printf("Failed to compile %q: %s", rule, err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@ -199,7 +199,7 @@ func (r *Rules) parseRule(rule string) error {
n = filepath.Base(n)
ok, err := filepath.Match(rule, n)
if err != nil {
log.Printf("Failed to compile %q: %s", rule, err)
slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok

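The slog.Error branches above fire only when filepath.Match rejects a malformed pattern; a quick standard-library sketch of those semantics:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	ok, err := filepath.Match("*.txt", "notes.txt")
	fmt.Println(ok, err) // true <nil>

	// A malformed pattern is the only way Match returns an error, which is
	// the case the parseRule matchers log and swallow.
	_, err = filepath.Match("[unclosed", "x")
	fmt.Println(err) // syntax error in pattern
}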
@ -22,39 +22,34 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
multierror "github.com/hashicorp/go-multierror"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
@ -79,13 +74,21 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower
// chance of it changing.
Factory Factory
Log func(string, ...interface{})
// Namespace allows bypassing the kubeconfig file when choosing the namespace
Namespace string
Waiter
kubeClient kubernetes.Interface
}
type WaitStrategy string
const (
StatusWatcherStrategy WaitStrategy = "watcher"
LegacyStrategy WaitStrategy = "legacy"
HookOnlyStrategy WaitStrategy = "hookOnly"
)
func init() {
// Add CRDs to the scheme. They are missing by default.
if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
@ -97,19 +100,71 @@ func init() {
}
}
func (c *Client) newStatusWatcher() (*statusWaiter, error) {
cfg, err := c.Factory.ToRESTConfig()
if err != nil {
return nil, err
}
dynamicClient, err := c.Factory.DynamicClient()
if err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(cfg)
if err != nil {
return nil, err
}
restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
if err != nil {
return nil, err
}
return &statusWaiter{
restMapper: restMapper,
client: dynamicClient,
}, nil
}
func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
switch strategy {
case LegacyStrategy:
kc, err := c.Factory.KubernetesClientSet()
if err != nil {
return nil, err
}
return &legacyWaiter{kubeClient: kc}, nil
case StatusWatcherStrategy:
return c.newStatusWatcher()
case HookOnlyStrategy:
sw, err := c.newStatusWatcher()
if err != nil {
return nil, err
}
return &hookOnlyWaiter{sw: sw}, nil
default:
return nil, errors.New("unknown wait strategy")
}
}
func (c *Client) SetWaiter(ws WaitStrategy) error {
var err error
c.Waiter, err = c.GetWaiter(ws)
if err != nil {
return err
}
return nil
}
// New creates a new Client.
func New(getter genericclioptions.RESTClientGetter) *Client {
if getter == nil {
getter = genericclioptions.NewConfigFlags(true)
}
return &Client{
Factory: cmdutil.NewFactory(getter),
Log: nopLogger,
factory := cmdutil.NewFactory(getter)
c := &Client{
Factory: factory,
}
return c
}
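A hedged usage sketch for the new waiter plumbing; the import path follows the module seen elsewhere in this diff, and the strategy choice is an assumption for illustration.

package main

import (
	"log"

	"helm.sh/helm/v4/pkg/kube"
)

func main() {
	c := kube.New(nil) // a nil getter falls back to default kubeconfig loading
	// Attach a waiter up front; StatusWatcherStrategy requires a resolvable
	// kubeconfig since it builds a REST config, dynamic client, and REST mapper.
	if err := c.SetWaiter(kube.StatusWatcherStrategy); err != nil {
		log.Fatal(err)
	}
}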
var nopLogger = func(_ string, _ ...interface{}) {}
// getKubeClient get or create a new KubernetesClientSet
func (c *Client) getKubeClient() (kubernetes.Interface, error) {
var err error
@ -139,7 +194,7 @@ func (c *Client) IsReachable() error {
// Create creates Kubernetes resources specified in the resource list.
func (c *Client) Create(resources ResourceList) (*Result, error) {
c.Log("creating %d resource(s)", len(resources))
slog.Debug("creating resource(s)", "resources", len(resources))
if err := perform(resources, createResource); err != nil {
return nil, err
}
@ -191,7 +246,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil {
c.Log("Warning: get the relation pod is failed, err:%s", err.Error())
slog.Warn("get the relation pod is failed", slog.Any("error", err))
}
}
}
@ -209,7 +264,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil {
return objs, nil
}
c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)
slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
selector, ok, _ := getSelectorFromObject(info.Object)
if !ok {
return objs, nil
@ -281,45 +336,6 @@ func getResource(info *resource.Info) (runtime.Object, error) {
return obj, nil
}
// Wait waits up to the given timeout for the specified resources to be ready.
func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
cs, err := c.getKubeClient()
if err != nil {
return err
}
checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
w := waiter{
c: checker,
log: c.Log,
timeout: timeout,
}
return w.waitForResources(resources)
}
// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
cs, err := c.getKubeClient()
if err != nil {
return err
}
checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
w := waiter{
c: checker,
log: c.Log,
timeout: timeout,
}
return w.waitForResources(resources)
}
// WaitForDelete wait up to the given timeout for the specified resources to be deleted.
func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error {
w := waiter{
log: c.Log,
timeout: timeout,
}
return w.waitForDeletedResources(resources)
}
func (c *Client) namespace() string {
if c.Namespace != "" {
return c.Namespace
@ -390,7 +406,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
updateErrors := []string{}
res := &Result{}
c.Log("checking %d resources for changes", len(target))
slog.Debug("checking resources for changes", "resources", len(target))
err := target.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
@ -411,7 +427,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind)
return nil
}
@ -422,7 +438,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
if err := updateResource(c, info, originalInfo.Object, force); err != nil {
c.Log("error updating the resource %q:\n\t %v", info.Name, err)
slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
updateErrors = append(updateErrors, err.Error())
}
// Because we check for errors later, append the info regardless
@ -439,22 +455,22 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
}
for _, info := range original.Difference(target) {
c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace)
slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
if err := info.Get(); err != nil {
c.Log("Unable to get obj %q, err: %s", info.Name, err)
slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil {
c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue
}
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
res.Deleted = append(res.Deleted, info)
@ -478,16 +494,16 @@ func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy meta
return rdelete(c, resources, policy)
}
func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error
res := &Result{}
mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error {
c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) {
if err != nil {
c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
mtx.Lock()
defer mtx.Unlock()
@ -512,52 +528,6 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa
return res, nil
}
func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error {
return func(info *resource.Info) error {
return c.watchUntilReady(t, info)
}
}
// WatchUntilReady watches the resources given and waits until it is ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For most kinds, it checks to see if the resource is marked as Added or Modified
// by the Kubernetes event stream. For some kinds, it does more:
//
// - Jobs: A job is marked "Ready" when it has successfully completed. This is
// ascertained by watching the Status fields in a job's output.
// - Pods: A pod is marked "Ready" when it has successfully completed. This is
// ascertained by watching the status.phase field in a pod's output.
//
// Handling for other kinds will be added as necessary.
func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
// For jobs, there's also the option to do poll c.Jobs(namespace).Get():
// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
return perform(resources, c.watchTimeout(timeout))
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
var result error
if len(infos) == 0 {
return ErrNoObjectsVisited
}
errs := make(chan error)
go batchPerform(infos, fn, errs)
for range infos {
err := <-errs
if err != nil {
result = multierror.Append(result, err)
}
}
return result
}
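perform fans each resource out to a goroutine batch and folds failures together; here is a standalone sketch of that aggregation pattern with hashicorp/go-multierror (the item count and error text are hypothetical):

package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	errs := make(chan error, 3)
	for i := 0; i < 3; i++ {
		go func(i int) {
			// Simulate per-resource work that sometimes fails.
			if i%2 == 0 {
				errs <- fmt.Errorf("item %d failed", i)
				return
			}
			errs <- nil
		}(i)
	}
	// Collect one result per goroutine, accumulating failures into one error.
	for i := 0; i < 3; i++ {
		if err := <-errs; err != nil {
			result = multierror.Append(result, err)
		}
	}
	fmt.Println(result)
}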
// getManagedFieldsManager returns the manager string. If one was set it will be returned.
// Otherwise, one is calculated based on the name of the binary.
func getManagedFieldsManager() string {
@ -667,7 +637,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
return patch, types.StrategicMergePatchType, err
}
func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
var (
obj runtime.Object
helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
@ -681,7 +651,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
if err != nil {
return errors.Wrap(err, "failed to replace object")
}
c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
} else {
patch, patchType, err := createPatch(target, currentObj)
if err != nil {
@ -689,7 +659,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
}
if patch == nil || string(patch) == "{}" {
c.Log("Looks like there are no changes for %s %q", kind, target.Name)
slog.Debug("no changes detected", "kind", kind, "name", target.Name)
// This needs to happen to make sure that Helm has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil {
@ -698,7 +668,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil
}
// send patch to server
c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace)
slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil {
return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
@ -709,109 +679,6 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil
}
func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
kind := info.Mapping.GroupVersionKind.Kind
switch kind {
case "Job", "Pod":
default:
return nil
}
c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
// Use a selector on the name of the resource. This should be unique for the
// given version and kind
selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
if err != nil {
return err
}
lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
// What we watch for depends on the Kind.
// - For a Job, we watch for completion.
// - For all else, we watch until Ready.
// In the future, we might want to add some special logic for types
// like Ingress, Volume, etc.
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
// Make sure the incoming object is versioned as we use unstructured
// objects when we build manifests
obj := convertWithMapper(e.Object, info.Mapping)
switch e.Type {
case watch.Added, watch.Modified:
// For things like a secret or a config map, this is the best indicator
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks.
c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
switch kind {
case "Job":
return c.waitForJob(obj, info.Name)
case "Pod":
return c.waitForPodSuccess(obj, info.Name)
}
return true, nil
case watch.Deleted:
c.Log("Deleted event for %s", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
c.Log("Error event for %s", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name)
default:
return false, nil
}
})
return err
}
// waitForJob is a helper that waits for a job to complete.
//
// This operates on an event returned from a watcher.
func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*batch.Job)
if !ok {
return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
}
for _, c := range o.Status.Conditions {
if c.Type == batch.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batch.JobFailed && c.Status == "True" {
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
return false, nil
}
// waitForPodSuccess is a helper that waits for a pod to complete.
//
// This operates on an event returned from a watcher.
func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*v1.Pod)
if !ok {
return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
}
switch o.Status.Phase {
case v1.PodSucceeded:
c.Log("Pod %s succeeded", o.Name)
return true, nil
case v1.PodFailed:
return true, errors.Errorf("pod %s failed", o.Name)
case v1.PodPending:
c.Log("Pod %s pending", o.Name)
case v1.PodRunning:
c.Log("Pod %s running", o.Name)
}
return false, nil
}
// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions
func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) {
podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
@ -848,9 +715,6 @@ func copyRequestStreamToWriter(request *rest.Request, podName, containerName str
if err != nil {
return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
}
if err != nil {
return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName)
}
return nil
}

@ -107,7 +107,6 @@ func newTestClient(t *testing.T) *Client {
return &Client{
Factory: testFactory.WithNamespace("default"),
Log: nopLogger,
}
}
@ -515,6 +514,11 @@ func TestWait(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(&podList), false)
if err != nil {
t.Fatal(err)
@ -567,6 +571,11 @@ func TestWaitJob(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(job), false)
if err != nil {
t.Fatal(err)
@ -621,6 +630,11 @@ func TestWaitDelete(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(&pod), false)
if err != nil {
t.Fatal(err)

@ -20,6 +20,7 @@ import (
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubectl/pkg/validation"
)
@ -33,6 +34,9 @@ import (
// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
// being exposed.
type Factory interface {
// ToRESTConfig returns the REST config
ToRESTConfig() (*rest.Config, error)
// ToRawKubeConfigLoader returns the kubeconfig loader as-is
ToRawKubeConfigLoader() clientcmd.ClientConfig

@ -35,18 +35,29 @@ type FailingKubeClient struct {
PrintingKubeClient
CreateError error
GetError error
WaitError error
DeleteError error
DeleteWithPropagationError error
WatchUntilReadyError error
UpdateError error
BuildError error
BuildTableError error
BuildDummy bool
BuildUnstructuredError error
WaitError error
WaitForDeleteError error
WatchUntilReadyError error
WaitDuration time.Duration
}
// FailingKubeWaiter implements kube.Waiter for testing purposes.
// It also has additional errors you can set to fail different functions; otherwise it delegates all its calls to `PrintingKubeWaiter`.
type FailingKubeWaiter struct {
*PrintingKubeWaiter
waitError error
waitForDeleteError error
watchUntilReadyError error
waitDuration time.Duration
}
// Create returns the configured error if set or prints
func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
if f.CreateError != nil {
@ -64,28 +75,28 @@ func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[
}
// Wait waits the amount of time defined by f.waitDuration, then returns the configured error if set, or prints.
func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error {
time.Sleep(f.WaitDuration)
if f.WaitError != nil {
return f.WaitError
func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error {
time.Sleep(f.waitDuration)
if f.waitError != nil {
return f.waitError
}
return f.PrintingKubeClient.Wait(resources, d)
return f.PrintingKubeWaiter.Wait(resources, d)
}
// WaitWithJobs returns the configured error if set or prints
func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
if f.WaitError != nil {
return f.WaitError
func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
if f.waitError != nil {
return f.waitError
}
return f.PrintingKubeClient.WaitWithJobs(resources, d)
return f.PrintingKubeWaiter.WaitWithJobs(resources, d)
}
// WaitForDelete returns the configured error if set or prints
func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
if f.WaitError != nil {
return f.WaitError
func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
if f.waitForDeleteError != nil {
return f.waitForDeleteError
}
return f.PrintingKubeClient.WaitForDelete(resources, d)
return f.PrintingKubeWaiter.WaitForDelete(resources, d)
}
// Delete returns the configured error if set or prints
@ -97,11 +108,11 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [
}
// WatchUntilReady returns the configured error if set or prints
func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
if f.WatchUntilReadyError != nil {
return f.WatchUntilReadyError
func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
if f.watchUntilReadyError != nil {
return f.watchUntilReadyError
}
return f.PrintingKubeClient.WatchUntilReady(resources, d)
return f.PrintingKubeWaiter.WatchUntilReady(resources, d)
}
// Update returns the configured error if set or prints
@ -139,6 +150,18 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL
return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy)
}
func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) {
waiter, _ := f.PrintingKubeClient.GetWaiter(ws)
printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter)
return &FailingKubeWaiter{
PrintingKubeWaiter: printingKubeWaiter,
waitError: f.WaitError,
waitForDeleteError: f.WaitForDeleteError,
watchUntilReadyError: f.WatchUntilReadyError,
waitDuration: f.WaitDuration,
}, nil
}
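A sketch of how a test might drive the failing waiter; the import path is assumed from context and the error value is hypothetical.

package fake_test

import (
	"errors"
	"io"
	"testing"
	"time"

	"helm.sh/helm/v4/pkg/kube"
	kubefake "helm.sh/helm/v4/pkg/kube/fake" // path assumed from this diff's context
)

func TestFailingWaiter(t *testing.T) {
	failer := &kubefake.FailingKubeClient{
		PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard},
		WaitError:          errors.New("boom"),
	}
	// The client-level errors are copied into the waiter returned by
	// GetWaiter, so a single struct configures both behaviors.
	w, err := failer.GetWaiter(kube.StatusWatcherStrategy)
	if err != nil {
		t.Fatal(err)
	}
	if err := w.Wait(nil, time.Second); err == nil || err.Error() != "boom" {
		t.Fatalf("expected boom, got %v", err)
	}
}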
func createDummyResourceList() kube.ResourceList {
var resInfo resource.Info
resInfo.Name = "dummyName"
@ -146,5 +169,4 @@ func createDummyResourceList() kube.ResourceList {
var resourceList kube.ResourceList
resourceList.Append(&resInfo)
return resourceList
}

@ -37,6 +37,12 @@ type PrintingKubeClient struct {
LogOutput io.Writer
}
// PrintingKubeWaiter implements kube.Waiter, but simply prints the resources to the given output
type PrintingKubeWaiter struct {
Out io.Writer
LogOutput io.Writer
}
// IsReachable checks if the cluster is reachable
func (p *PrintingKubeClient) IsReachable() error {
return nil
@ -59,17 +65,23 @@ func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[strin
return make(map[string][]runtime.Object), nil
}
func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error {
func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
// WatchUntilReady implements kube.Waiter's WatchUntilReady.
func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
@ -85,12 +97,6 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result,
return &kube.Result{Deleted: resources}, nil
}
// WatchUntilReady implements KubeClient WatchUntilReady.
func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
// Update implements KubeClient Update.
func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) {
_, err := io.Copy(p.Out, bufferize(modified))
@ -140,6 +146,10 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource
return &kube.Result{Deleted: resources}, nil
}
func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) {
return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil
}
func bufferize(resources kube.ResourceList) io.Reader {
var builder strings.Builder
for _, info := range resources {

@ -32,26 +32,9 @@ type Interface interface {
// Create creates one or more resources.
Create(resources ResourceList) (*Result, error)
// Wait waits up to the given timeout for the specified resources to be ready.
Wait(resources ResourceList, timeout time.Duration) error
// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
WaitWithJobs(resources ResourceList, timeout time.Duration) error
// Delete destroys one or more resources.
Delete(resources ResourceList) (*Result, []error)
// WatchUntilReady watches the resources given and waits until it is ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For Jobs, "ready" means the Job ran to completion (exited without error).
// For Pods, "ready" means the Pod phase is marked "succeeded".
// For all other kinds, it means the kind was created or modified without
// error.
WatchUntilReady(resources ResourceList, timeout time.Duration) error
// Update updates one or more resources or creates the resource
// if it doesn't exist.
Update(original, target ResourceList, force bool) (*Result, error)
@ -63,17 +46,34 @@ type Interface interface {
//
// Validates against OpenAPI schema if validate is true.
Build(reader io.Reader, validate bool) (ResourceList, error)
// IsReachable checks whether the client is able to connect to the cluster.
IsReachable() error
// GetWaiter returns a Waiter for the given WaitStrategy.
GetWaiter(ws WaitStrategy) (Waiter, error)
}
// InterfaceExt was introduced to avoid breaking backwards compatibility for Interface implementers.
//
// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
type InterfaceExt interface {
// Waiter defines methods related to waiting for resource states.
type Waiter interface {
// Wait waits up to the given timeout for the specified resources to be ready.
Wait(resources ResourceList, timeout time.Duration) error
// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
WaitWithJobs(resources ResourceList, timeout time.Duration) error
// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
WaitForDelete(resources ResourceList, timeout time.Duration) error
// WatchUntilReady watches the given resources and waits until they are ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For Jobs, "ready" means the Job ran to completion (exited without error).
// For Pods, "ready" means the Pod phase is marked "succeeded".
// For all other kinds, it means the kind was created or modified without
// error.
WatchUntilReady(resources ResourceList, timeout time.Duration) error
}
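A small sketch of the compile-time guards an implementer might add for the new interface, assuming each waiter named in this change implements the full method set:

// Inside package kube: fail the build if a waiter drifts from the interface.
var (
	_ Waiter = (*statusWaiter)(nil)
	_ Waiter = (*legacyWaiter)(nil)
	_ Waiter = (*hookOnlyWaiter)(nil)
)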
// InterfaceLogs was introduced to avoid breaking backwards compatibility for Interface implementers.
@ -118,7 +118,6 @@ type InterfaceResources interface {
}
var _ Interface = (*Client)(nil)
var _ InterfaceExt = (*Client)(nil)
var _ InterfaceLogs = (*Client)(nil)
var _ InterfaceDeletionPropagation = (*Client)(nil)
var _ InterfaceResources = (*Client)(nil)

@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
"log/slog"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@ -57,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults.
func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker {
func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{
client: cl,
log: log,
}
if c.log == nil {
c.log = nopLogger
}
for _, opt := range opts {
opt(&c)
@ -74,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}),
// ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -230,20 +226,21 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true
}
}
c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false
}
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit {
c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
}
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil
}
slog.Debug("Job is completed", "namespace", job.GetNamespace(), "name", job.GetName())
return true, nil
}
@ -255,7 +252,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" {
c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName())
slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
@ -263,24 +260,25 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 {
c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs)
slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true
}
if s.Status.LoadBalancer.Ingress == nil {
c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName())
slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
}
slog.Debug("Service is ready", "namespace", s.GetNamespace(), "name", s.GetName(), "clusterIP", s.Spec.ClusterIP, "externalIPs", s.Spec.ExternalIPs)
return true
}
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound {
c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName())
slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false
}
slog.Debug("PersistentVolumeClaim is bound", "namespace", v.GetNamespace(), "name", v.GetName(), "phase", v.Status.Phase)
return true
}
@ -290,23 +288,24 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
return false
}
// Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation)
if dep.Status.ObservedGeneration != dep.Generation {
slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.Generation)
return false
}
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) {
c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
if rs.Status.ReadyReplicas < expectedReady {
slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false
}
slog.Debug("Deployment is ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return true
}
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation)
if ds.Status.ObservedGeneration != ds.Generation {
slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.Generation)
return false
}
@ -317,7 +316,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false
}
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@ -329,10 +328,11 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
}
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
if !(int(ds.Status.NumberReady) >= expectedReady) {
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady)
if int(ds.Status.NumberReady) < expectedReady {
slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false
}
slog.Debug("DaemonSet is ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return true
}
@ -381,14 +381,14 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
if sts.Status.ObservedGeneration != sts.Generation {
slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true
}
@ -414,30 +414,29 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false
}
if int(sts.Status.ReadyReplicas) != replicas {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false
}
// This check only makes sense when all partitions are being upgraded; otherwise, during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to an
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false
}
c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true
}
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
if rc.Status.ObservedGeneration != rc.Generation {
slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.Generation)
return false
}
return true
@ -445,8 +444,8 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation)
if rs.Status.ObservedGeneration != rs.Generation {
slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.Generation)
return false
}
return true

@ -37,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -56,8 +55,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
{
name: "IsReady Pod",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -72,8 +70,7 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
{
name: "IsReady Pod returns error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -90,7 +87,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -113,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -132,8 +127,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
{
name: "IsReady Job error while getting job",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -148,8 +142,7 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
{
name: "IsReady Job",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -166,7 +159,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -188,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -208,8 +199,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
{
name: "IsReady Deployments error while getting current Deployment",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -225,8 +215,7 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
{
name: "IsReady Deployments", //TODO fix this one
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -244,7 +233,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -270,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -289,8 +276,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
{
name: "IsReady PersistentVolumeClaim",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -305,8 +291,7 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
{
name: "IsReady PersistentVolumeClaim with error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -323,7 +308,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -345,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -364,8 +347,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
{
name: "IsReady Service",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -380,8 +362,7 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
{
name: "IsReady Service with error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -398,7 +379,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -420,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -439,8 +418,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
{
name: "IsReady DaemonSet",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -455,8 +433,7 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
{
name: "IsReady DaemonSet with error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -473,7 +450,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -495,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -514,8 +489,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
{
name: "IsReady StatefulSet",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -530,8 +504,7 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
{
name: "IsReady StatefulSet with error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -548,7 +521,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -570,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -589,8 +560,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -605,8 +575,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController with error",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -621,8 +590,7 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController and pods not ready for object",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -639,7 +607,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -661,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@ -680,8 +646,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
{
name: "IsReady ReplicaSet",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -696,8 +661,7 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
{
name: "IsReady ReplicaSet not ready",
fields: fields{
client: fake.NewSimpleClientset(),
log: func(string, ...interface{}) {},
client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
@ -714,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@ -791,7 +754,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
}
@ -825,7 +788,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.replicaSetReady(tt.args.rs); got != tt.want {
t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
}
@ -859,7 +822,7 @@ func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
}
@ -914,7 +877,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.daemonSetReady(tt.args.ds); got != tt.want {
t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
}
@ -990,7 +953,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.statefulSetReady(tt.args.sts); got != tt.want {
t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
}
@ -1049,7 +1012,7 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
for _, pod := range tt.existPods {
if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Pod error: %v", err)
@ -1128,7 +1091,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got, err := c.jobReady(tt.args.job)
if (err != nil) != tt.wantErr {
t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
@ -1167,7 +1130,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
if got := c.volumeReady(tt.args.v); got != tt.want {
t.Errorf("volumeReady() = %v, want %v", got, tt.want)
}
@ -1212,7 +1175,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.serviceReady(tt.args.service)
if got != tt.want {
t.Errorf("serviceReady() = %v, want %v", got, tt.want)
@ -1281,7 +1244,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.crdBetaReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
@ -1350,7 +1313,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewReadyChecker(fake.NewSimpleClientset(), nil)
c := NewReadyChecker(fake.NewClientset())
got := c.crdReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)

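The hunks above are mechanical: the deprecated fake.NewSimpleClientset is replaced by fake.NewClientset, and NewReadyChecker no longer takes a second argument. A minimal sketch of the new constructor in isolation (test name and namespace are illustrative, not from this diff):

package kube_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeClientsetSketch(t *testing.T) {
	// fake.NewClientset is the maintained replacement for fake.NewSimpleClientset.
	client := fake.NewClientset()
	if _, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}); err != nil {
		t.Fatal(err)
	}
}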
@ -0,0 +1,235 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube // import "helm.sh/helm/v3/pkg/kube"
import (
"context"
"errors"
"fmt"
"log/slog"
"sort"
"time"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/kstatus/watcher"
"github.com/fluxcd/cli-utils/pkg/object"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
helmStatusReaders "helm.sh/helm/v4/internal/statusreaders"
)
type statusWaiter struct {
client dynamic.Interface
restMapper meta.RESTMapper
}
func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
return &status.Result{
Status: status.CurrentStatus,
Message: "Resource is current",
}, nil
}
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
// We don't want to wait on any other resources as watchUntilReady is only for Helm hooks
genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady)
sr := &statusreaders.DelegatingStatusReader{
StatusReaders: []engine.StatusReader{
jobSR,
podSR,
genericSR,
},
}
sw.StatusReader = sr
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
sw.StatusReader = customSR
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw)
}
func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
resources := []object.ObjMetadata{}
for _, resource := range resourceList {
obj, err := object.RuntimeToObjMeta(resource.Object)
if err != nil {
return err
}
resources = append(resources, obj)
}
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.NotFoundStatus {
continue
}
errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
}
errs = append(errs, ctx.Err())
return errors.Join(errs...)
}
return nil
}
func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
resources := []object.ObjMetadata{}
for _, resource := range resourceList {
switch value := AsVersioned(resource).(type) {
case *appsv1.Deployment:
if value.Spec.Paused {
continue
}
}
obj, err := object.RuntimeToObjMeta(resource.Object)
if err != nil {
return err
}
resources = append(resources, obj)
}
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.CurrentStatus {
continue
}
errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
}
errs = append(errs, ctx.Err())
return errors.Join(errs...)
}
return nil
}
func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus
for _, rs := range statusCollector.ResourceStatuses {
if rs == nil {
continue
}
// If a resource is already deleted before waiting has started, it will show as unknown.
// This check ensures we don't wait forever for a resource that is already deleted.
if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
continue
}
rss = append(rss, rs)
if rs.Status != desired {
nonDesiredResources = append(nonDesiredResources, rs)
}
}
if aggregator.AggregateStatus(rss, desired) == desired {
cancel()
return
}
if len(nonDesiredResources) > 0 {
// Log a single resource so the user knows what they're waiting for without an overwhelming amount of output
sort.Slice(nonDesiredResources, func(i, j int) bool {
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
})
first := nonDesiredResources[0]
slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
}
}
}
type hookOnlyWaiter struct {
sw *statusWaiter
}
func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
return w.sw.WatchUntilReady(resourceList, timeout)
}
func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error {
return nil
}
func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error {
return nil
}
func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error {
return nil
}

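For orientation, a caller would wire the new waiter up roughly as follows; the rest config, mapper, and resource list are assumed to exist elsewhere and are not taken from this diff:

// A sketch, assuming restConfig, mapper, and resourceList are built elsewhere.
dyn, err := dynamic.NewForConfig(restConfig)
if err != nil {
	return err
}
w := &statusWaiter{client: dyn, restMapper: mapper}
// Blocks until every resource reports kstatus Current, or the timeout hits.
if err := w.Wait(resourceList, 2*time.Minute); err != nil {
	return err
}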
@ -0,0 +1,447 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube // import "helm.sh/helm/v3/pkg/kube"
import (
"errors"
"testing"
"time"
"github.com/fluxcd/cli-utils/pkg/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/kubectl/pkg/scheme"
)
var podCurrentManifest = `
apiVersion: v1
kind: Pod
metadata:
name: current-pod
namespace: ns
status:
conditions:
- type: Ready
status: "True"
phase: Running
`
var podNoStatusManifest = `
apiVersion: v1
kind: Pod
metadata:
name: in-progress-pod
namespace: ns
`
var jobNoStatusManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: test
namespace: qual
generation: 1
`
var jobReadyManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: ready-not-complete
namespace: default
generation: 1
status:
startTime: 2025-02-06T16:34:20-05:00
active: 1
ready: 1
`
var jobCompleteManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: test
namespace: qual
generation: 1
status:
succeeded: 1
active: 0
conditions:
- type: Complete
status: "True"
`
var podCompleteManifest = `
apiVersion: v1
kind: Pod
metadata:
name: good-pod
namespace: ns
status:
phase: Succeeded
`
var pausedDeploymentManifest = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: paused
namespace: ns-1
generation: 1
spec:
paused: true
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.19.6
ports:
- containerPort: 80
`
var notReadyDeploymentManifest = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: not-ready
namespace: ns-1
generation: 1
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.19.6
ports:
- containerPort: 80
`
func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource {
gvk := obj.GroupVersionKind()
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
require.NoError(t, err)
return mapping.Resource
}
func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object {
objects := []runtime.Object{}
for _, manifest := range manifests {
m := make(map[string]interface{})
err := yaml.Unmarshal([]byte(manifest), &m)
assert.NoError(t, err)
resource := &unstructured.Unstructured{Object: m}
objects = append(objects, resource)
}
return objects
}
func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList {
resourceList := ResourceList{}
for _, obj := range objs {
list, err := c.Build(objBody(obj), false)
assert.NoError(t, err)
resourceList = append(resourceList, list...)
}
return resourceList
}
func TestStatusWaitForDelete(t *testing.T) {
t.Parallel()
tests := []struct {
name string
manifestsToCreate []string
manifestsToDelete []string
expectErrs []error
}{
{
name: "wait for pod to be deleted",
manifestsToCreate: []string{podCurrentManifest},
manifestsToDelete: []string{podCurrentManifest},
expectErrs: nil,
},
{
name: "error when not all objects are deleted",
manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest},
manifestsToDelete: []string{jobCompleteManifest},
expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
timeout := time.Second
timeUntilPodDelete := time.Millisecond * 500
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
}
objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
for _, objToCreate := range objsToCreate {
u := objToCreate.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete)
for _, objToDelete := range objsToDelete {
u := objToDelete.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
go func() {
time.Sleep(timeUntilPodDelete)
err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
assert.NoError(t, err)
}()
}
resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate)
err := statusWaiter.WaitForDelete(resourceList, timeout)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
t.Parallel()
c := newTestClient(t)
timeout := time.Second
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
)
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
}
// Don't create the object to test that the wait for delete works when the object doesn't exist
objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
resourceList := getResourceListFromRuntimeObjs(t, c, objManifest)
err := statusWaiter.WaitForDelete(resourceList, timeout)
assert.NoError(t, err)
}
func TestStatusWait(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
waitForJobs bool
}{
{
name: "Job is not complete",
objManifests: []string{jobNoStatusManifest},
expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
waitForJobs: true,
},
{
name: "Job is ready but not complete",
objManifests: []string{jobReadyManifest},
expectErrs: nil,
waitForJobs: false,
},
{
name: "Pod is ready",
objManifests: []string{podCurrentManifest},
expectErrs: nil,
},
{
name: "one of the pods never becomes ready",
objManifests: []string{podNoStatusManifest, podCurrentManifest},
expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "paused deployment passes",
objManifests: []string{pausedDeploymentManifest},
expectErrs: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
appsv1.SchemeGroupVersion.WithKind("Deployment"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.Wait(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestWaitForJobComplete(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
}{
{
name: "Job is complete",
objManifests: []string{jobCompleteManifest},
},
{
name: "Job is not ready",
objManifests: []string{jobNoStatusManifest},
expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "Job is ready but not complete",
objManifests: []string{jobReadyManifest},
expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestWatchForReady(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
}{
{
name: "succeeds if pod and job are complete",
objManifests: []string{jobCompleteManifest, podCompleteManifest},
},
{
name: "succeeds when a resource that's not a pod or job is not ready",
objManifests: []string{notReadyDeploymentManifest},
},
{
name: "Fails if job is not complete",
objManifests: []string{jobReadyManifest},
expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "Fails if pod is not complete",
objManifests: []string{podCurrentManifest},
expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
appsv1.SchemeGroupVersion.WithKind("Deployment"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}

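One detail the assertions above rely on: errors.Join returns a single error whose message is the individual messages separated by newlines, so assert.EqualError can compare against the joined expectation. In isolation (values illustrative):

err := errors.Join(
	errors.New("resource not ready, name: test, kind: Job, status: InProgress"),
	errors.New("context deadline exceeded"),
)
fmt.Println(err.Error()) // prints both messages, newline-separated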
@ -19,9 +19,11 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
"log/slog"
"net/http"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
@ -31,25 +33,42 @@ import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/apimachinery/pkg/util/wait"
)
type waiter struct {
c ReadyChecker
timeout time.Duration
log func(string, ...interface{})
// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3.
// Helm 4 now uses the statusWaiter implementation instead.
type legacyWaiter struct {
c ReadyChecker
kubeClient *kubernetes.Clientset
}
func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
return hw.waitForResources(resources, timeout)
}
func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
return hw.waitForResources(resources, timeout)
}
// waitForResources polls to get the current status of all Pods, PVCs, Services, and
// Jobs (optional) until all are ready or a timeout is reached
func (w *waiter) waitForResources(created ResourceList) error {
w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout)
func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
numberOfErrors := make([]int, len(created))
@ -60,15 +79,15 @@ func (w *waiter) waitForResources(created ResourceList) error {
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
waitRetries := 30
for i, v := range created {
ready, err := w.c.IsReady(ctx, v)
ready, err := hw.c.IsReady(ctx, v)
if waitRetries > 0 && w.isRetryableError(err, v) {
if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++
if numberOfErrors[i] > waitRetries {
w.log("Max number of retries reached")
slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
return false, err
}
w.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries)
slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
return false, nil
}
numberOfErrors[i] = 0
@ -80,33 +99,34 @@ func (w *waiter) waitForResources(created ResourceList) error {
})
}
func (w *waiter) isRetryableError(err error, resource *resource.Info) bool {
func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool {
if err == nil {
return false
}
w.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource)
slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code
retryable := w.isRetryableHTTPStatusCode(statusCode)
w.log("Status code received: %d. Retryable error? %t", statusCode, retryable)
retryable := hw.isRetryableHTTPStatusCode(statusCode)
slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
return retryable
}
w.log("Retryable error? %t", true)
slog.Debug("retryable error assumed", "resource", resource.Name)
return true
}
func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented)
}
// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout)
func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
startTime := time.Now()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
for _, v := range deleted {
err := v.Get()
if err == nil || !apierrors.IsNotFound(err) {
@ -115,6 +135,15 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
}
return true, nil
})
elapsed := time.Since(startTime).Round(time.Second)
if err != nil {
slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err))
} else {
slog.Debug("wait for resources succeeded", "elapsed", elapsed)
}
return err
}
// SelectorsForObject returns the pod label selector for a given object
@ -164,3 +193,155 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er
return selector, errors.Wrap(err, "invalid label selector")
}
func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error {
return func(info *resource.Info) error {
return hw.watchUntilReady(t, info)
}
}
// WatchUntilReady watches the given resources and waits until each is ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For most kinds, it checks to see if the resource is marked as Added or Modified
// by the Kubernetes event stream. For some kinds, it does more:
//
// - Jobs: A job is marked "Ready" when it has successfully completed. This is
// ascertained by watching the Status fields in a job's output.
// - Pods: A pod is marked "Ready" when it has successfully completed. This is
// ascertained by watching the status.phase field in a pod's output.
//
// Handling for other kinds will be added as necessary.
func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
// For jobs, there's also the option to poll c.Jobs(namespace).Get():
// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
return perform(resources, hw.watchTimeout(timeout))
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
var result error
if len(infos) == 0 {
return ErrNoObjectsVisited
}
errs := make(chan error)
go batchPerform(infos, fn, errs)
for range infos {
err := <-errs
if err != nil {
result = multierror.Append(result, err)
}
}
return result
}
func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error {
kind := info.Mapping.GroupVersionKind.Kind
switch kind {
case "Job", "Pod":
default:
return nil
}
slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
// Use a selector on the name of the resource. This should be unique for the
// given version and kind
selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
if err != nil {
return err
}
lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
// What we watch for depends on the Kind.
// - For a Job, we watch for completion.
// - For all else, we watch until Ready.
// In the future, we might want to add some special logic for types
// like Ingress, Volume, etc.
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
// Make sure the incoming object is versioned as we use unstructured
// objects when we build manifests
obj := convertWithMapper(e.Object, info.Mapping)
switch e.Type {
case watch.Added, watch.Modified:
// For things like a secret or a config map, this is the best indicator
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet,
// we don't do anything special to support them as hooks.
slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
switch kind {
case "Job":
return hw.waitForJob(obj, info.Name)
case "Pod":
return hw.waitForPodSuccess(obj, info.Name)
}
return true, nil
case watch.Deleted:
slog.Debug("deleted event received", "resource", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
slog.Error("error event received", "resource", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name)
default:
return false, nil
}
})
return err
}
// waitForJob is a helper that waits for a job to complete.
//
// This operates on an event returned from a watcher.
func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*batchv1.Job)
if !ok {
return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
}
for _, c := range o.Status.Conditions {
if c.Type == batchv1.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batchv1.JobFailed && c.Status == "True" {
slog.Error("job failed", "job", name, "reason", c.Reason)
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
return false, nil
}
// waitForPodSuccess is a helper that waits for a pod to complete.
//
// This operates on an event returned from a watcher.
func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*corev1.Pod)
if !ok {
return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
}
switch o.Status.Phase {
case corev1.PodSucceeded:
slog.Debug("pod succeeded", "pod", o.Name)
return true, nil
case corev1.PodFailed:
slog.Error("pod failed", "pod", o.Name)
return true, errors.Errorf("pod %s failed", o.Name)
case corev1.PodPending:
slog.Debug("pod pending", "pod", o.Name)
case corev1.PodRunning:
slog.Debug("pod running", "pod", o.Name)
}
return false, nil
}

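The retry loop above sits on the standard apimachinery poller; the pattern in isolation, with a hypothetical checkReady standing in for ReadyChecker.IsReady:

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
	ready, err := checkReady(ctx) // hypothetical readiness probe
	if err != nil {
		return false, err // a non-nil error aborts the poll immediately
	}
	return ready, nil // (false, nil) polls again after the interval
})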
@ -24,7 +24,6 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
@ -39,11 +38,6 @@ import (
"helm.sh/helm/v4/pkg/lint/support"
)
var (
crdHookSearch = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`)
releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`)
)
// Templates lints the templates in the Linter.
func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) {
TemplatesWithKubeVersion(linter, values, namespace, nil)
@ -119,14 +113,10 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string
- Metadata.Namespace is not set
*/
for _, template := range chart.Templates {
fileName, data := template.Name, template.Data
fileName := template.Name
fpath = fileName
linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
// These are v3 specific checks to make sure and warn people if their
// chart is not compatible with v3
linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data))
linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data))
// We only apply the following lint rules to yaml files
if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" {
@ -291,27 +281,13 @@ func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
}
}
func validateNoCRDHooks(manifest []byte) error {
if crdHookSearch.Match(manifest) {
return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart")
}
return nil
}
func validateNoReleaseTime(manifest []byte) error {
if releaseTimeSearch.Match(manifest) {
return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
}
return nil
}
// validateMatchSelector ensures that template specs have a selector declared.
// See https://github.com/helm/helm/issues/1990
func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error {
switch yamlStruct.Kind {
case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
// verify that matchLabels or matchExpressions is present
if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) {
if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") {
return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
}
}

@ -85,26 +85,6 @@ func TestTemplateIntegrationHappyPath(t *testing.T) {
}
}
func TestV3Fail(t *testing.T) {
linter := support.Linter{ChartDir: "./testdata/v3-fail"}
Templates(&linter, values, namespace, strict)
res := linter.Messages
if len(res) != 3 {
t.Fatalf("Expected 3 errors, got %d, %v", len(res), res)
}
if !strings.Contains(res[0].Err.Error(), ".Release.Time has been removed in v3") {
t.Errorf("Unexpected error: %s", res[0].Err)
}
if !strings.Contains(res[1].Err.Error(), "manifest is a crd-install hook") {
t.Errorf("Unexpected error: %s", res[1].Err)
}
if !strings.Contains(res[2].Err.Error(), "manifest is a crd-install hook") {
t.Errorf("Unexpected error: %s", res[2].Err)
}
}
func TestMultiTemplateFail(t *testing.T) {
linter := support.Linter{ChartDir: "./testdata/multi-template-fail"}
Templates(&linter, values, namespace, strict)

@ -96,7 +96,7 @@ func TestValidateValuesFileSchemaFailure(t *testing.T) {
t.Fatal("expected values file to fail parsing")
}
assert.Contains(t, err.Error(), "Expected: string, given: integer", "integer should be caught by schema")
assert.Contains(t, err.Error(), "- at '/username': got number, want string")
}
func TestValidateValuesFileSchemaOverrides(t *testing.T) {
@ -129,7 +129,7 @@ func TestValidateValuesFile(t *testing.T) {
name: "value not overridden",
yaml: "username: admin\npassword:",
overrides: map[string]interface{}{"username": "anotherUser"},
errorMessage: "Expected: string, given: null",
errorMessage: "- at '/password': got null, want string",
},
{
name: "value overridden",

@ -20,6 +20,7 @@ import (
"bytes"
"compress/gzip"
"io"
"log/slog"
"os"
"path"
"path/filepath"
@ -144,7 +145,7 @@ func (i *HTTPInstaller) Install() error {
return err
}
debug("copying %s to %s", src, i.Path())
slog.Debug("copying", "source", src, "path", i.Path())
return fs.CopyDir(src, i.Path())
}
@ -156,7 +157,7 @@ func (i *HTTPInstaller) Update() error {
// Path is overridden because we want to join on the plugin name not the file name
func (i HTTPInstaller) Path() string {
if i.base.Source == "" {
if i.Source == "" {
return ""
}
return helmpath.DataPath("plugins", i.PluginName)

@ -16,8 +16,6 @@ limitations under the License.
package installer
import (
"fmt"
"log"
"net/http"
"os"
"path/filepath"
@ -125,11 +123,3 @@ func isPlugin(dirname string) bool {
_, err := os.Stat(filepath.Join(dirname, plugin.PluginFileName))
return err == nil
}
var logger = log.New(os.Stderr, "[debug] ", log.Lshortfile)
func debug(format string, args ...interface{}) {
if Debug {
logger.Output(2, fmt.Sprintf(format, args...))
}
}

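With the package-level debug helper removed, verbosity now flows through the standard structured logger. A minimal sketch of turning on debug output (the handler choice is an assumption, not part of this diff):

slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
	Level: slog.LevelDebug, // let slog.Debug(...) calls through
})))
slog.Debug("copying", "source", src, "path", dst)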
@ -16,6 +16,7 @@ limitations under the License.
package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
import (
"log/slog"
"os"
"path/filepath"
@ -57,12 +58,12 @@ func (i *LocalInstaller) Install() error {
if !isPlugin(i.Source) {
return ErrMissingMetadata
}
debug("symlinking %s to %s", i.Source, i.Path())
slog.Debug("symlinking", "source", i.Source, "path", i.Path())
return os.Symlink(i.Source, i.Path())
}
// Update updates a local repository
func (i *LocalInstaller) Update() error {
debug("local repository is auto-updated")
slog.Debug("local repository is auto-updated")
return nil
}

@ -16,6 +16,7 @@ limitations under the License.
package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
import (
"log/slog"
"os"
"sort"
@ -88,13 +89,13 @@ func (i *VCSInstaller) Install() error {
return ErrMissingMetadata
}
debug("copying %s to %s", i.Repo.LocalPath(), i.Path())
slog.Debug("copying files", "source", i.Repo.LocalPath(), "destination", i.Path())
return fs.CopyDir(i.Repo.LocalPath(), i.Path())
}
// Update updates a remote repository
func (i *VCSInstaller) Update() error {
debug("updating %s", i.Repo.Remote())
slog.Debug("updating", "source", i.Repo.Remote())
if i.Repo.IsDirty() {
return errors.New("plugin repo was modified")
}
@ -128,7 +129,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
if err != nil {
return "", err
}
debug("found refs: %s", refs)
slog.Debug("found refs", "refs", refs)
// Convert and filter the list to semver.Version instances
semvers := getSemVers(refs)
@ -139,7 +140,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
if constraint.Check(v) {
// If the constraint passes get the original reference
ver := v.Original()
debug("setting to %s", ver)
slog.Debug("setting to version", "version", ver)
return ver, nil
}
}
@ -149,17 +150,17 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
// setVersion attempts to checkout the version
func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error {
debug("setting version to %q", i.Version)
slog.Debug("setting version", "version", i.Version)
return repo.UpdateVersion(ref)
}
// sync will clone or update a remote repo.
func (i *VCSInstaller) sync(repo vcs.Repo) error {
if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) {
debug("cloning %s to %s", repo.Remote(), repo.LocalPath())
slog.Debug("cloning", "source", repo.Remote(), "destination", repo.LocalPath())
return repo.Get()
}
debug("updating %s", repo.Remote())
slog.Debug("updating", "source", repo.Remote(), "destination", repo.LocalPath())
return repo.Update()
}

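solveVersion above filters VCS refs through Masterminds semver constraints; the library calls in isolation (constraint and version values are illustrative):

c, err := semver.NewConstraint(">= 1.2.0, < 2.0.0")
if err != nil {
	return err
}
v, err := semver.NewVersion("1.4.3")
if err != nil {
	return err
}
fmt.Println(c.Check(v)) // true: 1.4.3 satisfies the constraint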
@ -31,7 +31,6 @@ import (
"sync"
"github.com/Masterminds/semver/v3"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -56,8 +55,6 @@ storing semantic versions, Helm adopts the convention of changing plus (+) to
an underscore (_) in chart version tags when pushing to a registry and back to
a plus (+) when pulling from a registry.`
var errDeprecatedRemote = errors.New("providing github.com/containerd/containerd/remotes.Resolver via ClientOptResolver is no longer supported")
type (
// RemoteClient shadows the ORAS remote.Client interface
// (hiding the ORAS type from Helm client visibility)
@ -231,12 +228,6 @@ func ClientOptPlainHTTP() ClientOption {
}
}
func ClientOptResolver(_ remotes.Resolver) ClientOption {
return func(c *Client) {
c.err = errDeprecatedRemote
}
}
type (
// LoginOption allows specifying various settings on login
LoginOption func(*loginOperation)
@ -771,7 +762,7 @@ func PushOptStrictMode(strictMode bool) PushOption {
}
}
// PushOptCreationDate returns a function that sets the creation time
// PushOptCreationTime returns a function that sets the creation time
func PushOptCreationTime(creationTime string) PushOption {
return func(operation *pushOperation) {
operation.creationTime = creationTime

@ -1,33 +0,0 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"testing"
"github.com/containerd/containerd/remotes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewClientResolverNotSupported(t *testing.T) {
var r remotes.Resolver
client, err := NewClient(ClientOptResolver(r))
require.Equal(t, err, errDeprecatedRemote)
assert.Nil(t, client)
}

@ -184,9 +184,7 @@ func initCompromisedRegistryTestServer() string {
w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
w.WriteHeader(200)
// layers[0] is the blob []byte("a")
w.Write([]byte(
fmt.Sprintf(`{ "schemaVersion": 2, "config": {
fmt.Fprintf(w, `{ "schemaVersion": 2, "config": {
"mediaType": "%s",
"digest": "sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133",
"size": 181
@ -198,7 +196,7 @@ func initCompromisedRegistryTestServer() string {
"size": 1
}
]
}`, ConfigMediaType, ChartLayerMediaType)))
}`, ConfigMediaType, ChartLayerMediaType)
} else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)

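The change above is a pure idiom swap: fmt.Fprintf formats directly into the http.ResponseWriter instead of allocating an intermediate string and byte slice via w.Write([]byte(fmt.Sprintf(...))). The same idiom in isolation (field value illustrative):

fmt.Fprintf(w, `{"mediaType": %q}`, ConfigMediaType)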
@ -65,12 +65,17 @@ var InstallOrder KindSortOrder = []string{
"IngressClass",
"Ingress",
"APIService",
"MutatingWebhookConfiguration",
"ValidatingWebhookConfiguration",
}
// UninstallOrder is the order in which manifests should be uninstalled (by Kind).
//
// Those occurring earlier in the list get uninstalled before those occurring later in the list.
var UninstallOrder KindSortOrder = []string{
// For uninstall, we remove validation before mutation to ensure webhooks don't block removal
"ValidatingWebhookConfiguration",
"MutatingWebhookConfiguration",
"APIService",
"Ingress",
"IngressClass",

@ -173,6 +173,14 @@ func TestKindSorter(t *testing.T) {
Name: "F",
Head: &SimpleHead{Kind: "PriorityClass"},
},
{
Name: "M",
Head: &SimpleHead{Kind: "MutatingWebhookConfiguration"},
},
{
Name: "V",
Head: &SimpleHead{Kind: "ValidatingWebhookConfiguration"},
},
}
for _, test := range []struct {
@ -180,8 +188,8 @@ func TestKindSorter(t *testing.T) {
order KindSortOrder
expected string
}{
{"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvw!"},
{"uninstall", UninstallOrder, "wvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"},
{"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"},
{"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"},
} {
var buf bytes.Buffer
t.Run(test.description, func(t *testing.T) {

@ -17,7 +17,7 @@ limitations under the License.
package util
import (
"log"
"log/slog"
"path"
"sort"
"strconv"
@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error {
}
if isUnknownHook {
log.Printf("info: skipping unknown hook: %q", hookTypes)
slog.Info("skipping unknown hooks", "hookTypes", hookTypes)
continue
}

@ -22,7 +22,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"log/slog"
"net/url"
"os"
"path/filepath"
@ -343,7 +343,8 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) {
func (e *Entry) String() string {
buf, err := json.Marshal(e)
if err != nil {
log.Panic(err)
slog.Error("failed to marshal entry", slog.Any("error", err))
panic(err)
}
return string(buf)
}
