diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml
index a3088ad75..e8f2560e3 100644
--- a/.github/workflows/govulncheck.yml
+++ b/.github/workflows/govulncheck.yml
@@ -3,6 +3,11 @@
 on:
   push:
     paths:
       - go.sum
+      - .github/workflows/govulncheck.yml
+  pull_request:
+    paths:
+      - go.sum
+      - .github/workflows/govulncheck.yml
   schedule:
     - cron: "0 0 * * *"
@@ -15,6 +20,8 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+        with:
+          persist-credentials: false
      - name: Add variables to environment file
        run: cat ".github/env" >> "$GITHUB_ENV"
      - name: Setup Go
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 34b244086..cf8595742 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -31,6 +31,7 @@ jobs:
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0
        with:
          go-version: '${{ env.GOLANG_VERSION }}'
+         check-latest: true
      - name: Run unit tests
        run: make test-coverage
      - name: Build Helm Binaries
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 3e475edc0..514a649cb 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -55,7 +55,7 @@ jobs:
      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
-       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+       uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: SARIF file
          path: results.sarif
diff --git a/.golangci.yml b/.golangci.yml
index 236dadf7b..7eca135e5 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -26,11 +26,13 @@ linters:
    - misspell
    - nakedret
    - revive
+   - sloglint
    - staticcheck
    - thelper
    - unused
    - usestdlibvars
    - usetesting
+   - exhaustive

  exclusions:
@@ -72,6 +74,9 @@ linters:
      recommendations:
        - github.com/evanphx/json-patch/v5

+   exhaustive:
+     default-signifies-exhaustive: true
+
 run:
   timeout: 10m
diff --git a/Makefile b/Makefile
index d7422bf23..a18b83f0d 100644
--- a/Makefile
+++ b/Makefile
@@ -58,20 +58,6 @@
 LDFLAGS += -X helm.sh/helm/v4/internal/version.gitCommit=${GIT_COMMIT}
 LDFLAGS += -X helm.sh/helm/v4/internal/version.gitTreeState=${GIT_DIRTY}
 LDFLAGS += $(EXT_LDFLAGS)

-# Define constants based on the client-go version
-K8S_MODULES_VER=$(subst ., ,$(subst v,,$(shell go list -f '{{.Version}}' -m k8s.io/client-go)))
-K8S_MODULES_MAJOR_VER=$(shell echo $$(($(firstword $(K8S_MODULES_VER)) + 1)))
-K8S_MODULES_MINOR_VER=$(word 2,$(K8S_MODULES_VER))
-
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/internal/version.kubeClientVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/internal/version.kubeClientVersionMinor=$(K8S_MODULES_MINOR_VER)
-
 .PHONY: all
 all: build
@@ -129,6 +115,13 @@ test-coverage:

 .PHONY: test-style
 test-style:
+	@EXPECTED_VERSION=$$(grep GOLANGCI_LINT_VERSION .github/env | cut -d= -f2); \
+	ACTUAL_VERSION=$$(golangci-lint --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1); \
+	if [ "v$$ACTUAL_VERSION" != "$$EXPECTED_VERSION" ]; then \
+		echo "Warning: golangci-lint version is v$$ACTUAL_VERSION (expected $$EXPECTED_VERSION from CI)"; \
+		echo "To install the correct version, run:"; \
+		echo " curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b \$$(go env GOPATH)/bin $$EXPECTED_VERSION"; \
+	fi
 	golangci-lint run ./...
 	@scripts/validate-license.sh
diff --git a/OWNERS b/OWNERS
index 761cf76a3..13827661a 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,4 +1,5 @@
 maintainers:
+  - banjoh
   - gjenkins8
   - joejulian
   - marckhouzam
@@ -7,9 +8,8 @@ maintainers:
   - sabre1041
   - scottrigby
   - technosophos
-triage:
-  - banjoh
   - TerryHowe
+triage:
   - yxxhero
   - zonggen
   - z4ce
diff --git a/README.md b/README.md
index 22422d5a4..37bc8abaa 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,7 @@
 If you want to use a package manager:
 - [Scoop](https://scoop.sh/) users can use `scoop install helm`.
 - [Snapcraft](https://snapcraft.io/) users can use `snap install helm --classic`.
 - [Flox](https://flox.dev) users can use `flox install kubernetes-helm`.
+- [Mise-en-place](https://mise.jdx.dev/) users can use `mise use -g helm@latest`.

 To rapidly get Helm up and running, start with the [Quick Start Guide](https://helm.sh/docs/intro/quickstart/).
diff --git a/go.mod b/go.mod
index 0561bcc44..6e2d9c15d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,10 @@
 module helm.sh/helm/v4

-go 1.24.0
+go 1.25.0

 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
-	github.com/BurntSushi/toml v1.5.0
+	github.com/BurntSushi/toml v1.6.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/Masterminds/semver/v3 v3.4.0
 	github.com/Masterminds/sprig/v3 v3.3.0
@@ -17,8 +17,8 @@ require (
 	github.com/evanphx/json-patch/v5 v5.9.11
 	github.com/extism/go-sdk v1.7.1
 	github.com/fatih/color v1.18.0
-	github.com/fluxcd/cli-utils v0.36.0-flux.14
-	github.com/foxcpp/go-mockdns v1.1.0
+	github.com/fluxcd/cli-utils v0.37.0-flux.1
+	github.com/foxcpp/go-mockdns v1.2.0
 	github.com/gobwas/glob v0.2.3
 	github.com/gofrs/flock v0.13.0
 	github.com/gosuri/uitable v0.0.4
@@ -28,25 +28,25 @@ require (
 	github.com/moby/term v0.5.2
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.1
-	github.com/rubenv/sql-migrate v1.8.0
+	github.com/rubenv/sql-migrate v1.8.1
 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
-	github.com/spf13/cobra v1.10.1
+	github.com/spf13/cobra v1.10.2
 	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
-	github.com/tetratelabs/wazero v1.10.1
+	github.com/tetratelabs/wazero v1.11.0
 	go.yaml.in/yaml/v3 v3.0.4
-	golang.org/x/crypto v0.45.0
-	golang.org/x/term v0.37.0
-	golang.org/x/text v0.31.0
+	golang.org/x/crypto v0.47.0
+	golang.org/x/term v0.39.0
+	golang.org/x/text v0.33.0
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.34.2
-	k8s.io/apiextensions-apiserver v0.34.2
-	k8s.io/apimachinery v0.34.2
-	k8s.io/apiserver v0.34.2
-	k8s.io/cli-runtime v0.34.2
-	k8s.io/client-go v0.34.2
+	k8s.io/api v0.35.0
+	k8s.io/apiextensions-apiserver v0.35.0
+	k8s.io/apimachinery v0.35.0
+	k8s.io/apiserver v0.35.0
+	k8s.io/cli-runtime v0.35.0
+	k8s.io/client-go v0.35.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubectl v0.34.2
+	k8s.io/kubectl v0.35.0
 	oras.land/oras-go/v2 v2.6.0
 	sigs.k8s.io/controller-runtime v0.22.4
 	sigs.k8s.io/kustomize/kyaml v0.21.0
@@ -85,14 +85,12 @@
github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect @@ -114,19 +112,16 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.37.0 // indirect + github.com/onsi/gomega v1.38.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect @@ -141,7 +136,7 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect @@ -156,30 +151,30 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.47.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - 
golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools v0.40.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/grpc v1.72.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/grpc v1.72.2 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.34.2 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/component-base v0.35.0 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect diff --git a/go.sum b/go.sum index cf09e6b3c..b1e843f1b 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -26,8 +26,6 @@ github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBi github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -93,10 +91,10 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluxcd/cli-utils v0.36.0-flux.14 h1:I//AMVUXTc+M04UtIXArMXQZCazGMwfemodV1j/yG8c= -github.com/fluxcd/cli-utils v0.36.0-flux.14/go.mod h1:uDo7BYOfbdmk/asnHuI0IQPl6u0FCgcN54AHDu3Y5As= -github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= -github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/fluxcd/cli-utils v0.37.0-flux.1 h1:k/VvPNT3tGa/l2N+qzHduaQr3GVbgoWS6nw7tGZz16w= +github.com/fluxcd/cli-utils v0.37.0-flux.1/go.mod h1:aND5wX3LuTFtB7eUT7vsWr8mmxRVSPR2Wkvbn0SqPfw= +github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0= +github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= @@ -132,8 +130,6 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -155,8 +151,6 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= @@ -182,8 +176,6 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= @@ -225,8 +217,6 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= -github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -242,12 +232,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -255,8 +243,6 @@ github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgr github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -265,16 +251,16 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -289,8 +275,8 @@ github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0 github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= -github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0= +github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= @@ -304,8 +290,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -321,14 +307,12 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q= github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk= -github.com/tetratelabs/wazero v1.10.1 h1:2DugeJf6VVk58KTPszlNfeeN8AhhpwcZqkJj2wwFuH8= -github.com/tetratelabs/wazero v1.10.1/go.mod h1:DRm5twOQ5Gr1AoEdSi0CLjDQF1J9ZAuyqFIjl1KKfQU= +github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA= +github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= @@ -336,8 +320,8 @@ go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGh go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 
h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= @@ -366,52 +350,43 @@ go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWer go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto 
v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -419,28 +394,24 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -454,8 +425,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -463,8 +434,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -472,38 +443,33 @@ 
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc 
v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -512,34 +478,34 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= -k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= -k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= -k8s.io/cli-runtime v0.34.2 h1:cct1GEuWc3IyVT8MSCoIWzRGw9HJ/C5rgP32H60H6aE= -k8s.io/cli-runtime v0.34.2/go.mod h1:X13tsrYexYUCIq8MarCBy8lrm0k0weFPTpcaNo7lms4= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= -k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= +k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= +k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= +k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= +k8s.io/client-go v0.35.0 
h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= +k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA= -k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= +k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ= diff --git a/internal/chart/v3/lint/lint_test.go b/internal/chart/v3/lint/lint_test.go index d61a9a740..221de8572 100644 --- a/internal/chart/v3/lint/lint_test.go +++ b/internal/chart/v3/lint/lint_test.go @@ -175,16 +175,6 @@ func TestHelmCreateChart(t *testing.T) { // // Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent // of the `--set` flag. -// -// Note: This test requires the following ldflags to be set per the current Kubernetes version to avoid false-positive -// results. -// 1. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor= -// 2. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor= -// or directly use '$(LDFLAGS)' in Makefile. -// -// When run without ldflags, the test passes giving a false-positive result. 
This is because the variables -// `k8sVersionMajor` and `k8sVersionMinor` by default are set to an older version of Kubernetes, with which, there -// might not be the deprecation warning. func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) { createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir()) if err != nil { diff --git a/internal/chart/v3/lint/rules/crds.go b/internal/chart/v3/lint/rules/crds.go index 735573624..deedeb0f2 100644 --- a/internal/chart/v3/lint/rules/crds.go +++ b/internal/chart/v3/lint/rules/crds.go @@ -70,7 +70,7 @@ func Crds(linter *support.Linter) { var yamlStruct *k8sYamlStruct err := decoder.Decode(&yamlStruct) - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/internal/chart/v3/lint/rules/deprecations.go b/internal/chart/v3/lint/rules/deprecations.go index 6f86bdbbd..a607a5fb4 100644 --- a/internal/chart/v3/lint/rules/deprecations.go +++ b/internal/chart/v3/lint/rules/deprecations.go @@ -28,14 +28,6 @@ import ( kscheme "k8s.io/client-go/kubernetes/scheme" ) -var ( - // This should be set in the Makefile based on the version of client-go being imported. - // These constants will be overwritten with LDFLAGS. The version components must be - // strings in order for LDFLAGS to set them. - k8sVersionMajor = "1" - k8sVersionMinor = "20" -) - // deprecatedAPIError indicates than an API is deprecated in Kubernetes type deprecatedAPIError struct { Deprecated string @@ -56,33 +48,29 @@ func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVer return nil } - majorVersion := k8sVersionMajor - minorVersion := k8sVersionMinor - - if kubeVersion != nil { - majorVersion = kubeVersion.Major - minorVersion = kubeVersion.Minor + if kubeVersion == nil { + kubeVersion = &common.DefaultCapabilities.KubeVersion } - runtimeObject, err := resourceToRuntimeObject(resource) + kubeVersionMajor, err := strconv.Atoi(kubeVersion.Major) if err != nil { - // do not error for non-kubernetes resources - if runtime.IsNotRegisteredError(err) { - return nil - } return err } - - major, err := strconv.Atoi(majorVersion) + kubeVersionMinor, err := strconv.Atoi(kubeVersion.Minor) if err != nil { return err } - minor, err := strconv.Atoi(minorVersion) + + runtimeObject, err := resourceToRuntimeObject(resource) if err != nil { + // do not error for non-kubernetes resources + if runtime.IsNotRegisteredError(err) { + return nil + } return err } - if !deprecation.IsDeprecated(runtimeObject, major, minor) { + if !deprecation.IsDeprecated(runtimeObject, kubeVersionMajor, kubeVersionMinor) { return nil } gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind) diff --git a/internal/chart/v3/lint/rules/template.go b/internal/chart/v3/lint/rules/template.go index 204966364..38e602b7e 100644 --- a/internal/chart/v3/lint/rules/template.go +++ b/internal/chart/v3/lint/rules/template.go @@ -150,7 +150,7 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string var yamlStruct *k8sYamlStruct err := decoder.Decode(&yamlStruct) - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/internal/chart/v3/loader/archive.go b/internal/chart/v3/loader/archive.go index 358c2ce4d..a9d4faf8f 100644 --- a/internal/chart/v3/loader/archive.go +++ b/internal/chart/v3/loader/archive.go @@ -56,7 +56,7 @@ func LoadFile(name string) (*chart.Chart, error) { c, err := LoadArchive(raw) if err != nil { - if err == gzip.ErrHeader { + if errors.Is(err, gzip.ErrHeader) { return nil, fmt.Errorf("file '%s' does not appear to be 
a valid chart file (details: %s)", name, err) } } diff --git a/internal/chart/v3/loader/load.go b/internal/chart/v3/loader/load.go index 1c5b4cad1..373c4659f 100644 --- a/internal/chart/v3/loader/load.go +++ b/internal/chart/v3/loader/load.go @@ -181,7 +181,7 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { // LoadValues loads values from a reader. // // The reader is expected to contain one or more YAML documents, the values of which are merged. -// And the values can be either a chart's default values or a user-supplied values. +// And the values can be either a chart's default values or user-supplied values. func LoadValues(data io.Reader) (map[string]interface{}, error) { values := map[string]interface{}{} reader := utilyaml.NewYAMLReader(bufio.NewReader(data)) @@ -189,7 +189,7 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) { currentMap := map[string]interface{}{} raw, err := reader.Read() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return nil, fmt.Errorf("error reading yaml document: %w", err) diff --git a/internal/chart/v3/loader/load_test.go b/internal/chart/v3/loader/load_test.go index f91005bf6..12403f9c2 100644 --- a/internal/chart/v3/loader/load_test.go +++ b/internal/chart/v3/loader/load_test.go @@ -20,6 +20,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "errors" "io" "log" "os" @@ -116,7 +117,7 @@ func TestBomTestData(t *testing.T) { tr := tar.NewReader(unzipped) for { file, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/internal/chart/v3/util/dependencies.go b/internal/chart/v3/util/dependencies.go index cd7a8b78c..4ef9e6961 100644 --- a/internal/chart/v3/util/dependencies.go +++ b/internal/chart/v3/util/dependencies.go @@ -280,7 +280,12 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." 
+ child) if err != nil { - slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err)) + slog.Warn( + "ImportValues missing table from chart", + slog.String("chart", "chart"), + slog.String("name", r.Name), + slog.Any("error", err), + ) continue } // create value map from child to be merged into parent diff --git a/internal/chart/v3/util/save_test.go b/internal/chart/v3/util/save_test.go index 62625919b..7a42a76af 100644 --- a/internal/chart/v3/util/save_test.go +++ b/internal/chart/v3/util/save_test.go @@ -21,6 +21,7 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + "errors" "fmt" "io" "os" @@ -201,7 +202,7 @@ func retrieveAllHeadersFromTar(path string) ([]*tar.Header, error) { headers := []*tar.Header{} for { hd, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/internal/logging/logging.go b/internal/logging/logging.go index b8faf859e..674e2db34 100644 --- a/internal/logging/logging.go +++ b/internal/logging/logging.go @@ -36,6 +36,9 @@ type DebugCheckHandler struct { // Enabled implements slog.Handler.Enabled func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool { if level == slog.LevelDebug { + if h.debugEnabled == nil { + return false + } return h.debugEnabled() } return true // Always log other levels diff --git a/internal/logging/logging_test.go b/internal/logging/logging_test.go index 75e6c4025..d22a47a31 100644 --- a/internal/logging/logging_test.go +++ b/internal/logging/logging_test.go @@ -18,8 +18,10 @@ package logging import ( "bytes" + "context" "log/slog" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -113,3 +115,259 @@ func TestLogHolder_InterfaceCompliance(t *testing.T) { assert.Equal(t, handler, logger.Handler()) }) } + +func TestDebugCheckHandler_Enabled(t *testing.T) { + t.Run("returns debugEnabled function result for debug level", func(t *testing.T) { + // Test with debug enabled + debugEnabled := func() bool { return true } + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug)) + }) + + t.Run("returns false for debug level when debug disabled", func(t *testing.T) { + // Test with debug disabled + debugEnabled := func() bool { return false } + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + assert.False(t, handler.Enabled(t.Context(), slog.LevelDebug)) + }) + + t.Run("always returns true for non-debug levels", func(t *testing.T) { + debugEnabled := func() bool { return false } // Debug disabled + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + // Even with debug disabled, other levels should always be enabled + assert.True(t, handler.Enabled(t.Context(), slog.LevelInfo)) + assert.True(t, handler.Enabled(t.Context(), slog.LevelWarn)) + assert.True(t, handler.Enabled(t.Context(), slog.LevelError)) + }) + + t.Run("calls debugEnabled function dynamically", func(t *testing.T) { + callCount := 0 + debugEnabled := func() bool { + callCount++ + return callCount%2 == 1 // Alternates between true and false + } + + buf := &bytes.Buffer{} + 
baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + // First call should return true + assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 1, callCount) + + // Second call should return false + assert.False(t, handler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 2, callCount) + + // Third call should return true again + assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 3, callCount) + }) +} + +func TestDebugCheckHandler_Handle(t *testing.T) { + t.Run("delegates to underlying handler", func(t *testing.T) { + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: func() bool { return true }, + } + + record := slog.NewRecord(time.Now(), slog.LevelInfo, "test message", 0) + err := handler.Handle(t.Context(), record) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), "test message") + }) + + t.Run("handles context correctly", func(t *testing.T) { + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: func() bool { return true }, + } + + type testKey string + ctx := context.WithValue(t.Context(), testKey("test"), "value") + record := slog.NewRecord(time.Now(), slog.LevelInfo, "context test", 0) + err := handler.Handle(ctx, record) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), "context test") + }) +} + +func TestDebugCheckHandler_WithAttrs(t *testing.T) { + t.Run("returns new DebugCheckHandler with attributes", func(t *testing.T) { + logger := NewLogger(func() bool { return true }) + handler := logger.Handler() + newHandler := handler.WithAttrs([]slog.Attr{ + slog.String("key1", "value1"), + slog.Int("key2", 42), + }) + + // Should return a DebugCheckHandler + debugHandler, ok := newHandler.(*DebugCheckHandler) + assert.True(t, ok) + assert.NotNil(t, debugHandler) + + // Should preserve the debugEnabled function + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug)) + + // Should have the attributes applied to the underlying handler + assert.NotEqual(t, handler, debugHandler.handler) + }) + + t.Run("preserves debugEnabled function", func(t *testing.T) { + callCount := 0 + debugEnabled := func() bool { + callCount++ + return callCount%2 == 1 + } + + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + attrs := []slog.Attr{slog.String("test", "value")} + newHandler := handler.WithAttrs(attrs) + + // The new handler should use the same debugEnabled function + assert.True(t, newHandler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 1, callCount) + + assert.False(t, newHandler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 2, callCount) + }) +} + +func TestDebugCheckHandler_WithGroup(t *testing.T) { + t.Run("returns new DebugCheckHandler with group", func(t *testing.T) { + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: func() bool { return true }, + } + + newHandler := handler.WithGroup("testgroup") + + // 
Should return a DebugCheckHandler + debugHandler, ok := newHandler.(*DebugCheckHandler) + assert.True(t, ok) + assert.NotNil(t, debugHandler) + + // Should preserve the debugEnabled function + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug)) + + // Should have the group applied to the underlying handler + assert.NotEqual(t, handler.handler, debugHandler.handler) + }) + + t.Run("preserves debugEnabled function", func(t *testing.T) { + callCount := 0 + debugEnabled := func() bool { + callCount++ + return callCount%2 == 1 + } + + buf := &bytes.Buffer{} + baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}) + handler := &DebugCheckHandler{ + handler: baseHandler, + debugEnabled: debugEnabled, + } + + newHandler := handler.WithGroup("testgroup") + + // The new handler should use the same debugEnabled function + assert.True(t, newHandler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 1, callCount) + + assert.False(t, newHandler.Enabled(t.Context(), slog.LevelDebug)) + assert.Equal(t, 2, callCount) + }) +} + +func TestDebugCheckHandler_Integration(t *testing.T) { + t.Run("works with NewLogger function", func(t *testing.T) { + debugEnabled := func() bool { return true } + logger := NewLogger(debugEnabled) + + assert.NotNil(t, logger) + + // The logger should have a DebugCheckHandler + handler := logger.Handler() + debugHandler, ok := handler.(*DebugCheckHandler) + assert.True(t, ok) + + // Should enable debug when debugEnabled returns true + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug)) + + // Should enable other levels regardless + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelInfo)) + }) + + t.Run("dynamic debug checking works in practice", func(t *testing.T) { + debugState := false + debugEnabled := func() bool { return debugState } + + logger := NewLogger(debugEnabled) + + // Initially debug should be disabled + assert.False(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug)) + + // Enable debug + debugState = true + assert.True(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug)) + + // Disable debug again + debugState = false + assert.False(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug)) + }) + + t.Run("handles nil debugEnabled function", func(t *testing.T) { + logger := NewLogger(nil) + + assert.NotNil(t, logger) + + // The logger should have a DebugCheckHandler + handler := logger.Handler() + debugHandler, ok := handler.(*DebugCheckHandler) + assert.True(t, ok) + + // When debugEnabled is nil, debug level should be disabled (default behavior) + assert.False(t, debugHandler.Enabled(t.Context(), slog.LevelDebug)) + + // Other levels should always be enabled + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelInfo)) + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelWarn)) + assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelError)) + }) +} diff --git a/internal/plugin/config.go b/internal/plugin/config.go index e1f491779..785bde840 100644 --- a/internal/plugin/config.go +++ b/internal/plugin/config.go @@ -23,14 +23,13 @@ import ( "go.yaml.in/yaml/v3" ) -// Config represents an plugin type specific configuration -// It is expected to type assert (cast) the a Config to its expected underlying type (schema.ConfigCLIV1, schema.ConfigGetterV1, etc). 
+// Config represents a plugin type specific configuration +// It is expected to type assert (cast) the Config to its expected underlying type (schema.ConfigCLIV1, schema.ConfigGetterV1, etc). type Config interface { Validate() error } -func unmarshaConfig(pluginType string, configData map[string]any) (Config, error) { - +func unmarshalConfig(pluginType string, configData map[string]any) (Config, error) { pluginTypeMeta, ok := pluginTypesIndex[pluginType] if !ok { return nil, fmt.Errorf("unknown plugin type %q", pluginType) diff --git a/internal/plugin/config_test.go b/internal/plugin/config_test.go index c51b77ff0..beac05abf 100644 --- a/internal/plugin/config_test.go +++ b/internal/plugin/config_test.go @@ -27,7 +27,7 @@ import ( func TestUnmarshaConfig(t *testing.T) { // Test unmarshalling a CLI plugin config { - config, err := unmarshaConfig("cli/v1", map[string]any{ + config, err := unmarshalConfig("cli/v1", map[string]any{ "usage": "usage string", "shortHelp": "short help string", "longHelp": "long help string", @@ -46,7 +46,7 @@ func TestUnmarshaConfig(t *testing.T) { // Test unmarshalling invalid config data { - config, err := unmarshaConfig("cli/v1", map[string]any{ + config, err := unmarshalConfig("cli/v1", map[string]any{ "invalid field": "foo", }) require.Error(t, err) diff --git a/internal/plugin/installer/extractor.go b/internal/plugin/installer/extractor.go index 71efebc67..b753dfbca 100644 --- a/internal/plugin/installer/extractor.go +++ b/internal/plugin/installer/extractor.go @@ -140,7 +140,7 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error { tarReader := tar.NewReader(uncompressedStream) for { header, err := tarReader.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/internal/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go index be40b1b90..7f7e6cef6 100644 --- a/internal/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -368,7 +368,7 @@ func TestExtractWithNestedDirectories(t *testing.T) { }{ {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg}, {"bin/", "", 0755, tar.TypeDir}, - {"bin/plugin", "#!/bin/bash\necho plugin", 0755, tar.TypeReg}, + {"bin/plugin", "#!/usr/bin/env sh\necho plugin", 0755, tar.TypeReg}, {"docs/", "", 0755, tar.TypeDir}, {"docs/README.md", "readme content", 0644, tar.TypeReg}, {"docs/examples/", "", 0755, tar.TypeDir}, @@ -531,7 +531,7 @@ func TestExtractPluginInSubdirectory(t *testing.T) { {"my-plugin/", "", 0755, tar.TypeDir}, {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test plugin\ncommand: $HELM_PLUGIN_DIR/bin/my-plugin", 0644, tar.TypeReg}, {"my-plugin/bin/", "", 0755, tar.TypeDir}, - {"my-plugin/bin/my-plugin", "#!/bin/bash\necho test", 0755, tar.TypeReg}, + {"my-plugin/bin/my-plugin", "#!/usr/bin/env sh\necho test", 0755, tar.TypeReg}, } for _, file := range files { diff --git a/internal/plugin/installer/installer.go b/internal/plugin/installer/installer.go index c7c1a8801..e3975c2d7 100644 --- a/internal/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -31,9 +31,6 @@ import ( // ErrMissingMetadata indicates that plugin.yaml is missing. var ErrMissingMetadata = errors.New("plugin metadata (plugin.yaml) missing") -// Debug enables verbose output. -var Debug bool - // Options contains options for plugin installation. 
type Options struct { // Verify enables signature verification before installation @@ -80,7 +77,7 @@ func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) return nil, err } if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) { - slog.Warn("plugin already exists", "path", i.Path(), slog.Any("error", pathErr)) + slog.Warn("plugin already exists", slog.String("path", i.Path()), slog.Any("error", pathErr)) return nil, errors.New("plugin already exists") } @@ -132,7 +129,7 @@ func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) // Update updates a plugin. func Update(i Installer) error { if _, pathErr := os.Stat(i.Path()); os.IsNotExist(pathErr) { - slog.Warn("plugin does not exist", "path", i.Path(), slog.Any("error", pathErr)) + slog.Warn("plugin does not exist", slog.String("path", i.Path()), slog.Any("error", pathErr)) return errors.New("plugin does not exist") } return i.Update() @@ -163,7 +160,11 @@ func NewForSource(source, version string) (installer Installer, err error) { func FindSource(location string) (Installer, error) { installer, err := existingVCSRepo(location) if err != nil && err.Error() == "Cannot detect VCS" { - slog.Warn("cannot get information about plugin source", "location", location, slog.Any("error", err)) + slog.Warn( + "cannot get information about plugin source", + slog.String("location", location), + slog.Any("error", err), + ) return installer, errors.New("cannot get information about plugin source") } return installer, err diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index 2decb695f..3ee8ab6d0 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -87,7 +87,7 @@ func TestLocalInstallerTarball(t *testing.T) { Mode int64 }{ {"test-plugin/plugin.yaml", "name: test-plugin\napiVersion: v1\ntype: cli/v1\nruntime: subprocess\nversion: 1.0.0\nconfig:\n shortHelp: test\n longHelp: test\nruntimeConfig:\n platformCommand:\n - command: echo", 0644}, - {"test-plugin/bin/test-plugin", "#!/bin/bash\necho test", 0755}, + {"test-plugin/bin/test-plugin", "#!/usr/bin/env sh\necho test", 0755}, } for _, file := range files { diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go index afbb42ca5..67f99b6f8 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -19,6 +19,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "errors" "fmt" "io" "log/slog" @@ -214,7 +215,7 @@ func extractTar(r io.Reader, targetDir string) error { for { header, err := tarReader.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index b051c1223..4e019f0b3 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -174,13 +174,12 @@ func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig { } func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) { - - config, err := unmarshaConfig(mv1.Type, mv1.Config) + config, err := unmarshalConfig(mv1.Type, mv1.Config) if err != nil { return nil, err } - runtimeConfig, err := convertMetdataRuntimeConfig(mv1.Runtime, mv1.RuntimeConfig) + runtimeConfig, err := convertMetadataRuntimeConfig(mv1.Runtime, mv1.RuntimeConfig) if err != nil { return nil, err } @@ -197,7 +196,7 @@ func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) { }, nil } -func 
convertMetdataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) { +func convertMetadataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) { var runtimeConfig RuntimeConfig var err error diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go index b2ff0b7ca..7d39a9a43 100644 --- a/internal/plugin/runtime.go +++ b/internal/plugin/runtime.go @@ -53,13 +53,13 @@ func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (Runtim return config, nil } -// parseEnv takes a list of "KEY=value" environment variable strings +// ParseEnv takes a list of "KEY=value" environment variable strings // and transforms the result into a map[KEY]=value // // - empty input strings are ignored // - input strings with no value are stored as empty strings // - duplicate keys overwrite earlier values -func parseEnv(env []string) map[string]string { +func ParseEnv(env []string) map[string]string { result := make(map[string]string, len(env)) for _, envVar := range env { parts := strings.SplitN(envVar, "=", 2) @@ -75,7 +75,9 @@ func parseEnv(env []string) map[string]string { return result } -func formatEnv(env map[string]string) []string { +// FormatEnv takes a map[KEY]=value and transforms it into +// a list of "KEY=value" environment variable strings +func FormatEnv(env map[string]string) []string { result := make([]string, 0, len(env)) for key, value := range env { result = append(result, fmt.Sprintf("%s=%s", key, value)) diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go index b5cc79a6f..cd9a02535 100644 --- a/internal/plugin/runtime_extismv1.go +++ b/internal/plugin/runtime_extismv1.go @@ -259,7 +259,7 @@ func buildPluginConfig(input *Input, r *RuntimeExtismV1) extism.PluginConfig { mc = mc.WithStderr(input.Stderr) } if len(input.Env) > 0 { - env := parseEnv(input.Env) + env := ParseEnv(input.Env) for k, v := range env { mc = mc.WithEnv(k, v) } diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index 802732b14..c836c1c6d 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -139,7 +139,7 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { return nil } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -150,7 +150,7 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { } cmd := exec.Command(main, argv...) - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -198,9 +198,9 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { cmds := r.RuntimeConfig.PlatformCommand - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -210,7 +210,7 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { } cmd := exec.Command(command, args...) 
- cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdin = input.Stdin cmd.Stdout = input.Stdout @@ -231,9 +231,9 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name) } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -261,7 +261,7 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) postRendered := &bytes.Buffer{} stderr := &bytes.Buffer{} - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = postRendered cmd.Stderr = stderr diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go index 6a41b149f..fa6f470a9 100644 --- a/internal/plugin/runtime_subprocess_getter.go +++ b/internal/plugin/runtime_subprocess_getter.go @@ -56,9 +56,9 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol) } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir env["HELM_PLUGIN_USERNAME"] = msg.Options.Username @@ -83,7 +83,7 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { cmd := exec.Command( pluginCommand, args...) - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = &buf cmd.Stderr = os.Stderr diff --git a/internal/plugin/runtime_test.go b/internal/plugin/runtime_test.go index f8fe481c1..5552af08e 100644 --- a/internal/plugin/runtime_test.go +++ b/internal/plugin/runtime_test.go @@ -56,7 +56,7 @@ func TestParseEnv(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := parseEnv(tc.env) + result := ParseEnv(tc.env) assert.Equal(t, tc.expected, result) }) } @@ -93,7 +93,7 @@ func TestFormatEnv(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := formatEnv(tc.env) + result := FormatEnv(tc.env) assert.ElementsMatch(t, tc.expected, result) }) } diff --git a/internal/plugin/sign.go b/internal/plugin/sign.go index 6b8aafd3e..6ddf113a2 100644 --- a/internal/plugin/sign.go +++ b/internal/plugin/sign.go @@ -63,7 +63,7 @@ func ExtractTgzPluginMetadata(r io.Reader) (*Metadata, error) { tr := tar.NewReader(gzr) for { header, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh index dcfd58876..4f20796ef 100755 --- a/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh +++ b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh @@ -1,9 +1,9 @@ -#!/bin/bash +#!/usr/bin/env sh echo "Hello from a Helm plugin" echo "PARAMS" -echo $* +echo "$@" $HELM_BIN ls --all diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh index dcfd58876..4f20796ef 100755 --- a/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh +++ 
b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh @@ -1,9 +1,9 @@ -#!/bin/bash +#!/usr/bin/env sh echo "Hello from a Helm plugin" echo "PARAMS" -echo $* +echo "$@" $HELM_BIN ls --all diff --git a/internal/statusreaders/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go index c074c3487..bf633c0dd 100644 --- a/internal/statusreaders/pod_status_reader.go +++ b/internal/statusreaders/pod_status_reader.go @@ -86,19 +86,19 @@ func podConditions(u *unstructured.Unstructured) (*status.Result, error) { }, }, }, nil - } - - message := "Pod in progress" - return &status.Result{ - Status: status.InProgressStatus, - Message: message, - Conditions: []status.Condition{ - { - Type: status.ConditionReconciling, - Status: corev1.ConditionTrue, - Reason: "PodInProgress", - Message: message, + default: + message := "Pod in progress" + return &status.Result{ + Status: status.InProgressStatus, + Message: message, + Conditions: []status.Condition{ + { + Type: status.ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: "PodInProgress", + Message: message, + }, }, - }, - }, nil + }, nil + } } diff --git a/internal/version/clientgo.go b/internal/version/clientgo.go new file mode 100644 index 000000000..ab2a38fd5 --- /dev/null +++ b/internal/version/clientgo.go @@ -0,0 +1,44 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime/debug" + "slices" + + _ "k8s.io/client-go/kubernetes" // Force k8s.io/client-go to be included in the build +) + +func K8sIOClientGoModVersion() (string, error) { + info, ok := debug.ReadBuildInfo() + if !ok { + return "", fmt.Errorf("failed to read build info") + } + + idx := slices.IndexFunc(info.Deps, func(m *debug.Module) bool { + return m.Path == "k8s.io/client-go" + }) + + if idx == -1 { + return "", fmt.Errorf("k8s.io/client-go not found in build info") + } + + m := info.Deps[idx] + + return m.Version, nil +} diff --git a/internal/version/clientgo_test.go b/internal/version/clientgo_test.go new file mode 100644 index 000000000..624c669af --- /dev/null +++ b/internal/version/clientgo_test.go @@ -0,0 +1,30 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestK8sClientGoModVersion(t *testing.T) { + // Unfortunately, test builds don't include debug info / module info + // So we expect "K8sIOClientGoModVersion" to return error + _, err := K8sIOClientGoModVersion() + require.ErrorContains(t, err, "k8s.io/client-go not found in build info") +} diff --git a/internal/version/version.go b/internal/version/version.go index f9e76eab7..3daf80893 100644 --- a/internal/version/version.go +++ b/internal/version/version.go @@ -14,13 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package version // import "helm.sh/helm/v4/internal/version" +package version import ( "flag" "fmt" + "log/slog" "runtime" "strings" + "testing" + + "github.com/Masterminds/semver/v3" ) var ( @@ -38,11 +42,10 @@ var ( gitCommit = "" // gitTreeState is the state of the git tree gitTreeState = "" +) - // The Kubernetes version can be set by LDFLAGS. In order to do that the value - // must be a string. - kubeClientVersionMajor = "" - kubeClientVersionMinor = "" +const ( + kubeClientGoVersionTesting = "v1.20" ) // BuildInfo describes the compile time information. @@ -74,12 +77,39 @@ func GetUserAgent() string { // Get returns build info func Get() BuildInfo { + + makeKubeClientVersionString := func() string { + // Test builds don't include debug info / module info + // (And even if they did, we probably want a stable version during tests anyway) + // Return a default value for test builds + if testing.Testing() { + return kubeClientGoVersionTesting + } + + vstr, err := K8sIOClientGoModVersion() + if err != nil { + slog.Error("failed to retrieve k8s.io/client-go version", slog.Any("error", err)) + return "" + } + + v, err := semver.NewVersion(vstr) + if err != nil { + slog.Error("unable to parse k8s.io/client-go version", slog.String("version", vstr), slog.Any("error", err)) + return "" + } + + kubeClientVersionMajor := v.Major() + 1 + kubeClientVersionMinor := v.Minor() + + return fmt.Sprintf("v%d.%d", kubeClientVersionMajor, kubeClientVersionMinor) + } + v := BuildInfo{ Version: GetVersion(), GitCommit: gitCommit, GitTreeState: gitTreeState, GoVersion: runtime.Version(), - KubeClientVersion: fmt.Sprintf("v%s.%s", kubeClientVersionMajor, kubeClientVersionMinor), + KubeClientVersion: makeKubeClientVersionString(), } // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output diff --git a/pkg/action/action.go b/pkg/action/action.go index 9555006be..c2a27940f 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -230,7 +230,7 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, if ch.Metadata.KubeVersion != "" { if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) { - return hs, b, "", fmt.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String()) + return hs, b, "", fmt.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.Version) } } @@ -502,7 +502,12 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (common.VersionSet // recordRelease with an update operation in case reuse has been set. 
func (cfg *Configuration) recordRelease(r *release.Release) { if err := cfg.Releases.Update(r); err != nil { - cfg.Logger().Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err)) + cfg.Logger().Warn( + "failed to update release", + slog.String("name", r.Name), + slog.Int("revision", r.Version), + slog.Any("error", err), + ) } } diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index b77a462f5..85ee42d64 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -201,6 +201,12 @@ func withMetadataDependency(dependency chart.Dependency) chartOption { } } +func withFile(file common.File) chartOption { + return func(opts *chartOptions) { + opts.Files = append(opts.Files, &file) + } +} + func withSampleTemplates() chartOption { return func(opts *chartOptions) { modTime := time.Now() diff --git a/pkg/action/get_metadata_test.go b/pkg/action/get_metadata_test.go index cd5988d8e..4caa966ab 100644 --- a/pkg/action/get_metadata_test.go +++ b/pkg/action/get_metadata_test.go @@ -121,7 +121,7 @@ func TestGetMetadata_Run_WithDependencies(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -180,7 +180,7 @@ func TestGetMetadata_Run_WithDependenciesAliases(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -251,7 +251,7 @@ func TestGetMetadata_Run_WithMixedDependencies(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -315,7 +315,7 @@ func TestGetMetadata_Run_WithAnnotations(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -370,8 +370,8 @@ func TestGetMetadata_Run_SpecificVersion(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel1) - cfg.Releases.Create(rel2) + require.NoError(t, cfg.Releases.Create(rel1)) + require.NoError(t, cfg.Releases.Create(rel2)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -424,7 +424,7 @@ func TestGetMetadata_Run_DifferentStatuses(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -480,7 +480,7 @@ func TestGetMetadata_Run_EmptyAppVersion(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) diff --git a/pkg/action/get_test.go b/pkg/action/get_test.go new file mode 100644 index 000000000..876819ee4 --- /dev/null +++ b/pkg/action/get_test.go @@ -0,0 +1,69 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + kubefake "helm.sh/helm/v4/pkg/kube/fake" + + "helm.sh/helm/v4/pkg/release/common" +) + +func TestNewGet(t *testing.T) { + config := actionConfigFixture(t) + client := NewGet(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) + assert.Equal(t, 0, client.Version) +} + +func TestGetRun(t *testing.T) { + config := actionConfigFixture(t) + client := NewGet(config) + simpleRelease := namedReleaseStub("test-release", common.StatusPendingUpgrade) + require.NoError(t, config.Releases.Create(simpleRelease)) + + releaser, err := client.Run(simpleRelease.Name) + require.NoError(t, err) + + result, err := releaserToV1Release(releaser) + require.NoError(t, err) + assert.Equal(t, simpleRelease.Name, result.Name) + assert.Equal(t, simpleRelease.Version, result.Version) +} + +func TestGetRun_UnreachableKubeClient(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.ConnectionError = errors.New("connection refused") + config.KubeClient = &failingKubeClient + + client := NewGet(config) + simpleRelease := namedReleaseStub("test-release", common.StatusPendingUpgrade) + require.NoError(t, config.Releases.Create(simpleRelease)) + + result, err := client.Run(simpleRelease.Name) + assert.Nil(t, result) + assert.Error(t, err) +} diff --git a/pkg/action/get_values_test.go b/pkg/action/get_values_test.go index 8e6588454..69a95a2e4 100644 --- a/pkg/action/get_values_test.go +++ b/pkg/action/get_values_test.go @@ -79,7 +79,7 @@ func TestGetValues_Run_UserConfigOnly(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -127,7 +127,7 @@ func TestGetValues_Run_AllValues(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -161,7 +161,7 @@ func TestGetValues_Run_EmptyValues(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) @@ -212,7 +212,7 @@ func TestGetValues_Run_NilConfig(t *testing.T) { Namespace: "default", } - cfg.Releases.Create(rel) + require.NoError(t, cfg.Releases.Create(rel)) result, err := client.Run(releaseName) require.NoError(t, err) diff --git a/pkg/action/history.go b/pkg/action/history.go index 90307c79b..3d561b3ad 100644 --- a/pkg/action/history.go +++ b/pkg/action/history.go @@ -20,7 +20,7 @@ import ( "fmt" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - release "helm.sh/helm/v4/pkg/release" + "helm.sh/helm/v4/pkg/release" ) // History is the action for checking the release's ledger. diff --git a/pkg/action/history_test.go b/pkg/action/history_test.go new file mode 100644 index 000000000..31fdd4a96 --- /dev/null +++ b/pkg/action/history_test.go @@ -0,0 +1,108 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + kubefake "helm.sh/helm/v4/pkg/kube/fake" + release "helm.sh/helm/v4/pkg/release/v1" + + "helm.sh/helm/v4/pkg/release/common" +) + +func TestNewHistory(t *testing.T) { + config := actionConfigFixture(t) + client := NewHistory(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestHistoryRun(t *testing.T) { + releaseName := "test-release" + simpleRelease := namedReleaseStub(releaseName, common.StatusPendingUpgrade) + updatedRelease := namedReleaseStub(releaseName, common.StatusDeployed) + updatedRelease.Chart.Metadata.Version = "0.1.1" + updatedRelease.Version = 2 + + config := actionConfigFixture(t) + client := NewHistory(config) + client.Max = 3 + client.cfg.Releases.MaxHistory = 3 + for _, rel := range []*release.Release{simpleRelease, updatedRelease} { + if err := client.cfg.Releases.Create(rel); err != nil { + t.Fatal(err, "Could not add releases to Config") + } + } + + releases, err := config.Releases.ListReleases() + require.NoError(t, err) + assert.Len(t, releases, 2, "expected 2 Releases in Config") + + releasers, err := client.Run(releaseName) + require.NoError(t, err) + assert.Len(t, releasers, 2, "expected 2 Releases in History result") + + release1, err := releaserToV1Release(releasers[0]) + require.NoError(t, err) + assert.Equal(t, simpleRelease.Name, release1.Name) + assert.Equal(t, simpleRelease.Version, release1.Version) + + release2, err := releaserToV1Release(releasers[1]) + require.NoError(t, err) + assert.Equal(t, updatedRelease.Name, release2.Name) + assert.Equal(t, updatedRelease.Version, release2.Version) +} + +func TestHistoryRun_UnreachableKubeClient(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.ConnectionError = errors.New("connection refused") + config.KubeClient = &failingKubeClient + + client := NewHistory(config) + result, err := client.Run("release-name") + assert.Nil(t, result) + assert.Error(t, err) +} + +func TestHistoryRun_InvalidReleaseNames(t *testing.T) { + config := actionConfigFixture(t) + client := NewHistory(config) + invalidReleaseNames := []string{ + "", + "too-long-release-name-max-53-characters-abcdefghijklmnopqrstuvwxyz", + "MyRelease", + "release_name", + "release@123", + "-badstart", + "badend-", + ".dotstart", + } + + for _, name := range invalidReleaseNames { + result, err := client.Run(name) + assert.Nil(t, result) + assert.ErrorContains(t, err, "release name is invalid") + } +} diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 1e4fec9bd..28033395b 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -32,7 +32,34 @@ import ( ) // execHook executes all of the hooks for the given hook event. 
-func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, + timeout time.Duration, serverSideApply bool) error { + + shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, waitOptions, timeout, serverSideApply) + if shutdown == nil { + return err + } + if err != nil { + if err := shutdown(); err != nil { + return err + } + return err + } + return shutdown() +} + +type ExecuteShutdownFunc = func() error + +func shutdownNoOp() error { + return nil +} + +// execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. +func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration, + serverSideApply bool) (ExecuteShutdownFunc, error) { + executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -50,13 +77,13 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Set default delete policy to before-hook-creation cfg.hookSetDeletePolicy(h) - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { - return err + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, waitOptions, timeout); err != nil { + return shutdownNoOp, err } resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true) if err != nil { - return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) } // Record the time at which the hook was applied to the cluster @@ -77,12 +104,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil { h.LastRun.CompletedAt = time.Now() h.LastRun.Phase = release.HookPhaseFailed - return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) + } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { - return fmt.Errorf("unable to get waiter: %w", err) + return shutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) } // Watch hook resources until they have completed err = waiter.WatchUntilReady(resources, timeout) @@ -98,36 +130,38 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { - // We log the error here as we want to propagate the hook failure upwards to the release object. 
- log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) - } - - // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks - // should be deleted under succeeded condition. - if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil { + return func() error { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, waitOptions, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) + } + + // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks + // should be deleted under succeeded condition. + if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { + return err + } return err - } - - return err + }, err } h.LastRun.Phase = release.HookPhaseSucceeded } - // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook - for i := len(executingHooks) - 1; i >= 0; i-- { - h := executingHooks[i] - if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { - // We log here as we still want to attempt hook resource deletion even if output logging fails. - log.Printf("error outputting logs for hook failure: %v", err) - } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { - return err + return func() error { + // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook + for i := len(executingHooks) - 1; i >= 0; i-- { + h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. + log.Printf("error outputting logs for hook failure: %v", err) + } + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { + return err + } } - } - - return nil + return nil + }, nil } // hookByWeight is a sorter for hooks @@ -143,7 +177,9 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. 
if h.Kind == "CustomResourceDefinition" { @@ -159,7 +195,12 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return joinErrors(errs, "; ") } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) + } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { return err } @@ -171,9 +212,11 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo } // deleteHooksByPolicy deletes all hooks if the hook policy instructs it to -func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + for _, h := range hooks { - if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, waitOptions, timeout); err != nil { return err } } diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 02b70dda1..0270a0630 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -18,6 +18,7 @@ package action import ( "bytes" + "context" "fmt" "io" "reflect" @@ -278,8 +279,8 @@ func (h *HookFailingKubeClient) Delete(resources kube.ResourceList, deletionProp return h.PrintingKubeClient.Delete(resources, deletionPropagation) } -func (h *HookFailingKubeClient) GetWaiter(strategy kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := h.PrintingKubeClient.GetWaiter(strategy) +func (h *HookFailingKubeClient) GetWaiterWithOptions(strategy kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + waiter, _ := h.PrintingKubeClient.GetWaiterWithOptions(strategy, opts...) 
return &HookFailingKubeWaiter{ PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter), failOn: h.failOn, @@ -394,7 +395,7 @@ data: } serverSideApply := true - err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply) + err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, nil, 600, serverSideApply) if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) { t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord) @@ -442,3 +443,51 @@ func TestConfiguration_hookSetDeletePolicy(t *testing.T) { }) } } + +func TestExecHook_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + failer := &kubefake.FailingKubeClient{ + PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, + } + + configuration := &Configuration{ + Releases: storage.Init(driver.NewMemory()), + KubeClient: failer, + Capabilities: common.DefaultCapabilities, + } + + rel := &release.Release{ + Name: "test-release", + Namespace: "test", + Hooks: []*release.Hook{ + { + Name: "test-hook", + Kind: "ConfigMap", + Path: "templates/hook.yaml", + Manifest: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-hook + namespace: test +data: + foo: bar +`, + Weight: 0, + Events: []release.HookEvent{ + release.HookPreInstall, + }, + }, + }, + } + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + waitOptions := []kube.WaitOption{kube.WithWaitContext(ctx)} + + err := configuration.execHook(rel, release.HookPreInstall, kube.StatusWatcherStrategy, waitOptions, 600, false) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/install.go b/pkg/action/install.go index 3de7267a1..a64e80caf 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -62,8 +62,8 @@ import ( "helm.sh/helm/v4/pkg/storage/driver" ) -// notesFileSuffix that we want to treat special. It goes through the templating engine -// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually +// notesFileSuffix that we want to treat specially. It goes through the templating engine +// but it's not a YAML file (resource) hence can't have hooks, etc. And the user actually // wants to see this file after rendering in the status command. However, it must be a suffix // since there can be filepath in front of it. const notesFileSuffix = "NOTES.txt" @@ -95,6 +95,7 @@ type Install struct { DisableHooks bool Replace bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool Devel bool DependencyUpdate bool @@ -116,7 +117,7 @@ type Install struct { Labels map[string]string // KubeVersion allows specifying a custom kubernetes version to use and // APIVersions allows a manual set of supported API Versions to be passed - // (for things like templating). These are ignored if ClientOnly is false + // (for things like templating). KubeVersion *common.KubeVersion APIVersions common.VersionSet // Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false @@ -192,7 +193,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { kube.ClientCreateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts)); err != nil { // If the error is CRD already exists, continue. 
if apierrors.IsAlreadyExists(err) { - crdName := res[0].Name + crdName := obj.Name i.cfg.Logger().Debug("CRD is already present. Skipping", "crd", crdName) continue } @@ -201,7 +202,13 @@ func (i *Install) installCRDs(crds []chart.CRD) error { totalItems = append(totalItems, res...) } if len(totalItems) > 0 { - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return fmt.Errorf("unable to get waiter: %w", err) } @@ -508,7 +515,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed pre-install: %s", err) } } @@ -534,7 +541,12 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return rel, fmt.Errorf("failed to get waiter: %w", err) } @@ -549,7 +561,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed post-install: %s", err) } } @@ -583,6 +595,7 @@ func (i *Install) failRelease(rel *release.Release, err error) (*release.Release uninstall.KeepHistory = false uninstall.Timeout = i.Timeout uninstall.WaitStrategy = i.WaitStrategy + uninstall.WaitOptions = i.WaitOptions if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil { return rel, fmt.Errorf("an error occurred while uninstalling the release. 
original install error: %w: %w", err, uninstallErr) } diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 792944fa9..35500e6f0 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -35,6 +35,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kuberuntime "k8s.io/apimachinery/pkg/runtime" @@ -43,10 +44,14 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest/fake" + ci "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/chart/common" + chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" + "helm.sh/helm/v4/pkg/registry" rcommon "helm.sh/helm/v4/pkg/release/common" release "helm.sh/helm/v4/pkg/release/v1" "helm.sh/helm/v4/pkg/storage/driver" @@ -534,7 +539,7 @@ func TestInstallRelease_NoHooks(t *testing.T) { instAction := installAction(t) instAction.DisableHooks = true instAction.ReleaseName = "no-hooks" - instAction.cfg.Releases.Create(releaseStub()) + require.NoError(t, instAction.cfg.Releases.Create(releaseStub())) vals := map[string]interface{}{} resi, err := instAction.Run(buildChart(), vals) @@ -574,7 +579,7 @@ func TestInstallRelease_ReplaceRelease(t *testing.T) { rel := releaseStub() rel.Info.Status = rcommon.StatusUninstalled - instAction.cfg.Releases.Create(rel) + require.NoError(t, instAction.cfg.Releases.Create(rel)) instAction.ReleaseName = rel.Name vals := map[string]interface{}{} @@ -606,7 +611,7 @@ func TestInstallRelease_KubeVersion(t *testing.T) { vals = map[string]interface{}{} _, err = instAction.Run(buildChart(withKube(">=99.0.0")), vals) is.Error(err) - is.Contains(err.Error(), "chart requires kubeVersion") + is.Contains(err.Error(), "chart requires kubeVersion: >=99.0.0 which is incompatible with Kubernetes v1.20.") } func TestInstallRelease_Wait(t *testing.T) { @@ -1102,3 +1107,138 @@ func TestInstallRun_UnreachableKubeClient(t *testing.T) { assert.Nil(t, res) assert.ErrorContains(t, err, "connection refused") } + +func TestInstallSetRegistryClient(t *testing.T) { + config := actionConfigFixture(t) + instAction := NewInstall(config) + + registryClient := ®istry.Client{} + instAction.SetRegistryClient(registryClient) + + assert.Equal(t, registryClient, instAction.GetRegistryClient()) +} + +func TestInstalLCRDs(t *testing.T) { + config := actionConfigFixture(t) + instAction := NewInstall(config) + + mockFile := common.File{ + Name: "crds/foo.yaml", + Data: []byte("hello"), + } + mockChart := buildChart(withFile(mockFile)) + crdsToInstall := mockChart.CRDObjects() + assert.Len(t, crdsToInstall, 1) + assert.Equal(t, crdsToInstall[0].File.Data, mockFile.Data) + + require.NoError(t, instAction.installCRDs(crdsToInstall)) +} + +func TestInstalLCRDs_KubeClient_BuildError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.BuildError = errors.New("build error") + config.KubeClient = &failingKubeClient + instAction := NewInstall(config) + + mockFile := common.File{ + Name: "crds/foo.yaml", + Data: []byte("hello"), + } + mockChart := buildChart(withFile(mockFile)) + crdsToInstall := mockChart.CRDObjects() + + require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD") +} 
+ +func TestInstalLCRDs_KubeClient_CreateError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.CreateError = errors.New("create error") + config.KubeClient = &failingKubeClient + instAction := NewInstall(config) + + mockFile := common.File{ + Name: "crds/foo.yaml", + Data: []byte("hello"), + } + mockChart := buildChart(withFile(mockFile)) + crdsToInstall := mockChart.CRDObjects() + + require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD") +} + +func TestInstalLCRDs_AlreadyExist(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + mockError := &apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonAlreadyExists, + }} + failingKubeClient.CreateError = mockError + config.KubeClient = &failingKubeClient + instAction := NewInstall(config) + + mockFile := common.File{ + Name: "crds/foo.yaml", + Data: []byte("hello"), + } + mockChart := buildChart(withFile(mockFile)) + crdsToInstall := mockChart.CRDObjects() + + assert.Nil(t, instAction.installCRDs(crdsToInstall)) +} + +func TestInstalLCRDs_WaiterError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.WaitError = errors.New("wait error") + failingKubeClient.BuildDummy = true + config.KubeClient = &failingKubeClient + instAction := NewInstall(config) + + mockFile := common.File{ + Name: "crds/foo.yaml", + Data: []byte("hello"), + } + mockChart := buildChart(withFile(mockFile)) + crdsToInstall := mockChart.CRDObjects() + + require.Error(t, instAction.installCRDs(crdsToInstall), "wait error") +} + +func TestCheckDependencies(t *testing.T) { + dependency := chart.Dependency{Name: "hello"} + mockChart := buildChart(withDependency()) + + assert.Nil(t, CheckDependencies(mockChart, []ci.Dependency{&dependency})) +} + +func TestCheckDependencies_MissingDependency(t *testing.T) { + dependency := chart.Dependency{Name: "missing"} + mockChart := buildChart(withDependency()) + + assert.ErrorContains(t, CheckDependencies(mockChart, []ci.Dependency{&dependency}), "missing in charts") +} + +func TestInstallRelease_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + instAction := installAction(t) + instAction.ReleaseName = "wait-options-test" + instAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + instAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + vals := map[string]interface{}{} + _, err := instAction.Run(buildChart(), vals) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/lint_test.go b/pkg/action/lint_test.go index 613149a4d..4684f91f1 100644 --- a/pkg/action/lint_test.go +++ b/pkg/action/lint_test.go @@ -17,7 +17,12 @@ limitations under the License. 
package action import ( + "errors" "testing" + + "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/pkg/chart/v2/lint/support" ) var ( @@ -163,3 +168,45 @@ func TestLint_ChartWithWarnings(t *testing.T) { } }) } + +func TestHasWarningsOrErrors(t *testing.T) { + testError := errors.New("test-error") + cases := []struct { + name string + data LintResult + expected bool + }{ + { + name: "has no warning messages and no errors", + data: LintResult{TotalChartsLinted: 1, Messages: make([]support.Message, 0), Errors: make([]error, 0)}, + expected: false, + }, + { + name: "has error", + data: LintResult{TotalChartsLinted: 1, Messages: make([]support.Message, 0), Errors: []error{testError}}, + expected: true, + }, + { + name: "has info message only", + data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.InfoSev, Path: "", Err: testError}}, Errors: make([]error, 0)}, + expected: false, + }, + { + name: "has warning message", + data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.WarningSev, Path: "", Err: testError}}, Errors: make([]error, 0)}, + expected: true, + }, + { + name: "has error message", + data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.ErrorSev, Path: "", Err: testError}}, Errors: make([]error, 0)}, + expected: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + result := HasWarningsOrErrors(&tc.data) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/pkg/action/package.go b/pkg/action/package.go index 963883707..0ab49538c 100644 --- a/pkg/action/package.go +++ b/pkg/action/package.go @@ -103,7 +103,7 @@ func (p *Package) Run(path string, _ map[string]interface{}) (string, error) { ch.Metadata.AppVersion = p.AppVersion } - if reqs := ac.MetaDependencies(); reqs != nil { + if reqs := ac.MetaDependencies(); len(reqs) > 0 { if err := CheckDependencies(ch, reqs); err != nil { return "", err } diff --git a/pkg/action/package_test.go b/pkg/action/package_test.go index b0ca0bec4..84dcb71c1 100644 --- a/pkg/action/package_test.go +++ b/pkg/action/package_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/require" "helm.sh/helm/v4/internal/test/ensure" ) @@ -90,7 +91,8 @@ func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) { passphrase := "secret-from-stdin" go func() { - w.Write([]byte(passphrase + "\n")) + _, err = w.Write([]byte(passphrase + "\n")) + require.NoError(t, err) }() for range 4 { @@ -152,3 +154,18 @@ func TestValidateVersion(t *testing.T) { }) } } + +func TestRun_ErrorPath(t *testing.T) { + client := NewPackage() + _, err := client.Run("err-path", nil) + require.Error(t, err) +} + +func TestRun(t *testing.T) { + chartPath := "testdata/charts/chart-with-schema" + client := NewPackage() + filename, err := client.Run(chartPath, nil) + require.NoError(t, err) + require.Equal(t, "empty-0.1.0.tgz", filename) + require.NoError(t, os.Remove(filename)) +} diff --git a/pkg/action/pull_test.go b/pkg/action/pull_test.go new file mode 100644 index 000000000..ba212973e --- /dev/null +++ b/pkg/action/pull_test.go @@ -0,0 +1,80 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/registry" +) + +func TestNewPull(t *testing.T) { + config := actionConfigFixture(t) + client := NewPull(WithConfig(config)) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestPullSetRegistryClient(t *testing.T) { + config := actionConfigFixture(t) + client := NewPull(WithConfig(config)) + + registryClient := ®istry.Client{} + client.SetRegistryClient(registryClient) + assert.Equal(t, registryClient, client.cfg.RegistryClient) +} + +func TestPullRun_ChartNotFound(t *testing.T) { + srv, err := startLocalServerForTests(t, nil) + if err != nil { + t.Fatal(err) + } + defer srv.Close() + + config := actionConfigFixture(t) + client := NewPull(WithConfig(config)) + client.Settings = cli.New() + client.RepoURL = srv.URL + + chartRef := "nginx" + _, err = client.Run(chartRef) + require.ErrorContains(t, err, "404 Not Found") +} + +func startLocalServerForTests(t *testing.T, handler http.Handler) (*httptest.Server, error) { + t.Helper() + if handler == nil { + fileBytes, err := os.ReadFile("../repo/v1/testdata/local-index.yaml") + if err != nil { + return nil, err + } + handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err = w.Write(fileBytes) + require.NoError(t, err) + }) + } + + return httptest.NewServer(handler), nil +} diff --git a/pkg/action/push_test.go b/pkg/action/push_test.go new file mode 100644 index 000000000..35c6f3efc --- /dev/null +++ b/pkg/action/push_test.go @@ -0,0 +1,66 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewPushWithPushConfig(t *testing.T) { + config := actionConfigFixture(t) + client := NewPushWithOpts(WithPushConfig(config)) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestNewPushWithTLSClientConfig(t *testing.T) { + certFile := "certFile" + keyFile := "keyFile" + caFile := "caFile" + client := NewPushWithOpts(WithTLSClientConfig(certFile, keyFile, caFile)) + + assert.NotNil(t, client) + assert.Equal(t, certFile, client.certFile) + assert.Equal(t, keyFile, client.keyFile) + assert.Equal(t, caFile, client.caFile) +} + +func TestNewPushWithInsecureSkipTLSVerify(t *testing.T) { + client := NewPushWithOpts(WithInsecureSkipTLSVerify(true)) + + assert.NotNil(t, client) + assert.Equal(t, true, client.insecureSkipTLSVerify) +} + +func TestNewPushWithPlainHTTP(t *testing.T) { + client := NewPushWithOpts(WithPlainHTTP(true)) + + assert.NotNil(t, client) + assert.Equal(t, true, client.plainHTTP) +} + +func TestNewPushWithPushOptWriter(t *testing.T) { + buf := new(bytes.Buffer) + client := NewPushWithOpts(WithPushOptWriter(buf)) + + assert.NotNil(t, client) + assert.Equal(t, buf, client.out) +} diff --git a/pkg/action/registry_login_test.go b/pkg/action/registry_login_test.go new file mode 100644 index 000000000..de2450d9d --- /dev/null +++ b/pkg/action/registry_login_test.go @@ -0,0 +1,84 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewRegistryLogin(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestWithCertFile(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + certFile := "testdata/cert.pem" + opt := WithCertFile(certFile) + + assert.Nil(t, opt(client)) + assert.Equal(t, certFile, client.certFile) +} + +func TestWithInsecure(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + opt := WithInsecure(true) + + assert.Nil(t, opt(client)) + assert.Equal(t, true, client.insecure) +} + +func TestWithKeyFile(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + keyFile := "testdata/key.pem" + opt := WithKeyFile(keyFile) + + assert.Nil(t, opt(client)) + assert.Equal(t, keyFile, client.keyFile) +} + +func TestWithCAFile(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + caFile := "testdata/ca.pem" + opt := WithCAFile(caFile) + + assert.Nil(t, opt(client)) + assert.Equal(t, caFile, client.caFile) +} + +func TestWithPlainHTTPLogin(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogin(config) + + opt := WithPlainHTTPLogin(true) + + assert.Nil(t, opt(client)) + assert.Equal(t, true, client.plainHTTP) +} diff --git a/pkg/action/registry_logout_test.go b/pkg/action/registry_logout_test.go new file mode 100644 index 000000000..669d9c9ba --- /dev/null +++ b/pkg/action/registry_logout_test.go @@ -0,0 +1,31 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewRegistryLogout(t *testing.T) { + config := actionConfigFixture(t) + client := NewRegistryLogout(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index b649579f4..043a41236 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -41,8 +41,9 @@ const ( // // It provides the implementation of 'helm test'. type ReleaseTesting struct { - cfg *Configuration - Timeout time.Duration + cfg *Configuration + Timeout time.Duration + WaitOptions []kube.WaitOption // Used for fetching logs from test pods Namespace string Filters map[string][]string @@ -57,24 +58,24 @@ func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { } // Run executes 'helm test' against the given release. 
-func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { +func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownFunc, error) { if err := r.cfg.KubeClient.IsReachable(); err != nil { - return nil, err + return nil, shutdownNoOp, err } if err := chartutil.ValidateReleaseName(name); err != nil { - return nil, fmt.Errorf("releaseTest: Release name is invalid: %s", name) + return nil, shutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name) } // finds the non-deleted release with the given name reli, err := r.cfg.Releases.Last(name) if err != nil { - return reli, err + return reli, shutdownNoOp, err } rel, err := releaserToV1Release(reli) if err != nil { - return rel, err + return reli, shutdownNoOp, err } skippedHooks := []*release.Hook{} @@ -102,14 +103,16 @@ func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { } serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply) - if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply); err != nil { + shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.WaitOptions, r.Timeout, serverSideApply) + + if err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) - r.cfg.Releases.Update(rel) - return rel, err + r.cfg.Releases.Update(reli) + return reli, shutdown, err } rel.Hooks = append(skippedHooks, rel.Hooks...) - return rel, r.cfg.Releases.Update(rel) + return reli, shutdown, r.cfg.Releases.Update(reli) } // GetPodLogs will write the logs for all test pods in the given release into diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go new file mode 100644 index 000000000..ab35e104a --- /dev/null +++ b/pkg/action/release_testing_test.go @@ -0,0 +1,119 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/kube" + kubefake "helm.sh/helm/v4/pkg/kube/fake" + release "helm.sh/helm/v4/pkg/release/v1" +) + +func TestNewReleaseTesting(t *testing.T) { + config := actionConfigFixture(t) + client := NewReleaseTesting(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestReleaseTestingRun_UnreachableKubeClient(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.ConnectionError = errors.New("connection refused") + config.KubeClient = &failingKubeClient + + client := NewReleaseTesting(config) + result, _, err := client.Run("") + assert.Nil(t, result) + assert.Error(t, err) +} + +func TestReleaseTestingGetPodLogs_FilterEvents(t *testing.T) { + config := actionConfigFixture(t) + require.NoError(t, config.Init(cli.New().RESTClientGetter(), "", os.Getenv("HELM_DRIVER"))) + client := NewReleaseTesting(config) + client.Filters[ExcludeNameFilter] = []string{"event-1"} + client.Filters[IncludeNameFilter] = []string{"event-3"} + + hooks := []*release.Hook{ + { + Name: "event-1", + Events: []release.HookEvent{release.HookTest}, + }, + { + Name: "event-2", + Events: []release.HookEvent{release.HookTest}, + }, + } + + out := &bytes.Buffer{} + require.NoError(t, client.GetPodLogs(out, &release.Release{Hooks: hooks})) + + assert.Empty(t, out.String()) +} + +func TestReleaseTestingGetPodLogs_PodRetrievalError(t *testing.T) { + config := actionConfigFixture(t) + require.NoError(t, config.Init(cli.New().RESTClientGetter(), "", os.Getenv("HELM_DRIVER"))) + client := NewReleaseTesting(config) + + hooks := []*release.Hook{ + { + Name: "event-1", + Events: []release.HookEvent{release.HookTest}, + }, + } + + require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod logs") +} + +func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a release with a test hook + rel := releaseStub() + rel.Name = "wait-options-test-release" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + client := NewReleaseTesting(config) + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + _, _, err := client.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index cd160dfec..03150532e 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -18,8 +18,8 @@ package action import ( "bytes" + "errors" "fmt" - "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,6 +28,7 @@ import ( "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/release/common" release "helm.sh/helm/v4/pkg/release/v1" + "helm.sh/helm/v4/pkg/storage/driver" ) // Rollback is the action for rolling back to a given release. 
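The `WaitOptions` tests above (and the matching ones for install, rollback, uninstall and upgrade later in this diff) all use the same trick: hand the action a marker `kube.WaitOption` and let the fake client record whatever reaches its waiter constructor. The sketch below shows just that recording-fake idea; every type in it is a local stand-in, not Helm's actual `kube` or `kubefake` package.

```go
// Self-contained sketch of the "marker wait option" test technique.
package main

import "fmt"

type waiterConfig struct{ label string }

// WaitOption mirrors the functional-option shape used by the real kube package.
type WaitOption func(*waiterConfig)

func WithLabel(l string) WaitOption { return func(c *waiterConfig) { c.label = l } }

// recordingClient plays the role of the failing fake: it remembers the
// options handed to it so a test can verify they were passed downstream.
type recordingClient struct {
	RecordedWaitOptions []WaitOption
}

func (c *recordingClient) GetWaiterWithOptions(opts ...WaitOption) {
	c.RecordedWaitOptions = append(c.RecordedWaitOptions, opts...)
}

func main() {
	fake := &recordingClient{}

	// What the action under test does internally with its WaitOptions field.
	fake.GetWaiterWithOptions(WithLabel("marker"))

	// What the test asserts: the marker option made it to the client.
	fmt.Println("recorded options:", len(fake.RecordedWaitOptions)) // 1
}
```

The tests only assert that `RecordedWaitOptions` is non-empty, so the marker option itself (here `WithWaitContext` in the real tests) never has to influence behaviour.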
@@ -39,6 +40,7 @@ type Rollback struct { Version int Timeout time.Duration WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool DisableHooks bool // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster @@ -209,7 +211,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // pre-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } else { @@ -250,7 +252,12 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas return targetRelease, err } - waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := r.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(r.WaitStrategy, r.WaitOptions...) + } else { + waiter, err = r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + } if err != nil { return nil, fmt.Errorf("unable to get waiter: %w", err) } @@ -272,13 +279,13 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name) - if err != nil && !strings.Contains(err.Error(), "has no deployed releases") { + if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) { return nil, err } // Supersede all previous deployments, see issue #2941. diff --git a/pkg/action/rollback_test.go b/pkg/action/rollback_test.go new file mode 100644 index 000000000..deb6c7c80 --- /dev/null +++ b/pkg/action/rollback_test.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "context" + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/pkg/kube" + kubefake "helm.sh/helm/v4/pkg/kube/fake" +) + +func TestNewRollback(t *testing.T) { + config := actionConfigFixture(t) + client := NewRollback(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) +} + +func TestRollbackRun_UnreachableKubeClient(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.ConnectionError = errors.New("connection refused") + config.KubeClient = &failingKubeClient + + client := NewRollback(config) + assert.Error(t, client.Run("")) +} + +func TestRollback_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a deployed release and a second version to roll back to + rel := releaseStub() + rel.Name = "wait-options-rollback" + rel.Info.Status = "deployed" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + rel2 := releaseStub() + rel2.Name = "wait-options-rollback" + rel2.Version = 2 + rel2.Info.Status = "deployed" + rel2.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel2)) + + client := NewRollback(config) + client.Version = 1 + client.WaitStrategy = kube.StatusWatcherStrategy + client.ServerSideApply = "auto" + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + err := client.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/show_test.go b/pkg/action/show_test.go index f3b767fca..6e270ac6d 100644 --- a/pkg/action/show_test.go +++ b/pkg/action/show_test.go @@ -20,8 +20,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/registry" ) func TestShow(t *testing.T) { @@ -168,3 +171,12 @@ bar t.Errorf("Expected\n%q\nGot\n%q\n", expect, output) } } + +func TestShowSetRegistryClient(t *testing.T) { + config := actionConfigFixture(t) + client := NewShow(ShowAll, config) + + registryClient := ®istry.Client{} + client.SetRegistryClient(registryClient) + assert.Equal(t, registryClient, client.registryClient) +} diff --git a/pkg/action/status_test.go b/pkg/action/status_test.go new file mode 100644 index 000000000..674715aff --- /dev/null +++ b/pkg/action/status_test.go @@ -0,0 +1,143 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + kubefake "helm.sh/helm/v4/pkg/kube/fake" + rcommon "helm.sh/helm/v4/pkg/release/common" + release "helm.sh/helm/v4/pkg/release/v1" +) + +func TestNewStatus(t *testing.T) { + config := actionConfigFixture(t) + client := NewStatus(config) + + assert.NotNil(t, client) + assert.Equal(t, config, client.cfg) + assert.Equal(t, 0, client.Version) +} + +func TestStatusRun(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.BuildDummy = true + config.KubeClient = &failingKubeClient + client := NewStatus(config) + client.ShowResourcesTable = true + + releaseName := "test-release" + require.NoError(t, configureReleaseContent(config, releaseName)) + releaser, err := client.Run(releaseName) + require.NoError(t, err) + + result, err := releaserToV1Release(releaser) + require.NoError(t, err) + assert.Equal(t, releaseName, result.Name) + assert.Equal(t, 1, result.Version) +} + +func TestStatusRun_KubeClientNotReachable(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.ConnectionError = errors.New("connection refused") + config.KubeClient = &failingKubeClient + + client := NewStatus(config) + + result, err := client.Run("") + assert.Nil(t, result) + assert.Error(t, err) +} + +func TestStatusRun_KubeClientBuildTableError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.BuildTableError = errors.New("build table error") + config.KubeClient = &failingKubeClient + + releaseName := "test-release" + require.NoError(t, configureReleaseContent(config, releaseName)) + + client := NewStatus(config) + client.ShowResourcesTable = true + + result, err := client.Run(releaseName) + + assert.Nil(t, result) + assert.ErrorContains(t, err, "build table error") +} + +func TestStatusRun_KubeClientBuildError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.BuildError = errors.New("build error") + config.KubeClient = &failingKubeClient + + releaseName := "test-release" + require.NoError(t, configureReleaseContent(config, releaseName)) + + client := NewStatus(config) + client.ShowResourcesTable = false + + result, err := client.Run(releaseName) + assert.Nil(t, result) + assert.ErrorContains(t, err, "build error") +} + +func TestStatusRun_KubeClientGetError(t *testing.T) { + config := actionConfigFixture(t) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} + failingKubeClient.BuildError = errors.New("get error") + config.KubeClient = &failingKubeClient + + releaseName := "test-release" + require.NoError(t, configureReleaseContent(config, releaseName)) + client := NewStatus(config) + + result, err := client.Run(releaseName) + assert.Nil(t, result) + assert.ErrorContains(t, err, "get error") +} + +func configureReleaseContent(cfg *Configuration, releaseName string) error { + rel := 
&release.Release{ + Name: releaseName, + Info: &release.Info{ + Status: rcommon.StatusDeployed, + }, + Manifest: testManifest, + Version: 1, + Namespace: "default", + } + + return cfg.Releases.Create(rel) +} + +const testManifest = ` +apiVersion: v1 +kind: Pod +metadata: + namespace: default + name: test-application +` diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index d5474490c..79156991c 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -45,6 +45,7 @@ type Uninstall struct { IgnoreNotFound bool KeepHistory bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption DeletionPropagation string Timeout time.Duration Description string @@ -63,7 +64,13 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) return nil, err } - waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...) + } else { + waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + } if err != nil { return nil, err } @@ -127,7 +134,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { return res, err } } else { @@ -157,7 +164,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { errs = append(errs, err) } } @@ -188,6 +195,25 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) u.cfg.Logger().Debug("uninstall: Failed to store updated release", slog.Any("error", err)) } + // Supersede all previous deployments, see issue #12556 (which is a + // variation on #2941). + deployed, err := u.cfg.Releases.DeployedAll(name) + if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) { + return nil, err + } + for _, reli := range deployed { + rel, err := releaserToV1Release(reli) + if err != nil { + return nil, err + } + + u.cfg.Logger().Debug("superseding previous deployment", "version", rel.Version) + rel.Info.Status = common.StatusSuperseded + if err := u.cfg.Releases.Update(rel); err != nil { + u.cfg.Logger().Debug("uninstall: Failed to store updated release", slog.Any("error", err)) + } + } + if len(errs) > 0 { return res, fmt.Errorf("uninstallation completed with %d error(s): %w", len(errs), joinErrors(errs, "; ")) } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index fba1e391f..b5a76d983 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package action import ( + "context" "errors" "fmt" "io" @@ -82,7 +83,7 @@ func TestUninstallRelease_deleteRelease(t *testing.T) { "password": "password" } }` - unAction.cfg.Releases.Create(rel) + require.NoError(t, unAction.cfg.Releases.Create(rel)) res, err := unAction.Run(rel.Name) is.NoError(err) expected := `These resources were kept due to the resource policy: @@ -112,7 +113,7 @@ func TestUninstallRelease_Wait(t *testing.T) { "password": "password" } }` - unAction.cfg.Releases.Create(rel) + require.NoError(t, unAction.cfg.Releases.Create(rel)) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitForDeleteError = fmt.Errorf("U timed out") unAction.cfg.KubeClient = failer @@ -146,7 +147,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { "password": "password" } }` - unAction.cfg.Releases.Create(rel) + require.NoError(t, unAction.cfg.Releases.Create(rel)) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.DeleteError = fmt.Errorf("Uninstall with cascade failed") failer.BuildDummy = true @@ -169,3 +170,40 @@ func TestUninstallRun_UnreachableKubeClient(t *testing.T) { assert.Nil(t, result) assert.ErrorContains(t, err, "connection refused") } + +func TestUninstall_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + unAction := uninstallAction(t) + unAction.DisableHooks = true + unAction.DryRun = false + unAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + unAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + rel := releaseStub() + rel.Name = "wait-options-uninstall" + rel.Manifest = `{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "secret" + }, + "type": "Opaque", + "data": { + "password": "password" + } + }` + require.NoError(t, unAction.cfg.Releases.Create(rel)) + + // Access the underlying FailingKubeClient to check recorded options + failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + _, err := unAction.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 0172d8160..72f58d0d0 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -72,6 +72,8 @@ type Upgrade struct { Timeout time.Duration // WaitStrategy determines what type of waiting should be done WaitStrategy kube.WaitStrategy + // WaitOptions are additional options for waiting on resources + WaitOptions []kube.WaitOption // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. 
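Rollback, Uninstall and, in the next hunk, Upgrade all obtain their waiter the same way: a type assertion checks whether the configured client also implements `kube.InterfaceWaitOptions` and, if so, prefers `GetWaiterWithOptions` so the new `WaitOptions` reach the waiter; otherwise the existing `GetWaiter` path is kept. Here is a compilable sketch of that optional-interface fallback, using illustrative stand-in types rather than Helm's real kube API.

```go
// Sketch of the capability-check fallback pattern repeated across the actions.
package main

import "fmt"

// Waiter stands in for kube.Waiter.
type Waiter interface{ Wait() error }

type noopWaiter struct{ withOptions bool }

func (w noopWaiter) Wait() error { return nil }

// optionAware mirrors the role of kube.InterfaceWaitOptions: an optional
// interface that newer clients may implement in addition to GetWaiter.
type optionAware interface {
	GetWaiterWithOptions(opts ...string) (Waiter, error)
}

type basicClient struct{}

func (basicClient) GetWaiter() (Waiter, error) { return noopWaiter{}, nil }

type newClient struct{ basicClient }

func (newClient) GetWaiterWithOptions(opts ...string) (Waiter, error) {
	return noopWaiter{withOptions: true}, nil
}

// waiterFor prefers the richer constructor when the client supports it.
func waiterFor(client interface{ GetWaiter() (Waiter, error) }, opts ...string) (Waiter, error) {
	if c, ok := client.(optionAware); ok {
		return c.GetWaiterWithOptions(opts...)
	}
	return client.GetWaiter()
}

func main() {
	w1, _ := waiterFor(basicClient{}, "ignored")
	w2, _ := waiterFor(newClient{}, "honoured")
	fmt.Printf("basic honoured options: %v, new honoured options: %v\n",
		w1.(noopWaiter).withOptions, w2.(noopWaiter).withOptions)
}
```

The point of the assertion is backward compatibility: client implementations that predate the options-aware interface keep working through the plain `GetWaiter` branch without any change.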
@@ -465,7 +467,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // pre-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) return } @@ -486,7 +488,12 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele return } - waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...) + } else { + waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + } if err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) @@ -508,7 +515,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // post-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) return } @@ -528,7 +535,11 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) - u.cfg.Logger().Warn("upgrade failed", "name", rel.Name, slog.Any("error", err)) + u.cfg.Logger().Warn( + "upgrade failed", + slog.String("name", rel.Name), + slog.Any("error", err), + ) rel.Info.Status = rcommon.StatusFailed rel.Info.Description = msg @@ -579,6 +590,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version rollin.WaitStrategy = u.WaitStrategy + rollin.WaitOptions = u.WaitOptions rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.ForceReplace = u.ForceReplace diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index f081690cf..694fb08b8 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -25,16 +25,19 @@ import ( "testing" "time" - chart "helm.sh/helm/v4/pkg/chart/v2" - "helm.sh/helm/v4/pkg/kube" - "helm.sh/helm/v4/pkg/storage/driver" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/resource" + chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" + "helm.sh/helm/v4/pkg/registry" "helm.sh/helm/v4/pkg/release/common" release "helm.sh/helm/v4/pkg/release/v1" + "helm.sh/helm/v4/pkg/storage/driver" ) func upgradeAction(t *testing.T) *Upgrade { @@ -85,7 +88,7 @@ func TestUpgradeRelease_Wait(t *testing.T) { rel := releaseStub() rel.Name = "come-fail-away" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, 
upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") @@ -109,7 +112,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { rel := releaseStub() rel.Name = "come-fail-away" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") @@ -134,7 +137,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { rel := releaseStub() rel.Name = "come-fail-away" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") @@ -163,7 +166,7 @@ func TestUpgradeRelease_RollbackOnFailure(t *testing.T) { rel := releaseStub() rel.Name = "nuketown" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) // We can't make Update error because then the rollback won't work @@ -193,7 +196,7 @@ func TestUpgradeRelease_RollbackOnFailure(t *testing.T) { rel := releaseStub() rel.Name = "fallout" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.UpdateError = fmt.Errorf("update fail") @@ -401,12 +404,12 @@ func TestUpgradeRelease_Pending(t *testing.T) { rel := releaseStub() rel.Name = "come-fail-away" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) rel2 := releaseStub() rel2.Name = "come-fail-away" rel2.Info.Status = common.StatusPendingUpgrade rel2.Version = 2 - upAction.cfg.Releases.Create(rel2) + require.NoError(t, upAction.cfg.Releases.Create(rel2)) vals := map[string]interface{}{} @@ -422,7 +425,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { rel := releaseStub() rel.Name = "interrupted-release" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second @@ -451,7 +454,7 @@ func TestUpgradeRelease_Interrupted_RollbackOnFailure(t *testing.T) { rel := releaseStub() rel.Name = "interrupted-release" rel.Info.Status = common.StatusDeployed - upAction.cfg.Releases.Create(rel) + require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 5 * time.Second @@ -804,3 +807,47 @@ func TestUpgradeRun_UnreachableKubeClient(t *testing.T) { assert.Nil(t, result) assert.ErrorContains(t, err, "connection refused") } + +func TestUpgradeSetRegistryClient(t *testing.T) { + config := actionConfigFixture(t) + client := NewUpgrade(config) + + registryClient := ®istry.Client{} + client.SetRegistryClient(registryClient) + assert.Equal(t, registryClient, client.registryClient) +} + +func TestObjectKey(t *testing.T) { + obj := &appsv1.Deployment{} + obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}) + info := resource.Info{Name: "name", Namespace: "namespace", Object: obj} + + assert.Equal(t, 
"apps/v1/Deployment/namespace/name", objectKey(&info)) +} + +func TestUpgradeRelease_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + req := require.New(t) + + upAction := upgradeAction(t) + rel := releaseStub() + rel.Name = "wait-options-test" + rel.Info.Status = common.StatusDeployed + req.NoError(upAction.cfg.Releases.Create(rel)) + + upAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + upAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + vals := map[string]interface{}{} + _, err := upAction.Run(rel.Name, buildChart(), vals) + req.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/verify_test.go b/pkg/action/verify_test.go new file mode 100644 index 000000000..343dacaef --- /dev/null +++ b/pkg/action/verify_test.go @@ -0,0 +1,48 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewVerify(t *testing.T) { + client := NewVerify() + + assert.NotNil(t, client) +} + +func TestVerifyRun(t *testing.T) { + client := NewVerify() + + client.Keyring = "../downloader/testdata/helm-test-key.pub" + output, err := client.Run("../downloader/testdata/signtest-0.1.0.tgz") + assert.Contains(t, output, "Signed by:") + assert.Contains(t, output, "Using Key With Fingerprint:") + assert.Contains(t, output, "Chart Hash Verified:") + require.NoError(t, err) +} + +func TestVerifyRun_DownloadError(t *testing.T) { + client := NewVerify() + output, err := client.Run("invalid-chart-path") + require.Error(t, err) + assert.Empty(t, output) +} diff --git a/pkg/chart/common.go b/pkg/chart/common.go index 8080f3dc8..cd87e91e7 100644 --- a/pkg/chart/common.go +++ b/pkg/chart/common.go @@ -186,7 +186,7 @@ func structToMap(obj interface{}) (map[string]interface{}, error) { objValue := reflect.ValueOf(obj) // If the value is a pointer, dereference it - if objValue.Kind() == reflect.Ptr { + if objValue.Kind() == reflect.Pointer { objValue = objValue.Elem() } @@ -209,7 +209,7 @@ func structToMap(obj interface{}) (map[string]interface{}, error) { return nil, err } result[field.Name] = nestedMap - case reflect.Ptr: + case reflect.Pointer: // Recurse for pointers by dereferencing if value.IsNil() { result[field.Name] = nil @@ -224,7 +224,7 @@ func structToMap(obj interface{}) (map[string]interface{}, error) { sliceOfMaps := make([]interface{}, value.Len()) for j := 0; j < value.Len(); j++ { sliceElement := value.Index(j) - if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == reflect.Ptr { + if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == 
reflect.Pointer { nestedMap, err := structToMap(sliceElement.Interface()) if err != nil { return nil, err diff --git a/pkg/chart/common/capabilities.go b/pkg/chart/common/capabilities.go index 355c3978a..18d00de90 100644 --- a/pkg/chart/common/capabilities.go +++ b/pkg/chart/common/capabilities.go @@ -19,7 +19,10 @@ import ( "fmt" "slices" "strconv" + "strings" + "testing" + "github.com/Masterminds/semver/v3" "k8s.io/client-go/kubernetes/scheme" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -29,25 +32,23 @@ import ( helmversion "helm.sh/helm/v4/internal/version" ) -var ( - // The Kubernetes version can be set by LDFLAGS. In order to do that the value - // must be a string. - k8sVersionMajor = "1" - k8sVersionMinor = "20" +const ( + kubeVersionMajorTesting = 1 + kubeVersionMinorTesting = 20 +) +var ( // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). DefaultVersionSet = allKnownVersions() - // DefaultCapabilities is the default set of capabilities. - DefaultCapabilities = &Capabilities{ - KubeVersion: KubeVersion{ - Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor), - Major: k8sVersionMajor, - Minor: k8sVersionMinor, - }, - APIVersions: DefaultVersionSet, - HelmVersion: helmversion.Get(), - } + DefaultCapabilities = func() *Capabilities { + caps, err := makeDefaultCapabilities() + if err != nil { + panic(fmt.Sprintf("failed to create default capabilities: %v", err)) + } + return caps + + }() ) // Capabilities describes the capabilities of the Kubernetes cluster. @@ -70,15 +71,22 @@ func (capabilities *Capabilities) Copy() *Capabilities { // KubeVersion is the Kubernetes version. type KubeVersion struct { - Version string // Kubernetes version - Major string // Kubernetes major version - Minor string // Kubernetes minor version + Version string // Full version (e.g., v1.33.4-gke.1245000) + normalizedVersion string // Normalized for constraint checking (e.g., v1.33.4) + Major string // Kubernetes major version + Minor string // Kubernetes minor version } -// String implements fmt.Stringer -func (kv *KubeVersion) String() string { return kv.Version } +// String implements fmt.Stringer. +// Returns the normalized version used for constraint checking. +func (kv *KubeVersion) String() string { + if kv.normalizedVersion != "" { + return kv.normalizedVersion + } + return kv.Version +} -// GitVersion returns the Kubernetes version string. +// GitVersion returns the full Kubernetes version string. // // Deprecated: use KubeVersion.Version. 
func (kv *KubeVersion) GitVersion() string { return kv.Version } @@ -91,10 +99,21 @@ func ParseKubeVersion(version string) (*KubeVersion, error) { if err != nil { return nil, err } + + // Preserve original input (e.g., v1.33.4-gke.1245000) + gitVersion := version + if !strings.HasPrefix(version, "v") { + gitVersion = "v" + version + } + + // Normalize for constraint checking (strips all suffixes) + normalizedVer := "v" + sv.String() + return &KubeVersion{ - Version: "v" + sv.String(), - Major: strconv.FormatUint(uint64(sv.Major()), 10), - Minor: strconv.FormatUint(uint64(sv.Minor()), 10), + Version: gitVersion, + normalizedVersion: normalizedVer, + Major: strconv.FormatUint(uint64(sv.Major()), 10), + Minor: strconv.FormatUint(uint64(sv.Minor()), 10), }, nil } @@ -122,3 +141,42 @@ func allKnownVersions() VersionSet { } return vs } + +func makeDefaultCapabilities() (*Capabilities, error) { + // Test builds don't include debug info / module info + // (And even if they did, we probably want stable capabilities for tests anyway) + // Return a default value for test builds + if testing.Testing() { + return newCapabilities(kubeVersionMajorTesting, kubeVersionMinorTesting) + } + + vstr, err := helmversion.K8sIOClientGoModVersion() + if err != nil { + return nil, fmt.Errorf("failed to retrieve k8s.io/client-go version: %w", err) + } + + v, err := semver.NewVersion(vstr) + if err != nil { + return nil, fmt.Errorf("unable to parse k8s.io/client-go version %q: %v", vstr, err) + } + + kubeVersionMajor := v.Major() + 1 + kubeVersionMinor := v.Minor() + + return newCapabilities(kubeVersionMajor, kubeVersionMinor) +} + +func newCapabilities(kubeVersionMajor, kubeVersionMinor uint64) (*Capabilities, error) { + + version := fmt.Sprintf("v%d.%d.0", kubeVersionMajor, kubeVersionMinor) + return &Capabilities{ + KubeVersion: KubeVersion{ + Version: version, + normalizedVersion: version, + Major: fmt.Sprintf("%d", kubeVersionMajor), + Minor: fmt.Sprintf("%d", kubeVersionMinor), + }, + APIVersions: DefaultVersionSet, + HelmVersion: helmversion.Get(), + }, nil +} diff --git a/pkg/chart/common/capabilities_test.go b/pkg/chart/common/capabilities_test.go index 716b903aa..b96d7d29b 100644 --- a/pkg/chart/common/capabilities_test.go +++ b/pkg/chart/common/capabilities_test.go @@ -41,7 +41,8 @@ func TestDefaultVersionSet(t *testing.T) { } func TestDefaultCapabilities(t *testing.T) { - kv := DefaultCapabilities.KubeVersion + caps := DefaultCapabilities + kv := caps.KubeVersion if kv.String() != "v1.20.0" { t.Errorf("Expected default KubeVersion.String() to be v1.20.0, got %q", kv.String()) } @@ -57,11 +58,8 @@ func TestDefaultCapabilities(t *testing.T) { if kv.Minor != "20" { t.Errorf("Expected default KubeVersion.Minor to be 20, got %q", kv.Minor) } -} - -func TestDefaultCapabilitiesHelmVersion(t *testing.T) { - hv := DefaultCapabilities.HelmVersion + hv := caps.HelmVersion if hv.Version != "v4.1" { t.Errorf("Expected default HelmVersion to be v4.1, got %q", hv.Version) } @@ -83,18 +81,41 @@ func TestParseKubeVersion(t *testing.T) { } } -func TestParseKubeVersionSuffix(t *testing.T) { - kv, err := ParseKubeVersion("v1.28+") - if err != nil { - t.Errorf("Expected v1.28+ to parse successfully") - } - if kv.Version != "v1.28" { - t.Errorf("Expected parsed KubeVersion.Version to be v1.28, got %q", kv.String()) +func TestParseKubeVersionWithVendorSuffixes(t *testing.T) { + tests := []struct { + name string + input string + wantVer string + wantString string + wantMajor string + wantMinor string + }{ + {"GKE vendor suffix", 
"v1.33.4-gke.1245000", "v1.33.4-gke.1245000", "v1.33.4", "1", "33"}, + {"GKE without v", "1.30.2-gke.1587003", "v1.30.2-gke.1587003", "v1.30.2", "1", "30"}, + {"EKS trailing +", "v1.28+", "v1.28+", "v1.28", "1", "28"}, + {"EKS + without v", "1.28+", "v1.28+", "v1.28", "1", "28"}, + {"Standard version", "v1.31.0", "v1.31.0", "v1.31.0", "1", "31"}, + {"Standard without v", "1.29.0", "v1.29.0", "v1.29.0", "1", "29"}, } - if kv.Major != "1" { - t.Errorf("Expected parsed KubeVersion.Major to be 1, got %q", kv.Major) - } - if kv.Minor != "28" { - t.Errorf("Expected parsed KubeVersion.Minor to be 28, got %q", kv.Minor) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kv, err := ParseKubeVersion(tt.input) + if err != nil { + t.Fatalf("ParseKubeVersion() error = %v", err) + } + if kv.Version != tt.wantVer { + t.Errorf("Version = %q, want %q", kv.Version, tt.wantVer) + } + if kv.String() != tt.wantString { + t.Errorf("String() = %q, want %q", kv.String(), tt.wantString) + } + if kv.Major != tt.wantMajor { + t.Errorf("Major = %q, want %q", kv.Major, tt.wantMajor) + } + if kv.Minor != tt.wantMinor { + t.Errorf("Minor = %q, want %q", kv.Minor, tt.wantMinor) + } + }) } } diff --git a/pkg/chart/loader/archive/archive.go b/pkg/chart/loader/archive/archive.go index c6875db3f..e98f5c333 100644 --- a/pkg/chart/loader/archive/archive.go +++ b/pkg/chart/loader/archive/archive.go @@ -68,7 +68,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { for { b := bytes.NewBuffer(nil) hd, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/pkg/chart/loader/load.go b/pkg/chart/loader/load.go index dbc5d3004..3fd381825 100644 --- a/pkg/chart/loader/load.go +++ b/pkg/chart/loader/load.go @@ -131,8 +131,8 @@ func LoadFile(name string) (chart.Charter, error) { files, err := archive.LoadArchiveFiles(raw) if err != nil { - if err == gzip.ErrHeader { - return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) + if errors.Is(err, gzip.ErrHeader) { + return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %w)", name, err) } return nil, errors.New("unable to load chart archive") } @@ -163,7 +163,7 @@ func LoadArchive(in io.Reader) (chart.Charter, error) { files, err := archive.LoadArchiveFiles(in) if err != nil { - if err == gzip.ErrHeader { + if errors.Is(err, gzip.ErrHeader) { return nil, fmt.Errorf("stream does not appear to be a valid chart file (details: %w)", err) } return nil, fmt.Errorf("unable to load chart archive: %w", err) diff --git a/pkg/chart/v2/lint/lint_test.go b/pkg/chart/v2/lint/lint_test.go index 6f8f137f4..80dcef932 100644 --- a/pkg/chart/v2/lint/lint_test.go +++ b/pkg/chart/v2/lint/lint_test.go @@ -179,16 +179,6 @@ func TestHelmCreateChart(t *testing.T) { // // Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent // of the `--set` flag. -// -// Note: This test requires the following ldflags to be set per the current Kubernetes version to avoid false-positive -// results. -// 1. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor= -// 2. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor= -// or directly use '$(LDFLAGS)' in Makefile. -// -// When run without ldflags, the test passes giving a false-positive result. 
This is because the variables -// `k8sVersionMajor` and `k8sVersionMinor` by default are set to an older version of Kubernetes, with which, there -// might not be the deprecation warning. func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) { createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir()) if err != nil { diff --git a/pkg/chart/v2/lint/rules/crds.go b/pkg/chart/v2/lint/rules/crds.go index faef7dcf9..4bb4d370b 100644 --- a/pkg/chart/v2/lint/rules/crds.go +++ b/pkg/chart/v2/lint/rules/crds.go @@ -70,7 +70,7 @@ func Crds(linter *support.Linter) { var yamlStruct *k8sYamlStruct err := decoder.Decode(&yamlStruct) - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/pkg/chart/v2/lint/rules/deprecations.go b/pkg/chart/v2/lint/rules/deprecations.go index 6eba316bc..7d5245869 100644 --- a/pkg/chart/v2/lint/rules/deprecations.go +++ b/pkg/chart/v2/lint/rules/deprecations.go @@ -28,14 +28,6 @@ import ( kscheme "k8s.io/client-go/kubernetes/scheme" ) -var ( - // This should be set in the Makefile based on the version of client-go being imported. - // These constants will be overwritten with LDFLAGS. The version components must be - // strings in order for LDFLAGS to set them. - k8sVersionMajor = "1" - k8sVersionMinor = "20" -) - // deprecatedAPIError indicates than an API is deprecated in Kubernetes type deprecatedAPIError struct { Deprecated string @@ -56,12 +48,8 @@ func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVer return nil } - majorVersion := k8sVersionMajor - minorVersion := k8sVersionMinor - - if kubeVersion != nil { - majorVersion = kubeVersion.Major - minorVersion = kubeVersion.Minor + if kubeVersion == nil { + kubeVersion = &common.DefaultCapabilities.KubeVersion } runtimeObject, err := resourceToRuntimeObject(resource) @@ -73,16 +61,16 @@ func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVer return err } - major, err := strconv.Atoi(majorVersion) + kubeVersionMajor, err := strconv.Atoi(kubeVersion.Major) if err != nil { return err } - minor, err := strconv.Atoi(minorVersion) + kubeVersionMinor, err := strconv.Atoi(kubeVersion.Minor) if err != nil { return err } - if !deprecation.IsDeprecated(runtimeObject, major, minor) { + if !deprecation.IsDeprecated(runtimeObject, kubeVersionMajor, kubeVersionMinor) { return nil } gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind) diff --git a/pkg/chart/v2/lint/rules/template.go b/pkg/chart/v2/lint/rules/template.go index 0c633dc1a..43665aa3a 100644 --- a/pkg/chart/v2/lint/rules/template.go +++ b/pkg/chart/v2/lint/rules/template.go @@ -180,7 +180,7 @@ func (t *templateLinter) Lint() { var yamlStruct *k8sYamlStruct err := decoder.Decode(&yamlStruct) - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/pkg/chart/v2/loader/archive.go b/pkg/chart/v2/loader/archive.go index f6ed0e84f..c6885e125 100644 --- a/pkg/chart/v2/loader/archive.go +++ b/pkg/chart/v2/loader/archive.go @@ -56,8 +56,8 @@ func LoadFile(name string) (*chart.Chart, error) { c, err := LoadArchive(raw) if err != nil { - if err == gzip.ErrHeader { - return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) + if errors.Is(err, gzip.ErrHeader) { + return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %w)", name, err) } } return c, err diff --git a/pkg/chart/v2/loader/load.go b/pkg/chart/v2/loader/load.go index ba3a9b6bc..d466e247c 100644 --- a/pkg/chart/v2/loader/load.go +++ 
b/pkg/chart/v2/loader/load.go @@ -208,7 +208,7 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { // LoadValues loads values from a reader. // // The reader is expected to contain one or more YAML documents, the values of which are merged. -// And the values can be either a chart's default values or a user-supplied values. +// And the values can be either a chart's default values or user-supplied values. func LoadValues(data io.Reader) (map[string]interface{}, error) { values := map[string]interface{}{} reader := utilyaml.NewYAMLReader(bufio.NewReader(data)) @@ -216,7 +216,7 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) { currentMap := map[string]interface{}{} raw, err := reader.Read() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return nil, fmt.Errorf("error reading yaml document: %w", err) diff --git a/pkg/chart/v2/loader/load_test.go b/pkg/chart/v2/loader/load_test.go index ee0be5b18..397745dd6 100644 --- a/pkg/chart/v2/loader/load_test.go +++ b/pkg/chart/v2/loader/load_test.go @@ -20,6 +20,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "errors" "io" "log" "os" @@ -116,7 +117,7 @@ func TestBomTestData(t *testing.T) { tr := tar.NewReader(unzipped) for { file, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 294b782f8..c7bb6621e 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -280,7 +280,11 @@ func processImportValues(c *chart.Chart, merge bool) error { // get child table vv, err := cvals.Table(r.Name + "." + child) if err != nil { - slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err)) + slog.Warn( + "ImportValues missing table from chart", + slog.String("chart", r.Name), + slog.Any("error", err), + ) continue } // create value map from child to be merged into parent diff --git a/pkg/chart/v2/util/save_test.go b/pkg/chart/v2/util/save_test.go index e317d1c09..6d4e2c8cd 100644 --- a/pkg/chart/v2/util/save_test.go +++ b/pkg/chart/v2/util/save_test.go @@ -21,6 +21,7 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + "errors" "fmt" "io" "os" @@ -205,7 +206,7 @@ func retrieveAllHeadersFromTar(path string) ([]*tar.Header, error) { headers := []*tar.Header{} for { hd, err := tr.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/pkg/cli/environment.go b/pkg/cli/environment.go index 106d24336..5c19734aa 100644 --- a/pkg/cli/environment.go +++ b/pkg/cli/environment.go @@ -99,6 +99,7 @@ func New() *EnvSettings { env := &EnvSettings{ namespace: os.Getenv("HELM_NAMESPACE"), MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory), + KubeConfig: os.Getenv("KUBECONFIG"), KubeContext: os.Getenv("HELM_KUBECONTEXT"), KubeToken: os.Getenv("HELM_KUBETOKEN"), KubeAsUser: os.Getenv("HELM_KUBEASUSER"), @@ -274,8 +275,10 @@ func (s *EnvSettings) EnvVars() map[string]string { // Namespace gets the namespace from the configuration func (s *EnvSettings) Namespace() string { - if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { - return ns + if s.config != nil { + if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { + return ns + } } if s.namespace != "" { return s.namespace diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 939523ffd..251bfa032 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -59,7 +59,7 @@ func AddWaitFlag(cmd 
*cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if specified, wait until resources are ready (up to --timeout). Values: 'watcher' (default), 'hookOnly', and 'legacy'.", + "if specified, wait until resources are ready (up to --timeout). Values: 'watcher', 'hookOnly', and 'legacy'.", ) // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index e5d311394..d36cd9e34 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -275,7 +275,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options slog.Warn("this chart is deprecated") } - if req := ac.MetaDependencies(); req != nil { + if req := ac.MetaDependencies(); len(req) > 0 { // If CheckDependencies returns an error, we have unfulfilled dependencies. // As of Helm 2.4.0, this is treated as a stopping condition: // https://github.com/helm/helm/issues/2209 diff --git a/pkg/cmd/lint_test.go b/pkg/cmd/lint_test.go index 270273116..f825e36e2 100644 --- a/pkg/cmd/lint_test.go +++ b/pkg/cmd/lint_test.go @@ -76,7 +76,7 @@ func TestLintCmdWithKubeVersionFlag(t *testing.T) { golden: "output/lint-chart-with-deprecated-api-strict.txt", wantError: true, }, { - // the test builds will use the default k8sVersionMinor const in deprecations.go and capabilities.go + // the test builds will use the kubeVersionMinorTesting const in capabilities.go // which is "20" name: "lint chart with deprecated api version without kube version", cmd: fmt.Sprintf("lint %s", testChart), diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index ddda5fde6..7bdbf597d 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -132,7 +132,13 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { DisableFlagParsing: true, } - // TODO: Make sure a command with this name does not already exist. 
+ for _, cmd := range baseCmd.Commands() { + if cmd.Name() == c.Name() { + slog.Error("failed to load plugins: name conflicts", slog.String("name", c.Name())) + return + } + } + baseCmd.AddCommand(c) // For completion, we try to load more details about the plugins so as to allow for command and diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index a45c4a3de..efa9b466c 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -119,8 +119,6 @@ func (o *pluginInstallOptions) newInstallerForSource() (installer.Installer, err } func (o *pluginInstallOptions) run(out io.Writer) error { - installer.Debug = settings.Debug - i, err := o.newInstallerForSource() if err != nil { return err diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index 05cfe46f1..a250ba221 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -114,9 +114,9 @@ func TestLoadCLIPlugins(t *testing.T) { }{ {"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0}, {"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0}, - {"env", "env stuff", "show the env", "HELM_PLUGIN_NAME=env\n", []string{}, 0}, {"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2}, {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0}, + {"shortenv", "env stuff", "show the env", "HELM_PLUGIN_NAME=shortenv\n", []string{}, 0}, } pluginCmds := cmd.Commands() @@ -254,10 +254,6 @@ func TestLoadCLIPluginsForCompletion(t *testing.T) { tests := []staticCompletionDetails{ {"args", []string{}, []string{}, []staticCompletionDetails{}}, {"echo", []string{}, []string{}, []staticCompletionDetails{}}, - {"env", []string{}, []string{"global"}, []staticCompletionDetails{ - {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, - {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, - }}, {"exitwith", []string{}, []string{}, []staticCompletionDetails{ {"code", []string{}, []string{"a", "b"}, []staticCompletionDetails{}}, }}, @@ -268,6 +264,10 @@ func TestLoadCLIPluginsForCompletion(t *testing.T) { {"more", []string{"one", "two"}, []string{"b", "ball"}, []staticCompletionDetails{}}, }}, }}, + {"shortenv", []string{}, []string{"global"}, []staticCompletionDetails{ + {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, + {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, + }}, } checkCommand(t, cmd.Commands(), tests) } diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index c6d4b8530..6cc2729fc 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -61,7 +61,6 @@ func (o *pluginUpdateOptions) complete(args []string) error { } func (o *pluginUpdateOptions) run(out io.Writer) error { - installer.Debug = settings.Debug slog.Debug("loading installed plugins", "path", settings.PluginsDirectory) plugins, err := plugin.LoadAll(settings.PluginsDirectory) if err != nil { diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index 88a6f351f..5a6159e7d 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -55,7 +55,7 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command } return compListReleases(toComplete, args, cfg) }, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) (returnError error) { client.Namespace = settings.Namespace() notName := regexp.MustCompile(`^!\s?name=`) for _, 
f := range filter { @@ -65,7 +65,16 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, "")) } } - reli, runErr := client.Run(args[0]) + + reli, shutdown, runErr := client.Run(args[0]) + defer func() { + if shutdownErr := shutdown(); shutdownErr != nil { + if returnError == nil { + returnError = shutdownErr + } + } + }() + // We only return an error if we weren't even able to get the // release, otherwise we keep going so we can print status and logs // if requested diff --git a/pkg/cmd/repo_list.go b/pkg/cmd/repo_list.go index 10b4442a0..450294948 100644 --- a/pkg/cmd/repo_list.go +++ b/pkg/cmd/repo_list.go @@ -30,6 +30,7 @@ import ( func newRepoListCmd(out io.Writer) *cobra.Command { var outfmt output.Format + var noHeaders bool cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, @@ -46,12 +47,17 @@ func newRepoListCmd(out io.Writer) *cobra.Command { return nil } - return outfmt.Write(out, &repoListWriter{f.Repositories}) + w := &repoListWriter{ + repos: f.Repositories, + noHeaders: noHeaders, + } + + return outfmt.Write(out, w) }, } + cmd.Flags().BoolVar(&noHeaders, "no-headers", false, "suppress headers in the output") bindOutputFlag(cmd, &outfmt) - return cmd } @@ -61,12 +67,15 @@ type repositoryElement struct { } type repoListWriter struct { - repos []*repo.Entry + repos []*repo.Entry + noHeaders bool } func (r *repoListWriter) WriteTable(out io.Writer) error { table := uitable.New() - table.AddRow("NAME", "URL") + if !r.noHeaders { + table.AddRow("NAME", "URL") + } for _, re := range r.repos { table.AddRow(re.Name, re.URL) } @@ -94,11 +103,11 @@ func (r *repoListWriter) encodeByFormat(out io.Writer, format output.Format) err return output.EncodeJSON(out, repolist) case output.YAML: return output.EncodeYAML(out, repolist) + default: + // Because this is a non-exported function and only called internally by + // WriteJSON and WriteYAML, we shouldn't get invalid types + return nil } - - // Because this is a non-exported function and only called internally by - // WriteJSON and WriteYAML, we shouldn't get invalid types - return nil } // Returns all repos from repos, except those with names matching ignoredRepoNames diff --git a/pkg/cmd/repo_list_test.go b/pkg/cmd/repo_list_test.go index 2f6a9e4ad..94cdf3969 100644 --- a/pkg/cmd/repo_list_test.go +++ b/pkg/cmd/repo_list_test.go @@ -48,6 +48,12 @@ func TestRepoList(t *testing.T) { golden: "output/repo-list.txt", wantError: false, }, + { + name: "list without headers", + cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s --no-headers", repoFile2, rootDir), + golden: "output/repo-list-no-headers.txt", + wantError: false, + }, } runTestCmd(t, tests) diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index cfeeec59b..bb2ff6038 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -190,9 +190,10 @@ func (h *hubSearchWriter) encodeByFormat(out io.Writer, format output.Format) er return output.EncodeJSON(out, chartList) case output.YAML: return output.EncodeYAML(out, chartList) + default: + // Because this is a non-exported function and only called internally by + // WriteJSON and WriteYAML, we shouldn't get invalid types + return nil } - // Because this is a non-exported function and only called internally by - // WriteJSON and WriteYAML, we shouldn't get invalid types - return nil } diff --git a/pkg/cmd/search_repo.go 
b/pkg/cmd/search_repo.go index 07345a48f..febb138e2 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -190,7 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) { f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n)) ind, err := repo.LoadIndexFile(f) if err != nil { - slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err)) + slog.Warn("repo is corrupt or missing", slog.String("repo", n), slog.Any("error", err)) continue } @@ -260,11 +260,11 @@ func (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) e return output.EncodeJSON(out, chartList) case output.YAML: return output.EncodeYAML(out, chartList) + default: + // Because this is a non-exported function and only called internally by + // WriteJSON and WriteYAML, we shouldn't get invalid types + return nil } - - // Because this is a non-exported function and only called internally by - // WriteJSON and WriteYAML, we shouldn't get invalid types - return nil } // Provides the list of charts that are part of the specified repo, and that starts with 'prefix'. diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index cf68c6c46..14f85042b 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -46,6 +46,16 @@ Render chart templates locally and display the output. Any values that would normally be looked up or retrieved in-cluster will be faked locally. Additionally, none of the server-side testing of chart validity (e.g. whether an API is supported) is done. + +To specify the Kubernetes API versions used for Capabilities.APIVersions, use +the '--api-versions' flag. This flag can be specified multiple times or as a +comma-separated list: + + $ helm template --api-versions networking.k8s.io/v1 --api-versions cert-manager.io/v1 mychart ./mychart + +or + + $ helm template --api-versions networking.k8s.io/v1,cert-manager.io/v1 mychart ./mychart ` func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh b/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh index 678b4eff5..6c62be8b9 100755 --- a/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh +++ b/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh @@ -1,2 +1,2 @@ -#!/bin/bash -echo $* +#!/usr/bin/env sh +echo "$@" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh index ec8469657..9cf68da68 100755 --- a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh +++ b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh @@ -1,2 +1,2 @@ -#!/bin/bash -exit $* +#!/usr/bin/env sh +exit "$1" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml similarity index 89% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml index e479a0503..027573ed4 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml @@ -1,4 +1,4 @@ -name: env +name: shortenv commands: - name: list flags: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh similarity index 100% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh diff 
--git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml similarity index 93% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml index 78a0a23fb..5fe053ed0 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml @@ -1,6 +1,6 @@ --- apiVersion: v1 -name: env +name: shortenv type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/output/plugin_list_comp.txt b/pkg/cmd/testdata/output/plugin_list_comp.txt index 833efc5e9..1dff43551 100644 --- a/pkg/cmd/testdata/output/plugin_list_comp.txt +++ b/pkg/cmd/testdata/output/plugin_list_comp.txt @@ -1,7 +1,7 @@ args echo args echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp diff --git a/pkg/cmd/testdata/output/plugin_repeat_comp.txt b/pkg/cmd/testdata/output/plugin_repeat_comp.txt index 3fa05f0b3..b46c1b7d4 100644 --- a/pkg/cmd/testdata/output/plugin_repeat_comp.txt +++ b/pkg/cmd/testdata/output/plugin_repeat_comp.txt @@ -1,6 +1,6 @@ echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp diff --git a/pkg/cmd/testdata/output/repo-list-no-headers.txt b/pkg/cmd/testdata/output/repo-list-no-headers.txt new file mode 100644 index 000000000..13491aeb2 --- /dev/null +++ b/pkg/cmd/testdata/output/repo-list-no-headers.txt @@ -0,0 +1,3 @@ +charts https://charts.helm.sh/stable +firstexample http://firstexample.com +secondexample http://secondexample.com diff --git a/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt b/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt new file mode 100644 index 000000000..f5454b88d --- /dev/null +++ b/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt @@ -0,0 +1 @@ +release "aeneas" uninstalled diff --git a/pkg/cmd/testdata/output/version.txt b/pkg/cmd/testdata/output/version.txt index 44c5bf470..1f4cf4d4a 100644 --- a/pkg/cmd/testdata/output/version.txt +++ b/pkg/cmd/testdata/output/version.txt @@ -1 +1 @@ -version.BuildInfo{Version:"v4.1", GitCommit:"", GitTreeState:"", GoVersion:"", KubeClientVersion:"v."} +version.BuildInfo{Version:"v4.1", GitCommit:"", GitTreeState:"", GoVersion:"", KubeClientVersion:"v1.20"} diff --git a/pkg/cmd/uninstall_test.go b/pkg/cmd/uninstall_test.go index 1123f449b..ce436e68c 100644 --- a/pkg/cmd/uninstall_test.go +++ b/pkg/cmd/uninstall_test.go @@ -19,6 +19,7 @@ package cmd import ( "testing" + "helm.sh/helm/v4/pkg/release/common" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -57,6 +58,15 @@ func TestUninstall(t *testing.T) { golden: "output/uninstall-keep-history.txt", rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})}, }, + { + name: "keep history with earlier deployed release", + cmd: "uninstall aeneas --keep-history", + golden: "output/uninstall-keep-history-earlier-deployed.txt", + rels: []*release.Release{ + release.Mock(&release.MockReleaseOptions{Name: "aeneas", Version: 1, Status: common.StatusDeployed}), + release.Mock(&release.MockReleaseOptions{Name: "aeneas", Version: 2, Status: common.StatusFailed}), + }, + }, { name: "wait", cmd: "uninstall aeneas --wait", diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 92f130f60..918d6f5b8 100644 
--- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -153,6 +153,8 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { instClient.EnableDNS = client.EnableDNS instClient.HideSecret = client.HideSecret instClient.TakeOwnership = client.TakeOwnership + instClient.ForceConflicts = client.ForceConflicts + instClient.ServerSideApply = client.ServerSideApply != "false" if isReleaseUninstalled(versions) { instClient.Replace = true @@ -200,7 +202,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { if err != nil { return err } - if req := ac.MetaDependencies(); req != nil { + if req := ac.MetaDependencies(); len(req) > 0 { if err := action.CheckDependencies(ch, req); err != nil { err = fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err) if client.DependencyUpdate { diff --git a/pkg/cmd/upgrade_test.go b/pkg/cmd/upgrade_test.go index 8729be0ec..0ae1e3561 100644 --- a/pkg/cmd/upgrade_test.go +++ b/pkg/cmd/upgrade_test.go @@ -605,3 +605,58 @@ func TestUpgradeWithDryRun(t *testing.T) { t.Error("expected error when --hide-secret used without --dry-run") } } + +func TestUpgradeInstallServerSideApply(t *testing.T) { + _, _, chartPath := prepareMockRelease(t, "ssa-test") + + defer resetEnv()() + + tests := []struct { + name string + serverSideFlag string + expectedApplyMethod string + }{ + { + name: "upgrade --install with --server-side=false uses client-side apply", + serverSideFlag: "--server-side=false", + expectedApplyMethod: "csa", + }, + { + name: "upgrade --install with --server-side=true uses server-side apply", + serverSideFlag: "--server-side=true", + expectedApplyMethod: "ssa", + }, + { + name: "upgrade --install with --server-side=auto uses server-side apply (default for new install)", + serverSideFlag: "--server-side=auto", + expectedApplyMethod: "ssa", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + store := storageFixture() + releaseName := fmt.Sprintf("ssa-test-%s", tt.expectedApplyMethod) + + cmd := fmt.Sprintf("upgrade %s --install %s '%s'", releaseName, tt.serverSideFlag, chartPath) + _, _, err := executeActionCommandC(store, cmd) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + rel, err := store.Get(releaseName, 1) + if err != nil { + t.Fatalf("unexpected error getting release: %v", err) + } + + relV1, err := releaserToV1Release(rel) + if err != nil { + t.Fatalf("unexpected error converting release: %v", err) + } + + if relV1.ApplyMethod != tt.expectedApplyMethod { + t.Errorf("expected ApplyMethod %q, got %q", tt.expectedApplyMethod, relV1.ApplyMethod) + } + }) + } +} diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 18ed2b63b..c6ad8d252 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -98,7 +98,11 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) gvk := schema.FromAPIVersionAndKind(apiversion, kind) apiRes, err := getAPIResourceForGVK(gvk, config) if err != nil { - slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err)) + slog.Error( + "unable to get apiresource", + slog.String("groupVersionKind", gvk.String()), + slog.Any("error", err), + ) return nil, false, fmt.Errorf("unable to get apiresource from unstructured: %s: %w", gvk.String(), err) } gvr := schema.GroupVersionResource{ @@ -124,7 +128,11 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, 
config *rest.Config) (met } resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) if err != nil { - slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err)) + slog.Error( + "unable to retrieve resource list", + slog.String("GroupVersion", gvk.GroupVersion().String()), + slog.Any("error", err), + ) return res, err } for _, resource := range resList.APIResources { diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index d74611637..ef8b87503 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -38,12 +38,14 @@ func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) { if err != nil { return nil, err } + env := plugin.FormatEnv(settings.EnvVars()) pluginConstructorBuilder := func(plg plugin.Plugin) Constructor { return func(option ...Option) (Getter, error) { return &getterPlugin{ options: append([]Option{}, option...), plg: plg, + env: env, }, nil } } @@ -91,6 +93,7 @@ func convertOptions(globalOptions, options []Option) schema.GetterOptionsV1 { type getterPlugin struct { options []Option plg plugin.Plugin + env []string } func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error) { @@ -108,6 +111,7 @@ func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error Options: opts, Protocol: u.Scheme, }, + Env: g.env, // TODO should we pass Stdin, Stdout, and Stderr through Input here to getter plugins? // Stdout: os.Stdout, } diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 8faaf7329..16af9eb31 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -144,3 +144,27 @@ func TestGetterPlugin(t *testing.T) { assert.Equal(t, "fake-plugin output", buf.String()) } + +func TestCollectGetterPluginsPassesEnv(t *testing.T) { + env := cli.New() + env.PluginsDirectory = pluginDir + env.Debug = true + + providers, err := collectGetterPlugins(env) + require.NoError(t, err) + require.NotEmpty(t, providers, "expected at least one plugin provider") + + getter, err := providers.ByScheme("test") + require.NoError(t, err) + + gp, ok := getter.(*getterPlugin) + require.True(t, ok, "expected getter to be a *getterPlugin") + + require.NotEmpty(t, gp.env, "expected env to be set on getterPlugin") + envMap := plugin.ParseEnv(gp.env) + + assert.Contains(t, envMap, "HELM_DEBUG", "expected HELM_DEBUG in env") + assert.Equal(t, "true", envMap["HELM_DEBUG"], "expected HELM_DEBUG to be true") + assert.Contains(t, envMap, "HELM_PLUGINS", "expected HELM_PLUGINS in env") + assert.Equal(t, pluginDir, envMap["HELM_PLUGINS"], "expected HELM_PLUGINS to match pluginsDirectory") +} diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go index 3511c2d40..a8160da2a 100644 --- a/pkg/ignore/rules.go +++ b/pkg/ignore/rules.go @@ -176,7 +176,7 @@ func (r *Rules) parseRule(rule string) error { rule = after ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) + slog.Error("failed to compile", slog.String("rule", rule), slog.Any("error", err)) return false } return ok @@ -186,7 +186,11 @@ func (r *Rules) parseRule(rule string) error { p.match = func(n string, _ os.FileInfo) bool { ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) + slog.Error( + "failed to compile", + slog.String("rule", rule), + slog.Any("error", err), + ) return false } return ok 
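Aside (illustrative only, not part of the patch): several hunks in this change replace loosely-typed slog key/value pairs with explicit slog.Attr constructors. A minimal sketch of the equivalence, using a hypothetical rule/err pair:

package main

import (
	"errors"
	"log/slog"
)

func main() {
	rule := "*.tmpl"
	err := errors.New("syntax error in pattern")

	// Positional form being replaced: keys and values are matched up at runtime.
	slog.Error("failed to compile", "rule", rule, "error", err)

	// Typed-attribute form used throughout this change: identical output,
	// but the key names and value kinds are explicit at the call site.
	slog.Error("failed to compile", slog.String("rule", rule), slog.Any("error", err))
}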
@@ -198,7 +202,7 @@ func (r *Rules) parseRule(rule string) error { n = filepath.Base(n) ok, err := filepath.Match(rule, n) if err != nil { - slog.Error("failed to compile", "rule", rule, slog.Any("error", err)) + slog.Error("failed to compile", slog.String("rule", rule), slog.Any("error", err)) return false } return ok diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 47a226000..9af4bbcb3 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -87,6 +87,8 @@ type Client struct { // WaitContext is an optional context to use for wait operations. // If not set, a context will be created internally using the // timeout provided to the wait functions. + // + // Deprecated: Use WithWaitContext wait option when getting a Waiter instead. WaitContext context.Context Waiter @@ -139,7 +141,11 @@ func init() { } } -func (c *Client) newStatusWatcher() (*statusWaiter, error) { +func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { + var o waitOptions + for _, opt := range opts { + opt(&o) + } cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err @@ -156,14 +162,23 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } + waitContext := o.ctx + if waitContext == nil { + waitContext = c.WaitContext + } return &statusWaiter{ restMapper: restMapper, client: dynamicClient, - ctx: c.WaitContext, + ctx: waitContext, + readers: o.statusReaders, }, nil } -func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { +func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) { + return c.GetWaiterWithOptions(ws) +} + +func (c *Client) GetWaiterWithOptions(strategy WaitStrategy, opts ...WaitOption) (Waiter, error) { switch strategy { case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() @@ -172,9 +187,9 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } return &legacyWaiter{kubeClient: kc, ctx: c.WaitContext}, nil case StatusWatcherStrategy: - return c.newStatusWatcher() + return c.newStatusWatcher(opts...) case HookOnlyStrategy: - sw, err := c.newStatusWatcher() + sw, err := c.newStatusWatcher(opts...) if err != nil { return nil, err } @@ -187,8 +202,12 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } func (c *Client) SetWaiter(ws WaitStrategy) error { + return c.SetWaiterWithOptions(ws) +} + +func (c *Client) SetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) error { var err error - c.Waiter, err = c.GetWaiter(ws) + c.Waiter, err = c.GetWaiterWithOptions(ws, opts...) 
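Aside (illustrative only, not part of the patch): a minimal usage sketch of the new GetWaiterWithOptions entry point; the waitForRelease helper, client c, resources, and the five-minute timeout are hypothetical, while WithWaitContext and WithKStatusReaders are the options introduced in pkg/kube/options.go later in this change:

package example

import (
	"context"
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// waitForRelease shows how a caller could pass a context (and, if needed,
// custom kstatus readers) through wait options instead of setting the
// deprecated Client.WaitContext field.
func waitForRelease(ctx context.Context, c *kube.Client, resources kube.ResourceList) error {
	waiter, err := c.GetWaiterWithOptions(
		kube.StatusWatcherStrategy,
		kube.WithWaitContext(ctx),
		// Custom readers can be injected the same way:
		// kube.WithKStatusReaders(myReader),
	)
	if err != nil {
		return err
	}
	return waiter.Wait(resources, 5*time.Minute)
}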
if err != nil { return err } @@ -271,7 +290,7 @@ func ClientCreateOptionDryRun(dryRun bool) ClientCreateOption { } } -// ClientCreateOptionFieldValidationDirective specifies show API operations validate object's schema +// ClientCreateOptionFieldValidationDirective specifies how API operations validate object's schema // - For client-side apply: this is ignored // - For server-side apply: the directive is sent to the server to perform the validation // @@ -563,14 +582,44 @@ func (c *Client) update(originals, targets ResourceList, createApplyFunc CreateA } kind := target.Mapping.GroupVersionKind.Kind - c.Logger().Debug("created a new resource", "namespace", target.Namespace, "name", target.Name, "kind", kind) + c.Logger().Debug( + "created a new resource", + slog.String("namespace", target.Namespace), + slog.String("name", target.Name), + slog.String("kind", kind), + ) return nil } original := originals.Get(target) if original == nil { kind := target.Mapping.GroupVersionKind.Kind - return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + + slog.Warn("resource exists on cluster but not in original release, using cluster state as baseline", + "namespace", target.Namespace, "name", target.Name, "kind", kind) + + currentObj, err := helper.Get(target.Namespace, target.Name) + if err != nil { + return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + } + + // Create a temporary Info with the current cluster state to use as "original" + currentInfo := &resource.Info{ + Client: target.Client, + Mapping: target.Mapping, + Namespace: target.Namespace, + Name: target.Name, + Object: currentObj, + } + + if err := updateApplyFunc(currentInfo, target); err != nil { + updateErrors = append(updateErrors, err) + } + + // Because we check for errors later, append the info regardless + res.Updated = append(res.Updated, target) + + return nil } if err := updateApplyFunc(original, target); err != nil { @@ -594,21 +643,41 @@ func (c *Client) update(originals, targets ResourceList, createApplyFunc CreateA c.Logger().Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) if err := info.Get(); err != nil { - c.Logger().Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + c.Logger().Debug( + "unable to get object", + slog.String("namespace", info.Namespace), + slog.String("name", info.Name), + slog.String("kind", info.Mapping.GroupVersionKind.Kind), + slog.Any("error", err), + ) continue } annotations, err := metadataAccessor.Annotations(info.Object) if err != nil { - c.Logger().Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + c.Logger().Debug( + "unable to get annotations", + slog.String("namespace", info.Namespace), + slog.String("name", info.Name), + slog.String("kind", info.Mapping.GroupVersionKind.Kind), + slog.Any("error", err), + ) } if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { c.Logger().Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy) continue } if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { - c.Logger().Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, 
"kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + c.Logger().Debug( + "failed to delete resource", + slog.String("namespace", info.Namespace), + slog.String("name", info.Name), + slog.String("kind", info.Mapping.GroupVersionKind.Kind), + slog.Any("error", err), + ) if !apierrors.IsNotFound(err) { - updateErrors = append(updateErrors, fmt.Errorf("failed to delete resource %s: %w", info.Name, err)) + updateErrors = append(updateErrors, fmt.Errorf( + "failed to delete resource namespace=%s, name=%s, kind=%s: %w", + info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err)) } continue } @@ -681,7 +750,7 @@ func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption { } } -// ClientUpdateOptionFieldValidationDirective specifies show API operations validate object's schema +// ClientUpdateOptionFieldValidationDirective specifies how API operations validate object's schema // - For client-side apply: this is ignored // - For server-side apply: the directive is sent to the server to perform the validation // @@ -760,7 +829,13 @@ func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdate slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) return func(original, target *resource.Info) error { if err := replaceResource(target, updateOptions.fieldValidationDirective); err != nil { - c.Logger().Debug("error replacing the resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + c.Logger().With( + slog.String("namespace", target.Namespace), + slog.String("name", target.Name), + slog.String("gvk", target.Mapping.GroupVersionKind.String()), + ).Debug( + "error replacing the resource", slog.Any("error", err), + ) return err } @@ -829,7 +904,12 @@ func (c *Client) Delete(resources ResourceList, policy metav1.DeletionPropagatio err := deleteResource(target, policy) if err == nil || apierrors.IsNotFound(err) { if err != nil { - c.Logger().Debug("ignoring delete failure", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + c.Logger().Debug( + "ignoring delete failure", + slog.String("namespace", target.Namespace), + slog.String("name", target.Name), + slog.String("kind", target.Mapping.GroupVersionKind.Kind), + slog.Any("error", err)) } mtx.Lock() defer mtx.Unlock() diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index f3a797246..e257c66d2 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -28,6 +28,10 @@ import ( "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -411,7 +415,25 @@ func TestUpdate(t *testing.T) { "/namespaces/default/pods/forbidden:GET", "/namespaces/default/pods/forbidden:DELETE", ), - ExpectedError: "failed to delete resource forbidden:", + ExpectedError: "failed to delete resource namespace=default, name=forbidden, kind=Pod:", + }, + "rollback after failed upgrade with removed resource": { + // Simulates rollback scenario: + // - Revision 1 had "newpod" + // - Revision 2 removed "newpod" but upgrade failed (OriginalPods is empty) + // - Cluster still has "newpod" from Revision 1 + // - Rolling back to Revision 1 (TargetPods with "newpod") should succeed + OriginalPods: 
v1.PodList{}, // Revision 2 (failed) - resource was removed + TargetPods: newPodList("newpod"), // Revision 1 - rolling back to this + ThreeWayMergeForUnstructured: false, + ServerSideApply: true, + ExpectedActions: []string{ + "/namespaces/default/pods/newpod:GET", // Check if resource exists + "/namespaces/default/pods/newpod:GET", // Get current state (first call in update path) + "/namespaces/default/pods/newpod:GET", // Get current cluster state to use as baseline + "/namespaces/default/pods/newpod:PATCH", // Update using cluster state as baseline + }, + ExpectedError: "", }, } @@ -428,6 +450,10 @@ func TestUpdate(t *testing.T) { p, m := req.URL.Path, req.Method switch { + case p == "/namespaces/default/pods/newpod" && m == http.MethodGet: + return newResponse(http.StatusOK, &listTarget.Items[0]) + case p == "/namespaces/default/pods/newpod" && m == http.MethodPatch: + return newResponse(http.StatusOK, &listTarget.Items[0]) case p == "/namespaces/default/pods/starfish" && m == http.MethodGet: return newResponse(http.StatusOK, &listOriginal.Items[0]) case p == "/namespaces/default/pods/otter" && m == http.MethodGet: @@ -519,9 +545,23 @@ func TestUpdate(t *testing.T) { require.NoError(t, err) } - assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) - assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) - assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + // Special handling for the rollback test case + if name == "rollback after failed upgrade with removed resource" { + assert.Len(t, result.Created, 0, "expected 0 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 1, "expected 1 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 0, "expected 0 resource deleted, got %d", len(result.Deleted)) + } else { + assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + } + + if tc.ExpectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.ExpectedError) + } else { + require.NoError(t, err) + } actions := []string{} for _, action := range client.Actions { @@ -722,7 +762,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -783,7 +823,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -845,7 +885,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -923,7 +963,7 @@ func TestGetPodList(t *testing.T) { responsePodList.Items = append(responsePodList.Items, newPodWithStatus(name, v1.PodStatus{}, namespace)) } - kubeClient := k8sfake.NewSimpleClientset(&responsePodList) + kubeClient := k8sfake.NewClientset(&responsePodList) c := Client{Namespace: namespace, kubeClient: kubeClient} podList, err := c.GetPodList(namespace, metav1.ListOptions{}) @@ -936,7 +976,7 @@ func TestOutputContainerLogsForPodList(t *testing.T) { namespace := "some-namespace" somePodList 
:= newPodList("jimmy", "three", "structs") - kubeClient := k8sfake.NewSimpleClientset(&somePodList) + kubeClient := k8sfake.NewClientset(&somePodList) c := Client{Namespace: namespace, kubeClient: kubeClient} outBuffer := &bytes.Buffer{} outBufferFunc := func(_, _, _ string) io.Writer { return outBuffer } @@ -1314,7 +1354,7 @@ func TestIsReachable(t *testing.T) { setupClient: func(t *testing.T) *Client { t.Helper() client := newTestClient(t) - client.kubeClient = k8sfake.NewSimpleClientset() + client.kubeClient = k8sfake.NewClientset() return client }, expectError: false, @@ -1852,7 +1892,7 @@ func TestClientWaitContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -1907,7 +1947,7 @@ func TestClientWaitWithJobsContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(job), false) @@ -1968,7 +2008,7 @@ func TestClientWaitForDeleteContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&pod), false) @@ -2030,7 +2070,7 @@ func TestClientWaitContextNilDoesNotPanic(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -2080,7 +2120,7 @@ func TestClientWaitContextPreCancelledLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -2111,7 +2151,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2138,7 +2178,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(jobManifest), false) @@ -2170,7 +2210,7 @@ status: phase: Running ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2182,3 +2222,100 @@ status: require.Error(t, err) assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err) } + +// testStatusReader is a custom status reader for testing that returns a configurable status. 
+type testStatusReader struct { + supportedGK schema.GroupKind + status status.Status +} + +func (r *testStatusReader) Supports(gk schema.GroupKind) bool { + return gk == r.supportedGK +} + +func (r *testStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) { + return &event.ResourceStatus{ + Identifier: id, + Status: r.status, + Message: "test status reader", + }, nil +} + +func (r *testStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) { + id := object.ObjMetadata{ + Namespace: u.GetNamespace(), + Name: u.GetName(), + GroupKind: u.GroupVersionKind().GroupKind(), + } + return &event.ResourceStatus{ + Identifier: id, + Status: r.status, + Message: "test status reader", + }, nil +} + +func TestClientStatusReadersPassedToStatusWaiter(t *testing.T) { + // This test verifies that Client.StatusReaders is correctly passed through + // to the statusWaiter when using the StatusWatcherStrategy. + // We use a custom status reader that immediately returns CurrentStatus for pods, + // which allows a pod without Ready condition to pass the wait. + podManifest := ` +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + namespace: default +` + + c := newTestClient(t) + statusReaders := []engine.StatusReader{ + &testStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + } + + var err error + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) + require.NoError(t, err) + + resources, err := c.Build(strings.NewReader(podManifest), false) + require.NoError(t, err) + + // The pod has no Ready condition, but our custom reader returns CurrentStatus, + // so the wait should succeed immediately without timeout. + err = c.Wait(resources, time.Second*3) + require.NoError(t, err) +} + +func TestClientStatusReadersWithWaitWithJobs(t *testing.T) { + // This test verifies that Client.StatusReaders is correctly passed through + // to the statusWaiter when using WaitWithJobs. + jobManifest := ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test-job + namespace: default +` + + c := newTestClient(t) + statusReaders := []engine.StatusReader{ + &testStatusReader{ + supportedGK: schema.GroupKind{Group: "batch", Kind: "Job"}, + status: status.CurrentStatus, + }, + } + + var err error + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) + require.NoError(t, err) + + resources, err := c.Build(strings.NewReader(jobManifest), false) + require.NoError(t, err) + + // The job has no Complete condition, but our custom reader returns CurrentStatus, + // so the wait should succeed immediately without timeout. 
+ err = c.WaitWithJobs(resources, time.Second*3) + require.NoError(t, err) +} diff --git a/pkg/kube/fake/failing_kube_client.go b/pkg/kube/fake/failing_kube_client.go index f340c045f..0f7787f79 100644 --- a/pkg/kube/fake/failing_kube_client.go +++ b/pkg/kube/fake/failing_kube_client.go @@ -47,6 +47,8 @@ type FailingKubeClient struct { WaitForDeleteError error WatchUntilReadyError error WaitDuration time.Duration + // RecordedWaitOptions stores the WaitOptions passed to GetWaiter for testing + RecordedWaitOptions []kube.WaitOption } var _ kube.Interface = &FailingKubeClient{} @@ -146,11 +148,20 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, if f.BuildTableError != nil { return []*resource.Info{}, f.BuildTableError } + if f.BuildDummy { + return createDummyResourceList(), nil + } return f.PrintingKubeClient.BuildTable(r, false) } func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := f.PrintingKubeClient.GetWaiter(ws) + return f.GetWaiterWithOptions(ws) +} + +func (f *FailingKubeClient) GetWaiterWithOptions(ws kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + // Record the WaitOptions for testing + f.RecordedWaitOptions = append(f.RecordedWaitOptions, opts...) + waiter, _ := f.PrintingKubeClient.GetWaiterWithOptions(ws, opts...) printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) return &FailingKubeWaiter{ PrintingKubeWaiter: printingKubeWaiter, diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index a7aad1dac..e3fa11576 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -148,7 +148,11 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { +func (p *PrintingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { + return p.GetWaiterWithOptions(ws) +} + +func (p *PrintingKubeClient) GetWaiterWithOptions(_ kube.WaitStrategy, _ ...kube.WaitOption) (kube.Waiter, error) { return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index cc934ae1e..63c784751 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -56,7 +56,7 @@ type Interface interface { // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - // Get Waiter gets the Kube.Waiter + // GetWaiter gets the Kube.Waiter. GetWaiter(ws WaitStrategy) (Waiter, error) // GetPodList lists all pods that match the specified listOptions @@ -99,3 +99,14 @@ type Waiter interface { // error. WatchUntilReady(resources ResourceList, timeout time.Duration) error } + +// InterfaceWaitOptions defines an interface that extends Interface with +// methods that accept wait options. +// +// TODO Helm 5: Remove InterfaceWaitOptions and integrate its method(s) into the Interface. +type InterfaceWaitOptions interface { + // GetWaiter gets the Kube.Waiter with options. + GetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) (Waiter, error) +} + +var _ InterfaceWaitOptions = (*Client)(nil) diff --git a/pkg/kube/options.go b/pkg/kube/options.go new file mode 100644 index 000000000..49c6229ba --- /dev/null +++ b/pkg/kube/options.go @@ -0,0 +1,45 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "context" + + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" +) + +// WaitOption is a function that configures an option for waiting on resources. +type WaitOption func(*waitOptions) + +// WithWaitContext sets the context for waiting on resources. +func WithWaitContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.ctx = ctx + } +} + +// WithKStatusReaders sets the status readers to be used while waiting on resources. +func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { + return func(wo *waitOptions) { + wo.statusReaders = readers + } +} + +type waitOptions struct { + ctx context.Context + statusReaders []engine.StatusReader +} diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index 42e327bdd..bfa98504c 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -354,6 +354,8 @@ func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) // continue. return true } + default: + // intentionally left empty } } return false @@ -374,6 +376,8 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { // continue. return true } + default: + // intentionally left empty } } return false diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 6077af165..75c18de81 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -45,6 +45,7 @@ type statusWaiter struct { client dynamic.Interface restMapper meta.RESTMapper ctx context.Context + readers []engine.StatusReader } // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a @@ -71,15 +72,13 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) - // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks + // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks. + // If custom readers are defined they can be used as Helm hooks support any resource. + // We put them in front since the DelegatingStatusReader uses the first reader that matches. genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) sr := &statusreaders.DelegatingStatusReader{ - StatusReaders: []engine.StatusReader{ - jobSR, - podSR, - genericSR, - }, + StatusReaders: append(w.readers, jobSR, podSR, genericSR), } sw.StatusReader = sr return w.wait(ctx, resourceList, sw) @@ -93,6 +92,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er defer cancel() slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...) 
return w.wait(ctx, resourceList, sw) } @@ -105,7 +105,9 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) - customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) + readers := append([]engine.StatusReader(nil), w.readers...) + readers = append(readers, newCustomJobStatusReader) + customSR := statusreaders.NewStatusReader(w.restMapper, readers...) sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) } @@ -132,7 +134,9 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL } resources = append(resources, obj) } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{ + RESTScopeStrategy: watcher.RESTScopeNamespace, + }) statusCollector := collector.NewResourceStatusCollector(resources) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) <-done @@ -175,7 +179,9 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w resources = append(resources, obj) } - eventCh := sw.Watch(cancelCtx, resources, watcher.Options{}) + eventCh := sw.Watch(cancelCtx, resources, watcher.Options{ + RESTScopeStrategy: watcher.RESTScopeNamespace, + }) statusCollector := collector.NewResourceStatusCollector(resources) done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) <-done diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 4b06da896..a4e7ff62c 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -14,25 +14,36 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube // import "helm.sh/helm/v3/pkg/kube" +package kube // import "helm.sh/helm/v4/pkg/kube" import ( + "context" "errors" + "fmt" + "strings" + "sync/atomic" "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" dynamicfake "k8s.io/client-go/dynamic/fake" + clienttesting "k8s.io/client-go/testing" "k8s.io/kubectl/pkg/scheme" ) @@ -153,6 +164,83 @@ spec: - containerPort: 80 ` +var podNamespace1Manifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod-ns1 + namespace: namespace-1 +status: + conditions: + - type: Ready + status: "True" + phase: Running +` + +var podNamespace2Manifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod-ns2 + namespace: namespace-2 +status: + conditions: + - type: Ready + status: "True" + phase: Running +` + +var podNamespace1NoStatusManifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod-ns1 + namespace: namespace-1 +` + +var jobNamespace1CompleteManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: job-ns1 + namespace: namespace-1 + generation: 1 +status: + succeeded: 1 + active: 0 + conditions: + - type: Complete + status: "True" +` + +var podNamespace2SucceededManifest = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod-ns2 + namespace: namespace-2 +status: + phase: Succeeded +` + +var clusterRoleManifest = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-cluster-role +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +` + +var namespaceManifest = ` +apiVersion: v1 +kind: Namespace +metadata: + name: test-namespace +` + func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource { t.Helper() gvk := obj.GroupVersionKind() @@ -232,11 +320,11 @@ func TestStatusWaitForDelete(t *testing.T) { for _, objToDelete := range objsToDelete { u := objToDelete.(*unstructured.Unstructured) gvr := getGVR(t, fakeMapper, u) - go func() { + go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) { time.Sleep(timeUntilPodDelete) err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) assert.NoError(t, err) - }() + }(gvr, u) } resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) @@ -448,3 +536,675 @@ func TestWatchForReady(t *testing.T) { }) } } + +func TestStatusWaitMultipleNamespaces(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + expectErrs []error + testFunc func(statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "pods in multiple namespaces", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "hooks in multiple namespaces", + objManifests: 
[]string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + { + name: "error when resource not ready in one namespace", + objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, + expectErrs: []error{errors.New("resource not ready, name: pod-ns1, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "delete resources in multiple namespaces", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitForDelete(rl, timeout) + }, + }, + { + name: "cluster-scoped resources work correctly with unrestricted permissions", + objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "namespace-scoped and cluster-scoped resources work together", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest, clusterRoleManifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "delete cluster-scoped resources works correctly", + objManifests: []string{podNamespace1Manifest, namespaceManifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitForDelete(rl, timeout) + }, + }, + { + name: "watch cluster-scoped resources works correctly", + objManifests: []string{clusterRoleManifest}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"), + v1.SchemeGroupVersion.WithKind("Namespace"), + ) + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + + if strings.Contains(tt.name, "delete") { + timeUntilDelete := time.Millisecond * 500 + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) { + time.Sleep(timeUntilDelete) + err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) + assert.NoError(t, err) + }(gvr, u) + } + } + + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(sw, resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +// restrictedClientConfig holds the configuration for RBAC simulation on a fake dynamic client +type restrictedClientConfig struct { + 
allowedNamespaces map[string]bool + clusterScopedListAttempted bool +} + +// setupRestrictedClient configures a fake dynamic client to simulate RBAC restrictions +// by using PrependReactor and PrependWatchReactor to intercept list/watch operations. +func setupRestrictedClient(fakeClient *dynamicfake.FakeDynamicClient, allowedNamespaces []string) *restrictedClientConfig { + allowed := make(map[string]bool) + for _, ns := range allowedNamespaces { + allowed[ns] = true + } + config := &restrictedClientConfig{ + allowedNamespaces: allowed, + } + + // Intercept list operations + fakeClient.PrependReactor("list", "*", func(action clienttesting.Action) (bool, runtime.Object, error) { + listAction := action.(clienttesting.ListAction) + ns := listAction.GetNamespace() + if ns == "" { + // Cluster-scoped list + config.clusterScopedListAttempted = true + return true, nil, apierrors.NewForbidden( + action.GetResource().GroupResource(), + "", + fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources"), + ) + } + if !config.allowedNamespaces[ns] { + return true, nil, apierrors.NewForbidden( + action.GetResource().GroupResource(), + "", + fmt.Errorf("user does not have LIST permissions in namespace %q", ns), + ) + } + // Fall through to the default handler + return false, nil, nil + }) + + // Intercept watch operations + fakeClient.PrependWatchReactor("*", func(action clienttesting.Action) (bool, watch.Interface, error) { + watchAction := action.(clienttesting.WatchAction) + ns := watchAction.GetNamespace() + if ns == "" { + // Cluster-scoped watch + config.clusterScopedListAttempted = true + return true, nil, apierrors.NewForbidden( + action.GetResource().GroupResource(), + "", + fmt.Errorf("user does not have cluster-wide WATCH permissions for cluster-scoped resources"), + ) + } + if !config.allowedNamespaces[ns] { + return true, nil, apierrors.NewForbidden( + action.GetResource().GroupResource(), + "", + fmt.Errorf("user does not have WATCH permissions in namespace %q", ns), + ) + } + // Fall through to the default handler + return false, nil, nil + }) + + return config +} + +func TestStatusWaitRestrictedRBAC(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + allowedNamespaces []string + expectErrs []error + testFunc func(statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "pods in multiple namespaces with namespace permissions", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + allowedNamespaces: []string{"namespace-1", "namespace-2"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "delete pods in multiple namespaces with namespace permissions", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + allowedNamespaces: []string{"namespace-1", "namespace-2"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitForDelete(rl, timeout) + }, + }, + { + name: "hooks in multiple namespaces with namespace permissions", + objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, + allowedNamespaces: []string{"namespace-1", "namespace-2"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + { + name: "error when cluster-scoped resource included", + objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, + allowedNamespaces: 
[]string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "error when deleting cluster-scoped resource", + objManifests: []string{podNamespace1Manifest, namespaceManifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitForDelete(rl, timeout) + }, + }, + { + name: "error when accessing disallowed namespace", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + baseFakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"), + v1.SchemeGroupVersion.WithKind("Namespace"), + ) + restrictedConfig := setupRestrictedClient(baseFakeClient, tt.allowedNamespaces) + sw := statusWaiter{ + client: baseFakeClient, + restMapper: fakeMapper, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := baseFakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + + if strings.Contains(tt.name, "delet") { + timeUntilDelete := time.Millisecond * 500 + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) { + time.Sleep(timeUntilDelete) + err := baseFakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) + assert.NoError(t, err) + }(gvr, u) + } + } + + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(sw, resourceList, time.Second*3) + if tt.expectErrs != nil { + require.Error(t, err) + for _, expectedErr := range tt.expectErrs { + assert.Contains(t, err.Error(), expectedErr.Error()) + } + return + } + assert.NoError(t, err) + assert.False(t, restrictedConfig.clusterScopedListAttempted) + }) + } +} + +func TestStatusWaitMixedResources(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + allowedNamespaces []string + expectErrs []error + testFunc func(statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "wait succeeds with namespace-scoped resources only", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + allowedNamespaces: []string{"namespace-1", "namespace-2"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "wait fails when cluster-scoped resource included", + objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: 
[]error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "waitForDelete fails when cluster-scoped resource included", + objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitForDelete(rl, timeout) + }, + }, + { + name: "wait fails when namespace resource included", + objManifests: []string{podNamespace1Manifest, namespaceManifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "error when accessing disallowed namespace", + objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, + allowedNamespaces: []string{"namespace-1"}, + expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + baseFakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"), + v1.SchemeGroupVersion.WithKind("Namespace"), + ) + restrictedConfig := setupRestrictedClient(baseFakeClient, tt.allowedNamespaces) + sw := statusWaiter{ + client: baseFakeClient, + restMapper: fakeMapper, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := baseFakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + + if strings.Contains(tt.name, "delet") { + timeUntilDelete := time.Millisecond * 500 + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) { + time.Sleep(timeUntilDelete) + err := baseFakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) + assert.NoError(t, err) + }(gvr, u) + } + } + + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(sw, resourceList, time.Second*3) + if tt.expectErrs != nil { + require.Error(t, err) + for _, expectedErr := range tt.expectErrs { + assert.Contains(t, err.Error(), expectedErr.Error()) + } + return + } + assert.NoError(t, err) + assert.False(t, restrictedConfig.clusterScopedListAttempted) + }) + } +} + +// mockStatusReader is a custom status reader for testing that tracks when it's used +// and returns a configurable status for resources it supports. 
+type mockStatusReader struct { + supportedGK schema.GroupKind + status status.Status + callCount atomic.Int32 +} + +func (m *mockStatusReader) Supports(gk schema.GroupKind) bool { + return gk == m.supportedGK +} + +func (m *mockStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) { + m.callCount.Add(1) + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) { + m.callCount.Add(1) + id := object.ObjMetadata{ + Namespace: u.GetNamespace(), + Name: u.GetName(), + GroupKind: u.GroupVersionKind().GroupKind(), + } + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func TestStatusWaitWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes pod immediately current", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader returns in-progress status", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader for different resource type is not used", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.Wait(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes job immediately current", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader for pod works with WaitWithJobs", + 
objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "built-in job reader is still appended after custom readers", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +func TestWatchUntilReadyWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes job immediately current for hooks", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader makes pod immediately current for hooks", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader takes precedence over built-in pod reader", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: good-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader takes precedence over built-in job reader", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader for different resource type does not affect pods", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: nil, + }, + { + name: "built-in readers still work when custom reader does not match", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), 
+ status: status.InProgressStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index f776ae471..9a276a459 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -102,11 +102,20 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo if err == nil { return false } - slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err)) + slog.Debug( + "error received when checking resource status", + slog.String("resource", resource.Name), + slog.Any("error", err), + ) if ev, ok := err.(*apierrors.StatusError); ok { statusCode := ev.Status().Code retryable := hw.isRetryableHTTPStatusCode(statusCode) - slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable) + slog.Debug( + "status code received", + slog.String("resource", resource.Name), + slog.Int("statusCode", int(statusCode)), + slog.Bool("retryable", retryable), + ) return retryable } slog.Debug("retryable error assumed", "resource", resource.Name) @@ -137,9 +146,9 @@ func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duratio elapsed := time.Since(startTime).Round(time.Second) if err != nil { - slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err)) + slog.Debug("wait for resources failed", slog.Duration("elapsed", elapsed), slog.Any("error", err)) } else { - slog.Debug("wait for resources succeeded", "elapsed", elapsed) + slog.Debug("wait for resources succeeded", slog.Duration("elapsed", elapsed)) } return err @@ -324,6 +333,8 @@ func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool slog.Debug("pod pending", "pod", o.Name) case corev1.PodRunning: slog.Debug("pod running", "pod", o.Name) + case corev1.PodUnknown: + slog.Debug("pod unknown", "pod", o.Name) } return false, nil diff --git a/pkg/repo/v1/chartrepo_test.go b/pkg/repo/v1/chartrepo_test.go index a707cf36d..353ab62d6 100644 --- a/pkg/repo/v1/chartrepo_test.go +++ b/pkg/repo/v1/chartrepo_test.go @@ -94,7 +94,7 @@ func TestIndexCustomSchemeDownload(t *testing.T) { } } -func TestConcurrenyDownloadIndex(t *testing.T) { +func TestConcurrencyDownloadIndex(t *testing.T) { srv, err := startLocalServerForTests(nil) if err != nil { t.Fatal(err) diff --git a/pkg/repo/v1/index.go b/pkg/repo/v1/index.go index d77d70a7f..7969d64e9 100644 --- a/pkg/repo/v1/index.go +++ b/pkg/repo/v1/index.go @@ -215,7 +215,9 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) { } if constraint.Check(test) { 
- slog.Warn("unable to find exact version; falling back to closest available version", "chart", name, "requested", version, "selected", ver.Version) + if len(version) != 0 { + slog.Warn("unable to find exact version requested; falling back to closest available version", "chart", name, "requested", version, "selected", ver.Version) + } return ver, nil } } diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index 5af432d8a..f82ade5e9 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -75,13 +75,13 @@ func (cfgmaps *ConfigMaps) Get(key string) (release.Releaser, error) { return nil, ErrReleaseNotFound } - cfgmaps.Logger().Debug("failed to get release", "key", key, slog.Any("error", err)) + cfgmaps.Logger().Debug("failed to get release", slog.String("key", key), slog.Any("error", err)) return nil, err } // found the configmap, decode the base64 data string r, err := decodeRelease(obj.Data["release"]) if err != nil { - cfgmaps.Logger().Debug("failed to decode data", "key", key, slog.Any("error", err)) + cfgmaps.Logger().Debug("failed to decode data", slog.String("key", key), slog.Any("error", err)) return nil, err } r.Labels = filterSystemLabels(obj.Labels) @@ -109,7 +109,7 @@ func (cfgmaps *ConfigMaps) List(filter func(release.Releaser) bool) ([]release.R for _, item := range list.Items { rls, err := decodeRelease(item.Data["release"]) if err != nil { - cfgmaps.Logger().Debug("failed to decode release", "item", item, slog.Any("error", err)) + cfgmaps.Logger().Debug("failed to decode release", slog.Any("item", item), slog.Any("error", err)) continue } @@ -181,7 +181,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls release.Releaser) error { // create a new configmap to hold the release obj, err := newConfigMapsObject(key, rel, lbs) if err != nil { - cfgmaps.Logger().Debug("failed to encode release", "name", rac.Name(), slog.Any("error", err)) + cfgmaps.Logger().Debug("failed to encode release", slog.String("name", rac.Name()), slog.Any("error", err)) return err } // push the configmap object out into the kubiverse @@ -214,7 +214,11 @@ func (cfgmaps *ConfigMaps) Update(key string, rel release.Releaser) error { // create a new configmap object to hold the release obj, err := newConfigMapsObject(key, rls, lbs) if err != nil { - cfgmaps.Logger().Debug("failed to encode release", "name", rls.Name, slog.Any("error", err)) + cfgmaps.Logger().Debug( + "failed to encode release", + slog.String("name", rls.Name), + slog.Any("error", err), + ) return err } // push the configmap object out into the kubiverse diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index 85f3497e7..a73f3cf05 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -103,7 +103,10 @@ func (secrets *Secrets) List(filter func(release.Releaser) bool) ([]release.Rele for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Logger().Debug("list failed to decode release", "key", item.Name, slog.Any("error", err)) + secrets.Logger().Debug( + "list failed to decode release", slog.String("key", item.Name), + slog.Any("error", err), + ) continue } @@ -142,7 +145,11 @@ func (secrets *Secrets) Query(labels map[string]string) ([]release.Releaser, err for _, item := range list.Items { rls, err := decodeRelease(string(item.Data["release"])) if err != nil { - secrets.Logger().Debug("failed to decode release", "key", item.Name, slog.Any("error", err)) + secrets.Logger().Debug( + 
"failed to decode release", + slog.String("key", item.Name), + slog.Any("error", err), + ) continue } rls.Labels = item.Labels diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go index b6ea3916d..21d9f6679 100644 --- a/pkg/storage/driver/sql.go +++ b/pkg/storage/driver/sql.go @@ -319,18 +319,23 @@ func (s *SQL) Get(key string) (release.Releaser, error) { // Get will return an error if the result is empty if err := s.db.Get(&record, query, args...); err != nil { - s.Logger().Debug("got SQL error when getting release", "key", key, slog.Any("error", err)) + s.Logger().Debug("got SQL error when getting release", slog.String("key", key), slog.Any("error", err)) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Logger().Debug("failed to decode data", "key", key, slog.Any("error", err)) + s.Logger().Debug("failed to decode data", slog.String("key", key), slog.Any("error", err)) return nil, err } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Logger().Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) + s.Logger().Debug( + "failed to get release custom labels", + slog.String("namespace", s.namespace), + slog.String("key", key), + slog.Any("error", err), + ) return nil, err } @@ -365,12 +370,17 @@ func (s *SQL) List(filter func(release.Releaser) bool) ([]release.Releaser, erro for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Logger().Debug("failed to decode release", "record", record, slog.Any("error", err)) + s.Logger().Debug("failed to decode release", slog.Any("record", record), slog.Any("error", err)) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Logger().Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) + s.Logger().Debug( + "failed to get release custom labels", + slog.String("namespace", record.Namespace), + slog.String("key", record.Key), + slog.Any("error", err), + ) return nil, err } maps.Copy(release.Labels, getReleaseSystemLabels(release)) @@ -429,12 +439,17 @@ func (s *SQL) Query(labels map[string]string) ([]release.Releaser, error) { for _, record := range records { release, err := decodeRelease(record.Body) if err != nil { - s.Logger().Debug("failed to decode release", "record", record, slog.Any("error", err)) + s.Logger().Debug("failed to decode release", slog.Any("record", record), slog.Any("error", err)) continue } if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { - s.Logger().Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err)) + s.Logger().Debug( + "failed to get release custom labels", + slog.String("namespace", record.Namespace), + slog.String("key", record.Key), + slog.Any("error", err), + ) return nil, err } @@ -518,11 +533,11 @@ func (s *SQL) Create(key string, rel release.Releaser) error { var record SQLReleaseWrapper if err := transaction.Get(&record, selectQuery, args...); err == nil { - s.Logger().Debug("release already exists", "key", key) + s.Logger().Debug("release already exists", slog.String("key", key)) return ErrReleaseExists } - s.Logger().Debug("failed to store release in SQL database", "key", key, slog.Any("error", err)) + s.Logger().Debug("failed to store release in SQL database", slog.String("key", key), slog.Any("error", 
err)) return err } @@ -596,7 +611,7 @@ func (s *SQL) Update(key string, rel release.Releaser) error { } if _, err := s.db.Exec(query, args...); err != nil { - s.Logger().Debug("failed to update release in SQL database", "key", key, slog.Any("error", err)) + s.Logger().Debug("failed to update release in SQL database", slog.String("key", key), slog.Any("error", err)) return err } @@ -625,13 +640,13 @@ func (s *SQL) Delete(key string) (release.Releaser, error) { var record SQLReleaseWrapper err = transaction.Get(&record, selectQuery, args...) if err != nil { - s.Logger().Debug("release not found", "key", key, slog.Any("error", err)) + s.Logger().Debug("release not found", slog.String("key", key), slog.Any("error", err)) return nil, ErrReleaseNotFound } release, err := decodeRelease(record.Body) if err != nil { - s.Logger().Debug("failed to decode release", "key", key, slog.Any("error", err)) + s.Logger().Debug("failed to decode release", slog.String("key", key), slog.Any("error", err)) transaction.Rollback() return nil, err } @@ -654,7 +669,11 @@ func (s *SQL) Delete(key string) (release.Releaser, error) { } if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { - s.Logger().Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err)) + s.Logger().Debug( + "failed to get release custom labels", + slog.String("namespace", s.namespace), + slog.String("key", key), + slog.Any("error", err)) return nil, err } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 07dc12c7b..d6c41635b 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -293,7 +293,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error { key := makeKey(name, version) _, err := s.Delete(name, version) if err != nil { - s.Logger().Debug("error pruning release", "key", key, slog.Any("error", err)) + s.Logger().Debug("error pruning release", slog.String("key", key), slog.Any("error", err)) return err } return nil diff --git a/pkg/strvals/literal_parser.go b/pkg/strvals/literal_parser.go index d34e5e854..d5d4c25b4 100644 --- a/pkg/strvals/literal_parser.go +++ b/pkg/strvals/literal_parser.go @@ -17,6 +17,7 @@ package strvals import ( "bytes" + "errors" "fmt" "io" "strconv" @@ -66,7 +67,7 @@ func (t *literalParser) parse() error { if err == nil { continue } - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return err @@ -183,7 +184,7 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([] case lastRune == '=': value, err := t.val() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return list, err } return setIndex(list, i, string(value)) diff --git a/pkg/strvals/parser.go b/pkg/strvals/parser.go index 86e349f37..8eb761dce 100644 --- a/pkg/strvals/parser.go +++ b/pkg/strvals/parser.go @@ -161,7 +161,7 @@ func (t *parser) parse() error { if err == nil { continue } - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return err diff --git a/scripts/sync-repo.sh b/scripts/sync-repo.sh index 453102072..0651d3634 100755 --- a/scripts/sync-repo.sh +++ b/scripts/sync-repo.sh @@ -44,6 +44,7 @@ verify_prereqs() { } confirm() { + # shellcheck disable=SC2154 case $response in [yY][eE][sS]|[yY]) true @@ -66,11 +67,11 @@ main() { verify_prereqs # dry run of the command - gsutil rsync -d -n $1 gs://$2 + gsutil rsync -d -n "$1" gs://"$2" - read -p "Are you sure you would like to continue with these changes? 
[y/N]} " confirm + read -r -p "Are you sure you would like to continue with these changes? [y/N] " confirm if [[ $confirm =~ [yY](es)* ]]; then - gsutil rsync -d $1 gs://$2 + gsutil rsync -d "$1" gs://"$2" else error_exit "Discontinuing sync process." fi