diff --git a/.github/env b/.github/env index 9d79b174f..43ebdd36e 100644 --- a/.github/env +++ b/.github/env @@ -1,2 +1,2 @@ -GOLANG_VERSION=1.25 -GOLANGCI_LINT_VERSION=v2.5.0 +GOLANG_VERSION=1.26 +GOLANGCI_LINT_VERSION=v2.11.3 diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 54fa3a078..32c524ed3 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -18,11 +18,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0 + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # pin@6.4.0 with: go-version: '${{ env.GOLANG_VERSION }}' check-latest: true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0f3fe6d8f..6714fc8de 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,14 +24,15 @@ on: schedule: - cron: '29 6 * * 6' -permissions: - contents: read - security-events: write +permissions: {} jobs: analyze: name: Analyze runs-on: ubuntu-latest + permissions: + contents: read + security-events: write strategy: fail-fast: false @@ -43,11 +44,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7 + uses: github/codeql-action/init@e46ed2cbd01164d986452f91f178727624ae40d7 # pinv4.35.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -58,7 +59,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7 + uses: github/codeql-action/autobuild@e46ed2cbd01164d986452f91f178727624ae40d7 # pinv4.35.3 # â„šī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -72,4 +73,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7 + uses: github/codeql-action/analyze@e46ed2cbd01164d986452f91f178727624ae40d7 # pinv4.35.3 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9d5723329..8b64a3be2 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -13,11 +13,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0 + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # pin@6.4.0 with: go-version: '${{ env.GOLANG_VERSION }}' check-latest: true diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index e8f2560e3..6453ddd40 100644 --- a/.github/workflows/govulncheck.yml +++ 
b/.github/workflows/govulncheck.yml @@ -19,13 +19,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 with: persist-credentials: false - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0 + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # pin@6.4.0 with: go-version: '${{ env.GOLANG_VERSION }}' check-latest: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cf8595742..ab8a4a509 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest-16-cores steps: - name: Checkout source code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 with: fetch-depth: 0 @@ -28,7 +28,7 @@ jobs: run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0 + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # pin@6.4.0 with: go-version: '${{ env.GOLANG_VERSION }}' check-latest: true @@ -85,13 +85,15 @@ jobs: if: github.ref == 'refs/heads/main' && github.repository == 'helm/helm' steps: - name: Checkout source code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # pin@v6.0.2 + with: + fetch-depth: 0 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # pin@6.1.0 + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # pin@6.4.0 with: go-version: '${{ env.GOLANG_VERSION }}' 
check-latest: true diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 514a649cb..5d78ddd61 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -28,7 +28,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false @@ -55,7 +55,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: SARIF file path: results.sarif @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@v4 + uses: github/codeql-action/upload-sarif@e46ed2cbd01164d986452f91f178727624ae40d7 # v4.35.3 with: sarif_file: results.sarif diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 574427a5d..9b0c29952 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -3,11 +3,16 @@ on: schedule: - cron: "0 0 * * *" +permissions: {} + jobs: stale: runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write steps: - - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1 + - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue has been marked as stale because it has been open for 90 days with no activity. This thread will be automatically closed in 30 days if no further activity occurs.' 
diff --git a/.gitignore b/.gitignore index 0fd2c6bda..2209e9809 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ .DS_Store .coverage/ .idea +.claude .vimrc .vscode/ .devcontainer/ diff --git a/.golangci.yml b/.golangci.yml index 7eca135e5..1ed3353b4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,22 +17,27 @@ formatters: linters: default: none + # Keep sorted alphabetically enable: - depguard - dupl + - exhaustive - gomodguard - govet - ineffassign - misspell + - modernize - nakedret + - nolintlint + - perfsprint - revive - sloglint - staticcheck + - testifylint - thelper - unused - usestdlibvars - usetesting - - exhaustive exclusions: @@ -44,16 +49,9 @@ linters: - legacy - std-error-handling - rules: - # This rule is triggered for packages like 'util'. When changes to those packages - # occur it triggers this rule. This exclusion enables making changes to existing - # packages. - - linters: - - revive - text: 'var-naming: avoid meaningless package names' - warn-unused: true + # Keep sorted alphabetically settings: depguard: rules: @@ -67,6 +65,9 @@ linters: dupl: threshold: 400 + exhaustive: + default-signifies-exhaustive: true + gomodguard: blocked: modules: @@ -74,8 +75,44 @@ linters: recommendations: - github.com/evanphx/json-patch/v5 - exhaustive: - default-signifies-exhaustive: true + nolintlint: + require-specific: true + + revive: + enable-all-rules: false + enable-default-rules: true + max-open-files: 2048 + rules: + - name: unnecessary-format + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["VM"] # DenyList + - - skip-initialism-name-checks: true + upper-case-const: true + skip-package-name-checks: true + skip-package-name-collision-with-go-std: true + extra-bad-package-names: + - helpers + - models + + testifylint: + disable: + - empty + - encoded-compare + - equal-values + - error-is-as + - error-nil + - expected-actual + - float-compare + - go-require + - len + - nil-compare + - require-error + - suite-dont-use-pkg + - 
suite-extra-assert-call + # Intentionally enable all testifylint rules so new checks are adopted automatically. + enable-all: true run: timeout: 10m diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 000000000..ab2089882 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,74 @@ +version: 2 + +project_name: helm + +dist: _dist +builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + - arm + - "386" + - ppc64le + - s390x + - riscv64 + - loong64 + goamd64: + - v1 + goarm: + - "7" + goarm64: + - v8.0 + go386: + - sse2 + goriscv64: + - rva20u64 + ignore: + - goos: darwin + goarch: "386" + - goos: darwin + goarch: arm + - goos: darwin + goarch: ppc64le + - goos: darwin + goarch: s390x + - goos: darwin + goarch: riscv64 + - goos: darwin + goarch: loong64 + - goos: windows + goarch: "386" + - goos: windows + goarch: arm + - goos: windows + goarch: ppc64le + - goos: windows + goarch: s390x + - goos: windows + goarch: riscv64 + - goos: windows + goarch: loong64 + main: ./cmd/helm + no_unique_dist_dir: true + binary: "{{ .Os }}-{{ .Arch }}/helm" + ldflags: + - "{{ .Env.LDFLAGS }}" + flags: + - -trimpath + dir: . + +snapshot: + version_template: "{{ if index .Env \"GORELEASER_CURRENT_TAG\" }}{{ .Env.GORELEASER_CURRENT_TAG }}{{ else }}{{ incpatch .Version }}-next{{ end }}" + +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/AGENTS.md b/AGENTS.md index d2904a9da..a2e192f74 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,48 +1,88 @@ # AGENTS.md ## Overview -Helm is a package manager for Kubernetes written in Go, supporting v3 (stable) and v4 (unstable) APIs. -## Build & Test +Helm is a package manager for Kubernetes written in Go. It enables users to define, install, and upgrade complex Kubernetes applications using charts. +This document provides an overview of the codebase structure, development guidelines, and key patterns for contributors. 
+ +The codebase supports both an SDK for advanced users, and a CLI for direct end user usage. + +The project currently supports Helm v3 and Helm v4 versions, based on the `dev-v3` and `main` branches respectively. + +## Build and test + ```bash make build # Build binary make test # Run all tests (style + unit) make test-unit # Unit tests only make test-coverage # With coverage -make test-style # Linting -golangci-lint run # Direct linting +make test-style # Linting (wraps golangci-lint) go test -run TestName # Specific test ``` -## Code Structure -- `/cmd/helm/` - CLI entry point (Cobra-based) -- `/pkg/` - Public API +## Code structure + +Major packages: + +- `cmd/helm/` - CLI entry point, wires CLI flags to `pkg/cmd/` commands +- `pkg/` - Public API - `action/` - Core operations (install, upgrade, rollback) + - `cmd/` - Cobra command implementations bridging CLI flags to `pkg/action/` - `chart/v2/` - Stable chart format - `engine/` - Template rendering (Go templates + Sprig) + - `kube/` - Kubernetes client abstraction layer - `registry/` - OCI support + - `release/` - Release types and interfaces (`v1/`, `common/`) + - `repo/` - Chart repository indexing and interaction - `storage/` - Release backends (Secrets/ConfigMaps/SQL) -- `/internal/` - Private implementation +- `internal/` - Private implementations - `chart/v3/` - Next-gen chart format + - `release/v2/` - Release package for chart v3 support + +## Development + +### Compatibility + +Changes are required to maintain backward compatibility as described in [HIP-0004: Document backwards-compatibility rules](https://github.com/helm/community/blob/main/hips/hip-0004.md). 
+ +Typically this means that: -## Development Guidelines +- the signatures of public APIs, i.e., those in the `pkg/` directory should not change +- CLI commands and parameters should not be removed or changed in a way that would break existing scripts or workflows +- functional behaviour (as implied or documented) must not be modified in a way that would break existing users' expectations + +An exception to the above is where incompatible changes are needed to fix a security vulnerability, where minimal breaking changes may be made to address the issue. + +### Code standards -### Code Standards - Use table-driven tests with testify - Golden files in `testdata/` for complex output - Mock Kubernetes clients for action tests - All commits must include DCO sign-off: `git commit -s` ### Branching -- `main` - Helm v4 development -- `dev-v3` - Helm v3 stable (backport from main) -### Dependencies +Standard workflow is for PR development changes to the `main` branch. Minor release branches are cut from `main`, then maintained for critical fixes via patch releases. +Bug and security fixes are also backported to `dev-v3` where applicable. 
+ +Development branches: + +- `main` - Helm v4 +- `dev-v3` - Helm v3 (backport security and bugfixes from main) + +Release branches: + +- `release-v3.X` - Release branches for v3.X versions +- `release-v4.X` - Release branches for v4.X versions + +### Major dependencies + - `k8s.io/client-go` - Kubernetes interaction - `github.com/spf13/cobra` - CLI framework - `github.com/Masterminds/sprig` - Template functions -### Key Patterns -- **Actions**: Operations in `/pkg/action/` use shared Configuration -- **Dual Chart Support**: v2 (stable) in `/pkg/`, v3 (dev) in `/internal/` -- **Storage Abstraction**: Pluggable release storage backends +### Key patterns + +- **Actions**: High-level operations live in `pkg/action/`, typically using a shared Configuration +- **Chart versions**: Charts v2 (stable) in `pkg/chart/v2`, v3 (under development) in `internal/chart/v3` +- **Plugins and extensibility**: Enabling additional functionality via plugins and extension points, such as custom template functions or storage backends is preferred over incorporating into Helm's codebase diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e809e7ca2..7aa19972f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,13 @@ vulnerability_, please email a report to [cncf-helm-security@lists.cncf.io](mailto:cncf-helm-security@lists.cncf.io). This will give us a chance to try to fix the issue before it is exploited in the wild. -## Helm v3 and v4 +## Helm v3 -Helm v4 is currently under development on the `main` branch. During the development of Helm v4 and for some time after its released, Helm v3 will continue to be supported and developed on the `dev-v3` branch. Helm v3 will continue to get bug fixes and updates for new Kubernetes releases. Helm v4 is where new features and major changes will happen. For features to be backported to Helm v3, an exception will be needed. Bugs should first be fixed on Helm v4 and then backported to Helm v3. 
+Helm v4 development takes place on the `main` branch while Helm v3 is on the `dev-v3` branch. + +Helm v3 will continue to receive bug fixes and updates for new Kubernetes releases until July 8th 2026. Security enhancements will still be applied until November 11th 2026. See the blog for more details. + +Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting new features. ## Sign Your Work @@ -158,9 +162,9 @@ There are 5 types of issues (each with their own corresponding [label](#labels)) for future reference. Generally these are questions that are too complex or large to store in the Slack channel or have particular interest to the community as a whole. Depending on the discussion, these can turn into `feature` or `bug` issues. -- `proposal`: Used for items (like this one) that propose a new ideas or functionality that require +- `proposal`: Used for items (like this one) that propose new ideas or functionality that require a larger community discussion. This allows for feedback from others in the community before a - feature is actually developed. This is not needed for small additions. Final word on whether + feature is actually developed. This is not needed for small additions. Final word on whether a feature needs a proposal is up to the core maintainers. All issues that are proposals should both have a label and an issue title of "Proposal: [the rest of the title]." A proposal can become a `feature` and does not require a milestone. 
diff --git a/KEYS b/KEYS index e772fff40..21ca3487f 100644 --- a/KEYS +++ b/KEYS @@ -1058,3 +1058,23 @@ K6V08VpFmniENmCDHshXYq0gGiTDAP9FsXl2UtmFU5xuYxH4fRKIxgmxJRAFMWI8 u3Rdu/s+DQ== =smBO -----END PGP PUBLIC KEY BLOCK----- +pub ed25519 2026-02-08 [SC] + BF888333D96A1C18E2682AAED79D67C9EC016739 +uid [ultimate] George Jenkins +sig 3 D79D67C9EC016739 2026-02-08 [self-signature] +sub cv25519 2026-02-08 [E] +sig D79D67C9EC016739 2026-02-08 [self-signature] + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEaYgDUBYJKwYBBAHaRw8BAQdAWKYkFrwgmfaY/hUq5Z2YpEy8WACKclo2eV/n +1ausaEy0JEdlb3JnZSBKZW5raW5zIDxndmplbmtpbnNAZ21haWwuY29tPoiTBBMW +CgA7FiEEv4iDM9lqHBjiaCqu151nyewBZzkFAmmIA1ACGwMFCwkIBwICIgIGFQoJ +CAsCBBYCAwECHgcCF4AACgkQ151nyewBZzlP0gD/ZFhm9FikdlZO5pW7xWR4YnP4 +yFAuY32G9dNdFn1x1p4BALR8Rtpp68eC9R8bq3/r1dK8gwig8DMWirdYaf2ePKoL +uDgEaYgDUBIKKwYBBAGXVQEFAQEHQJwM3R9CTypooHz/4w1waXAct8K2wA1bwi1r +yfb6uMMKAwEIB4h4BBgWCgAgFiEEv4iDM9lqHBjiaCqu151nyewBZzkFAmmIA1AC +GwwACgkQ151nyewBZzlgYAEAoVwYdoO6f3VwGukpv7RtKwF7PQC9AnBUx98TZZ6t +IaoA/RR14NXYYcd0fCwN6sFPq58/NbNkRHBrfw1CntxiJcYD +=duOC +-----END PGP PUBLIC KEY BLOCK----- diff --git a/Makefile b/Makefile index a18b83f0d..81b149a68 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,6 @@ BINDIR := $(CURDIR)/bin INSTALL_PATH ?= /usr/local/bin DIST_DIRS := find * -type d -exec -TARGETS := darwin/amd64 darwin/arm64 linux/amd64 linux/386 linux/arm linux/arm64 linux/loong64 linux/ppc64le linux/s390x linux/riscv64 windows/amd64 windows/arm64 TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 darwin-amd64.tar.gz.sha256sum darwin-arm64.tar.gz darwin-arm64.tar.gz.sha256 darwin-arm64.tar.gz.sha256sum linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-amd64.tar.gz.sha256sum linux-386.tar.gz linux-386.tar.gz.sha256 linux-386.tar.gz.sha256sum linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm.tar.gz.sha256sum linux-arm64.tar.gz linux-arm64.tar.gz.sha256 linux-arm64.tar.gz.sha256sum linux-loong64.tar.gz linux-loong64.tar.gz.sha256 
linux-loong64.tar.gz.sha256sum linux-ppc64le.tar.gz linux-ppc64le.tar.gz.sha256 linux-ppc64le.tar.gz.sha256sum linux-s390x.tar.gz linux-s390x.tar.gz.sha256 linux-s390x.tar.gz.sha256sum linux-riscv64.tar.gz linux-riscv64.tar.gz.sha256 linux-riscv64.tar.gz.sha256sum windows-amd64.zip windows-amd64.zip.sha256 windows-amd64.zip.sha256sum windows-arm64.zip windows-arm64.zip.sha256 windows-arm64.zip.sha256sum BINNAME ?= helm @@ -9,7 +8,7 @@ GOBIN = $(shell go env GOBIN) ifeq ($(GOBIN),) GOBIN = $(shell go env GOPATH)/bin endif -GOX = $(GOBIN)/gox +GORELEASER = $(GOBIN)/goreleaser GOIMPORTS = $(GOBIN)/goimports ARCH = $(shell go env GOARCH) @@ -130,8 +129,7 @@ test-source-headers: @scripts/validate-license.sh .PHONY: test-acceptance -test-acceptance: TARGETS = linux/amd64 -test-acceptance: build build-cross +test-acceptance: build @if [ -d "${ACCEPTANCE_DIR}" ]; then \ cd ${ACCEPTANCE_DIR} && \ ROBOT_RUN_TESTS=$(ACCEPTANCE_RUN_TESTS) ROBOT_HELM_PATH='$(BINDIR)' make acceptance; \ @@ -162,8 +160,8 @@ gen-test-golden: test-unit # dependencies to the go.mod file. 
To avoid that we change to a directory # without a go.mod file when downloading the following dependencies -$(GOX): - (cd /; go install github.com/mitchellh/gox@v1.0.2-0.20220701044238-9f712387e2d2) +$(GORELEASER): + (cd /; go install github.com/goreleaser/goreleaser/v2@latest) $(GOIMPORTS): (cd /; go install golang.org/x/tools/cmd/goimports@latest) @@ -173,8 +171,8 @@ $(GOIMPORTS): .PHONY: build-cross build-cross: LDFLAGS += -extldflags "-static" -build-cross: $(GOX) - GOFLAGS="-trimpath" CGO_ENABLED=0 $(GOX) -parallel=3 -output="_dist/{{.OS}}-{{.Arch}}/$(BINNAME)" -osarch='$(TARGETS)' $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' ./cmd/helm +build-cross: $(GORELEASER) + LDFLAGS='$(LDFLAGS)' $(GORELEASER) build --snapshot --clean .PHONY: dist dist: diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 66d342500..0c4f697b6 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -17,6 +17,7 @@ limitations under the License. package main // import "helm.sh/helm/v4/cmd/helm" import ( + "errors" "log/slog" "os" @@ -41,7 +42,8 @@ func main() { } if err := cmd.Execute(); err != nil { - if cerr, ok := err.(helmcmd.CommandError); ok { + var cerr helmcmd.CommandError + if errors.As(err, &cerr) { os.Exit(cerr.ExitCode) } os.Exit(1) diff --git a/cmd/helm/helm_test.go b/cmd/helm/helm_test.go index 0458e8037..a9362f772 100644 --- a/cmd/helm/helm_test.go +++ b/cmd/helm/helm_test.go @@ -18,6 +18,7 @@ package main import ( "bytes" + "errors" "os" "os/exec" "runtime" @@ -60,14 +61,15 @@ func TestCliPluginExitCode(t *testing.T) { cmd.Stderr = stderr err := cmd.Run() - exiterr, ok := err.(*exec.ExitError) + exiterr := &exec.ExitError{} + ok := errors.As(err, &exiterr) if !ok { t.Fatalf("Unexpected error type returned by os.Exit: %T", err) } assert.Empty(t, stdout.String()) - expectedStderr := "Error: plugin \"exitwith\" exited with error\n" + expectedStderr := "level=WARN msg=\"failed to load plugin (ignoring)\" 
plugin_yaml=../../pkg/cmd/testdata/helmhome/helm/plugins/noversion/plugin.yaml error=\"failed to load plugin \\\"../../pkg/cmd/testdata/helmhome/helm/plugins/noversion\\\": plugin `version` is required\"\nError: plugin \"exitwith\" exited with error\n" if stderr.String() != expectedStderr { t.Errorf("Expected %q written to stderr: Got %q", expectedStderr, stderr.String()) } diff --git a/go.mod b/go.mod index 8b5b2f4a3..40c02c7e1 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,30 @@ module helm.sh/helm/v4 -go 1.25.0 +go 1.26.0 require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/BurntSushi/toml v1.6.0 github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/semver/v3 v3.4.0 + github.com/Masterminds/semver/v3 v3.5.0 github.com/Masterminds/sprig/v3 v3.3.0 github.com/Masterminds/squirrel v1.5.4 github.com/Masterminds/vcs v1.13.3 - github.com/ProtonMail/go-crypto v1.3.0 + github.com/ProtonMail/go-crypto v1.4.1 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 github.com/cyphar/filepath-securejoin v0.6.1 - github.com/distribution/distribution/v3 v3.0.0 + github.com/distribution/distribution/v3 v3.1.1 github.com/evanphx/json-patch/v5 v5.9.11 github.com/extism/go-sdk v1.7.1 - github.com/fatih/color v1.18.0 - github.com/fluxcd/cli-utils v0.37.0-flux.1 + github.com/fatih/color v1.19.0 + github.com/fluxcd/cli-utils v1.2.0 github.com/foxcpp/go-mockdns v1.2.0 github.com/gobwas/glob v0.2.3 github.com/gofrs/flock v0.13.0 github.com/gosuri/uitable v0.0.4 github.com/jmoiron/sqlx v1.4.0 - github.com/lib/pq v1.10.9 - github.com/mattn/go-shellwords v1.0.12 + github.com/lib/pq v1.12.3 + github.com/mattn/go-shellwords v1.0.13 github.com/moby/term v0.5.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 @@ -35,21 +35,21 @@ require ( github.com/stretchr/testify v1.11.1 github.com/tetratelabs/wazero v1.11.0 go.yaml.in/yaml/v3 v3.0.4 - golang.org/x/crypto v0.46.0 - golang.org/x/term v0.38.0 - 
golang.org/x/text v0.32.0 + golang.org/x/crypto v0.50.0 + golang.org/x/term v0.42.0 + golang.org/x/text v0.36.0 gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.35.0 - k8s.io/apiextensions-apiserver v0.35.0 - k8s.io/apimachinery v0.35.0 - k8s.io/apiserver v0.35.0 - k8s.io/cli-runtime v0.35.0 - k8s.io/client-go v0.35.0 - k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.35.0 + k8s.io/api v0.36.0 + k8s.io/apiextensions-apiserver v0.36.0 + k8s.io/apimachinery v0.36.0 + k8s.io/apiserver v0.36.0 + k8s.io/cli-runtime v0.36.0 + k8s.io/client-go v0.36.0 + k8s.io/klog/v2 v2.140.0 + k8s.io/kubectl v0.36.0 oras.land/oras-go/v2 v2.6.0 - sigs.k8s.io/controller-runtime v0.22.4 - sigs.k8s.io/kustomize/kyaml v0.21.0 + sigs.k8s.io/controller-runtime v0.24.0 + sigs.k8s.io/kustomize/kyaml v0.21.1 sigs.k8s.io/yaml v1.6.0 ) @@ -60,21 +60,21 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/bshuster-repo/logrus-logstash-hook v1.1.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/cloudflare/circl v1.6.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cloudflare/circl v1.6.3 // indirect + github.com/coreos/go-systemd/v22 v22.7.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect - github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/docker-credential-helpers v0.9.5 // indirect + 
github.com/docker/go-events v0.0.0-20250808211157-605354379745 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect @@ -91,8 +91,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/huandu/xstrings v1.5.0 // indirect @@ -100,12 +99,12 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.4 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/miekg/dns v1.1.57 // indirect @@ -116,66 +115,67 @@ require ( github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/gomega v1.38.2 // indirect + github.com/onsi/gomega v1.39.1 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/otlptranslator v1.0.0 // indirect + github.com/prometheus/procfs v0.20.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect github.com/redis/go-redis/v9 v9.7.3 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect - go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.67.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.18.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.64.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.18.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.42.0 // indirect + 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 // indirect + go.opentelemetry.io/otel/log v0.19.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.19.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.39.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect - google.golang.org/grpc v1.72.2 // indirect - google.golang.org/protobuf v1.36.8 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/oauth2 v0.36.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/time v0.15.0 // indirect + golang.org/x/tools v0.43.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/grpc v1.80.0 // indirect + google.golang.org/protobuf v1.36.12-0.20260120151049-f2248ac996af // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.35.0 // indirect - k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + k8s.io/component-base v0.36.0 // indirect + k8s.io/kube-openapi 
v0.0.0-20260317180543-43fb72c5454a // indirect + k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect - sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/api v0.21.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect ) diff --git a/go.sum b/go.sum index 4e39db6d8..f1a2ca9f8 100644 --- a/go.sum +++ b/go.sum @@ -14,18 +14,20 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= -github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.5.0 h1:kQceYJfbupGfZOKZQg0kou0DgAKhzDg2NZPAwZ/2OOE= +github.com/Masterminds/semver/v3 v3.5.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Masterminds/vcs v1.13.3 h1:IIA2aBdXvfbIM+yl/eTnL4hb1XwdpvuQLglAix1gweE= github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8= -github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= -github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= 
+github.com/ProtonMail/go-crypto v1.4.1 h1:9RfcZHqEQUvP8RzecWEUafnZVtEvrBVL9BiF67IQOfM= +github.com/ProtonMail/go-crypto v1.4.1/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alicebob/miniredis/v2 v2.35.0 h1:QwLphYqCEAo1eu1TqPRN2jgVMPBweeQcR21jeqDCONI= +github.com/alicebob/miniredis/v2 v2.35.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -34,25 +36,25 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.1.0 h1:o2FzZifLg+z/DN1OFmzTWzZZx/roaqt8IPZCIVco8r4= +github.com/bshuster-repo/logrus-logstash-hook v1.1.0/go.mod h1:Q2aXOe7rNuPgbBtPCOzYyWDvKX7+FpxE5sRdvcPoui0= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 
github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 
h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -65,34 +67,34 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= -github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/distribution/v3 v3.1.1 h1:KUbk7C8CfaLXy8kbf/hGq9cad/wCoLB6dbWH6DMbmX0= +github.com/distribution/distribution/v3 v3.1.1/go.mod h1:d7lXwZpph0bVcOj4Aqn0nMrWHIwRQGdiV5TLeI+/w6Y= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY= +github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/docker/go-events v0.0.0-20250808211157-605354379745 
h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4= +github.com/docker/go-events v0.0.0-20250808211157-605354379745/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a h1:UwSIFv5g5lIvbGgtf3tVwC7Ky9rmMFBp0RMs+6f6YqE= github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C8DzXehI4zAbrdlbtOByKX6pfivJTBiV9Jjqv56Yd9Q= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/extism/go-sdk v1.7.1 h1:lWJos6uY+tRFdlIHR+SJjwFDApY7OypS/2nMhiVQ9Sw= github.com/extism/go-sdk v1.7.1/go.mod h1:IT+Xdg5AZM9hVtpFUA+uZCJMge/hbvshl8bwzLtFyKA= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w= +github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop 
v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluxcd/cli-utils v0.37.0-flux.1 h1:k/VvPNT3tGa/l2N+qzHduaQr3GVbgoWS6nw7tGZz16w= -github.com/fluxcd/cli-utils v0.37.0-flux.1/go.mod h1:aND5wX3LuTFtB7eUT7vsWr8mmxRVSPR2Wkvbn0SqPfw= +github.com/fluxcd/cli-utils v1.2.0 h1:1o07pXTMxJ/XJ1GpAbLtjdXwfCUMq4Ku1OcnvJHLohI= +github.com/fluxcd/cli-utils v1.2.0/go.mod h1:d5HdTDdR5sCbsIbgtOQ7x7srKYwYeZORU6CD2yn4j/M= github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0= github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -126,7 +128,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -143,8 +144,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ= -github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/pprof 
v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -153,10 +154,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= @@ -177,8 +176,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -191,21 +190,21 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.12.3 h1:tTWxr2YLKwIvK90ZXEw8GP7UFHtcbTtty8zsI+YjrfQ= +github.com/lib/pq v1.12.3/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= 
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.13 h1:DC0OMEpGjm6LfNFU4ckYcvbQKyp2vE8atyFGXNtDcf4= +github.com/mattn/go-shellwords v1.0.13/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -232,10 +231,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= 
-github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -259,13 +258,15 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos= +github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= @@ -286,8 +287,9 @@ github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepq github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= @@ -314,58 +316,62 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xlab/treeprint v1.2.0 
h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= 
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod 
h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/bridges/prometheus v0.67.0 h1:dkBzNEAIKADEaFnuESzcXvpd09vxvDZsOjx11gjUqLk= +go.opentelemetry.io/contrib/bridges/prometheus v0.67.0/go.mod h1:Z5RIwRkZgauOIfnG5IpidvLpERjhTninpP1dTG2jTl4= +go.opentelemetry.io/contrib/exporters/autoexport v0.67.0 h1:4fnRcNpc6YFtG3zsFw9achKn3XgmxPxuMuqIL5rE8e8= +go.opentelemetry.io/contrib/exporters/autoexport v0.67.0/go.mod h1:qTvIHMFKoxW7HXg02gm6/Wofhq5p3Ib/A/NNt1EoBSQ= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.18.0 h1:deI9UQMoGFgrg5iLPgzueqFPHevDl+28YKfSpPTI6rY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.18.0/go.mod h1:PFx9NgpNUKXdf7J4Q3agRxMs3Y07QhTCVipKmLsMKnU= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 h1:HIBTQ3VO5aupLKjC90JgMqpezVXwFuq6Ryjn0/izoag= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0/go.mod h1:ji9vId85hMxqfvICA0Jt8JqEdrXaAkcpkI9HPXya0ro= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0 h1:MdKucPl/HbzckWWEisiNqMPhRrAOQX8r4jTuGr636gk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0/go.mod h1:RolT8tWtfHcjajEH5wFIZ4Dgh5jpPdFXYV9pTAk/qjc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0 h1:g0LRDXMX/G1SEZtK8zl8Chm4K6GBwRkjPKE36LxiTYs= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0/go.mod h1:UrgcjnarfdlBDP3GjDIJWe6HTprwSazNjwsI+Ru6hro= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.18.0 h1:KJVjPD3rcPb98rIs3HznyJlrfx9ge5oJvxxlGR+P/7s= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.18.0/go.mod h1:K3kRa2ckmHWQaTWQdPRHc7qGXASuVuoEQXzrvlA98Ws= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.42.0 h1:lSZHgNHfbmQTPfuTmWVkEu8J8qXaQwuV30pjCcAUvP8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.42.0/go.mod h1:so9ounLcuoRDu033MW/E0AD4hhUjVqswrMF5FoZlBcw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 h1:s/1iRkCKDfhlh1JF26knRneorus8aOwVIDhvYx9WoDw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0/go.mod h1:UI3wi0FXg1Pofb8ZBiBLhtMzgoTm1TYkMvn71fAqDzs= +go.opentelemetry.io/otel/log v0.19.0 h1:KUZs/GOsw79TBBMfDWsXS+KZ4g2Ckzksd1ymzsIEbo4= +go.opentelemetry.io/otel/log v0.19.0/go.mod h1:5DQYeGmxVIr4n0/BcJvF4upsraHjg6vudJJpnkL6Ipk= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/log v0.19.0 h1:scYVLqT22D2gqXItnWiocLUKGH9yvkkeql5dBDiXyko= +go.opentelemetry.io/otel/sdk/log v0.19.0/go.mod h1:vFBowwXGLlW9AvpuF7bMgnNI95LiW10szrOdvzBHlAg= +go.opentelemetry.io/otel/sdk/log/logtest v0.19.0 h1:BEbF7ZBB6qQloV/Ub1+3NQoOUnVtcGkU3XX4Ws3GQfk= +go.opentelemetry.io/otel/sdk/log/logtest v0.19.0/go.mod h1:Lua81/3yM0wOmoHTokLj9y9ADeA02v1naRrVrkAZuKk= +go.opentelemetry.io/otel/sdk/metric 
v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= @@ -376,14 +382,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod 
h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -394,10 +400,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod 
h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -406,27 +412,26 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -434,8 +439,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term 
v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -443,27 +448,29 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools 
v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= -google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= +google.golang.org/protobuf 
v1.36.12-0.20260120151049-f2248ac996af h1:+5/Sw3GsDNlEmu7TfklWKPdQ0Ykja5VEmq2i817+jbI= +google.golang.org/protobuf v1.36.12-0.20260120151049-f2248ac996af/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -478,41 +485,41 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= -k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= -k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= -k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= -k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= -k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= -k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= -k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= 
-k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= -k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.36.0 h1:SgqDhZzHdOtMk40xVSvCXkP9ME0H05hPM3p9AB1kL80= +k8s.io/api v0.36.0/go.mod h1:m1LVrGPNYax5NBHdO+QuAedXyuzTt4RryI/qnmNvs34= +k8s.io/apiextensions-apiserver v0.36.0 h1:Wt7E8J+VBCbj4FjiBfDTK/neXDDjyJVJc7xfuOHImZ0= +k8s.io/apiextensions-apiserver v0.36.0/go.mod h1:kGDjH0msuiIB3tgsYRV0kS9GqpMYMUsQ3GHv7TApyug= +k8s.io/apimachinery v0.36.0 h1:jZyPzhd5Z+3h9vJLt0z9XdzW9VzNzWAUw+P1xZ9PXtQ= +k8s.io/apimachinery v0.36.0/go.mod h1:FklypaRJt6n5wUIwWXIP6GJlIpUizTgfo1T/As+Tyxc= +k8s.io/apiserver v0.36.0 h1:Jg5OFAENUACByUCg15CmhZAYrr5ZyJ+jodyA1mHl3YE= +k8s.io/apiserver v0.36.0/go.mod h1:mHvwdHf+qKEm+1/hYm756SV+oREOKSPnsjagOpx6Vho= +k8s.io/cli-runtime v0.36.0 h1:HNxciQpQMMOKS0/GiUXcKDyA6J2FDILJj9NmP2BZrTg= +k8s.io/cli-runtime v0.36.0/go.mod h1:KObkknK9Ro5LYX+1RdiKc7C8CvGg4aX+V/Zv+E8WPHA= +k8s.io/client-go v0.36.0 h1:pOYi7C4RHChYjMiHpZSpSbIM6ZxVbRXBy7CuiIwqA3c= +k8s.io/client-go v0.36.0/go.mod h1:ZKKcpwF0aLYfkHFCjillCKaTK/yBkEDHTDXCFY6AS9Y= +k8s.io/component-base v0.36.0 h1:hFjEktssxiJhrK1zfybkH4kJOi8iZuF+mIDCqS5+jRo= +k8s.io/component-base v0.36.0/go.mod h1:JZvIfcNHk+uck+8LhJzhSBtydWXaZNQwX2OdL+Mnwsk= +k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc= +k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0= 
+k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a h1:xCeOEAOoGYl2jnJoHkC3hkbPJgdATINPMAxaynU2Ovg= +k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0= +k8s.io/kubectl v0.36.0 h1:hEGr8NvIm2Wjqs2Xy48Uzmvo6lpHdGKlLyMvau2gTms= +k8s.io/kubectl v0.36.0/go.mod h1:iDe8aV5BEi45W8k+5n71I2pJ/nwE0PHDu+/2cejzYoo= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.24.0 h1:Ck6N2LdS8Lovy1o25BB4r1xjvLEKUl1s2o9kU+KWDE4= +sigs.k8s.io/controller-runtime v0.24.0/go.mod h1:vFkfY5fGt5xAC/sKb8IBFKgWPNKG9OUG29dR8Y2wImw= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= -sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= -sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ= -sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= +sigs.k8s.io/kustomize/api v0.21.1 h1:lzqbzvz2CSvsjIUZUBNFKtIMsEw7hVLJp0JeSIVmuJs= +sigs.k8s.io/kustomize/api v0.21.1/go.mod h1:f3wkKByTrgpgltLgySCntrYoq5d3q7aaxveSagwTlwI= +sigs.k8s.io/kustomize/kyaml v0.21.1 h1:IVlbmhC076nf6foyL6Taw4BkrLuEsXUXNpsE+ScX7fI= +sigs.k8s.io/kustomize/kyaml v0.21.1/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= 
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/chart/v3/chart.go b/internal/chart/v3/chart.go index 48f006e79..d76a5688c 100644 --- a/internal/chart/v3/chart.go +++ b/internal/chart/v3/chart.go @@ -45,11 +45,11 @@ type Chart struct { // Templates for this chart. Templates []*common.File `json:"templates"` // Values are default config for this chart. - Values map[string]interface{} `json:"values"` + Values map[string]any `json:"values"` // Schema is an optional JSON schema for imposing structure on Values Schema []byte `json:"schema"` // SchemaModTime the schema was last modified - SchemaModTime time.Time `json:"schemamodtime,omitempty"` + SchemaModTime time.Time `json:"schemamodtime"` // Files are miscellaneous files in a chart archive, // e.g. README, LICENSE, etc. 
Files []*common.File `json:"files"` diff --git a/internal/chart/v3/chart_test.go b/internal/chart/v3/chart_test.go index 07cbf4b39..5f6ca548d 100644 --- a/internal/chart/v3/chart_test.go +++ b/internal/chart/v3/chart_test.go @@ -124,8 +124,8 @@ func TestIsRoot(t *testing.T) { is := assert.New(t) - is.Equal(false, chrt1.IsRoot()) - is.Equal(true, chrt2.IsRoot()) + is.False(chrt1.IsRoot()) + is.True(chrt2.IsRoot()) } func TestChartPath(t *testing.T) { diff --git a/internal/chart/v3/dependency.go b/internal/chart/v3/dependency.go index 2d956b548..50ee5552e 100644 --- a/internal/chart/v3/dependency.go +++ b/internal/chart/v3/dependency.go @@ -44,7 +44,7 @@ type Dependency struct { Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a // string or pair of child/parent sublist items. - ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"` + ImportValues []any `json:"import-values,omitempty" yaml:"import-values,omitempty"` // Alias usable alias to be used for the chart Alias string `json:"alias,omitempty" yaml:"alias,omitempty"` } diff --git a/internal/chart/v3/errors.go b/internal/chart/v3/errors.go index 059e43f07..156dce1de 100644 --- a/internal/chart/v3/errors.go +++ b/internal/chart/v3/errors.go @@ -25,6 +25,6 @@ func (v ValidationError) Error() string { } // ValidationErrorf takes a message and formatting options and creates a ValidationError -func ValidationErrorf(msg string, args ...interface{}) ValidationError { +func ValidationErrorf(msg string, args ...any) ValidationError { return ValidationError(fmt.Sprintf(msg, args...)) } diff --git a/internal/chart/v3/lint/lint.go b/internal/chart/v3/lint/lint.go index 0cd949065..e98edfabe 100644 --- a/internal/chart/v3/lint/lint.go +++ b/internal/chart/v3/lint/lint.go @@ -43,7 +43,7 @@ func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption { } } -func 
RunAll(baseDir string, values map[string]interface{}, namespace string, options ...LinterOption) support.Linter { +func RunAll(baseDir string, values map[string]any, namespace string, options ...LinterOption) support.Linter { chartDir, _ := filepath.Abs(baseDir) diff --git a/internal/chart/v3/lint/lint_test.go b/internal/chart/v3/lint/lint_test.go index 221de8572..afacb8052 100644 --- a/internal/chart/v3/lint/lint_test.go +++ b/internal/chart/v3/lint/lint_test.go @@ -95,7 +95,7 @@ func TestInvalidYaml(t *testing.T) { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") { - t.Errorf("All didn't have the error for deliberateSyntaxError") + t.Error("All didn't have the error for deliberateSyntaxError") } } @@ -107,7 +107,7 @@ func TestInvalidChartYamlV3(t *testing.T) { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") { - t.Errorf("All didn't have the error for duplicate YAML keys") + t.Error("All didn't have the error for duplicate YAML keys") } } @@ -231,13 +231,13 @@ func TestMalformedTemplate(t *testing.T) { }() select { case <-c: - t.Fatalf("lint malformed template timeout") + t.Fatal("lint malformed template timeout") case <-ch: if len(m) != 1 { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "invalid character '{'") { - t.Errorf("All didn't have the error for invalid character '{'") + t.Error("All didn't have the error for invalid character '{'") } } } diff --git a/internal/chart/v3/lint/rules/chartfile.go b/internal/chart/v3/lint/rules/chartfile.go index fc246ba80..29991a8d5 100644 --- a/internal/chart/v3/lint/rules/chartfile.go +++ b/internal/chart/v3/lint/rules/chartfile.go @@ -69,15 +69,15 @@ func Chartfile(linter *support.Linter) { linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) } -func 
validateChartVersionType(data map[string]interface{}) error { +func validateChartVersionType(data map[string]any) error { return isStringValue(data, "version") } -func validateChartAppVersionType(data map[string]interface{}) error { +func validateChartAppVersionType(data map[string]any) error { return isStringValue(data, "appVersion") } -func isStringValue(data map[string]interface{}, key string) error { +func isStringValue(data map[string]any, key string) error { value, ok := data[key] if !ok { return nil @@ -152,7 +152,7 @@ func validateChartVersion(cf *chart.Metadata) error { valid, msg := c.Validate(version) if !valid && len(msg) > 0 { - return fmt.Errorf("version %v", msg[0]) + return fmt.Errorf("version %w", msg[0]) } return nil @@ -214,12 +214,12 @@ func validateChartType(cf *chart.Metadata) error { // loadChartFileForTypeCheck loads the Chart.yaml // in a generic form of a map[string]interface{}, so that the type // of the values can be checked -func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { +func loadChartFileForTypeCheck(filename string) (map[string]any, error) { b, err := os.ReadFile(filename) if err != nil { return nil, err } - y := make(map[string]interface{}) + y := make(map[string]any) err = yaml.Unmarshal(b, &y) return y, err } diff --git a/internal/chart/v3/lint/rules/chartfile_test.go b/internal/chart/v3/lint/rules/chartfile_test.go index 57893e151..a7669a0aa 100644 --- a/internal/chart/v3/lint/rules/chartfile_test.go +++ b/internal/chart/v3/lint/rules/chartfile_test.go @@ -50,26 +50,26 @@ func TestValidateChartYamlNotDirectory(t *testing.T) { err := validateChartYamlNotDirectory(nonExistingChartFilePath) if err == nil { - t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error") + t.Error("validateChartYamlNotDirectory to return a linter error, got no error") } } func TestValidateChartYamlFormat(t *testing.T) { err := validateChartYamlFormat(errors.New("Read error")) if err == nil { - 
t.Errorf("validateChartYamlFormat to return a linter error, got no error") + t.Error("validateChartYamlFormat to return a linter error, got no error") } err = validateChartYamlFormat(nil) if err != nil { - t.Errorf("validateChartYamlFormat to return no error, got a linter error") + t.Error("validateChartYamlFormat to return no error, got a linter error") } } func TestValidateChartName(t *testing.T) { err := validateChartName(badChart) if err == nil { - t.Errorf("validateChartName to return a linter error, got no error") + t.Error("validateChartName to return a linter error, got no error") } err = validateChartName(badChartName) @@ -149,7 +149,7 @@ func TestValidateChartMaintainer(t *testing.T) { badChart.Maintainers = []*chart.Maintainer{nil} err := validateChartMaintainer(badChart) if err == nil { - t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected") + t.Error("validateChartMaintainer did not return error for nil maintainer as expected") } if err.Error() != "a maintainer entry is empty" { t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error()) @@ -185,7 +185,7 @@ func TestValidateChartIconPresence(t *testing.T) { err := validateChartIconPresence(testChart) if err == nil { - t.Errorf("validateChartIconPresence to return a linter error, got no error") + t.Error("validateChartIconPresence to return a linter error, got no error") } else if !strings.Contains(err.Error(), "icon is recommended") { t.Errorf("expected %q, got %q", "icon is recommended", err.Error()) } diff --git a/internal/chart/v3/lint/rules/crds.go b/internal/chart/v3/lint/rules/crds.go index deedeb0f2..0a479d214 100644 --- a/internal/chart/v3/lint/rules/crds.go +++ b/internal/chart/v3/lint/rules/crds.go @@ -19,7 +19,6 @@ package rules import ( "bytes" "errors" - "fmt" "io" "io/fs" "os" @@ -102,14 +101,14 @@ func validateCrdsDir(crdsPath string) error { func validateCrdAPIVersion(obj *k8sYamlStruct) error { if 
!strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") { - return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'") + return errors.New("apiVersion is not in 'apiextensions.k8s.io'") } return nil } func validateCrdKind(obj *k8sYamlStruct) error { if obj.Kind != "CustomResourceDefinition" { - return fmt.Errorf("object kind is not 'CustomResourceDefinition'") + return errors.New("object kind is not 'CustomResourceDefinition'") } return nil } diff --git a/internal/chart/v3/lint/rules/dependencies_test.go b/internal/chart/v3/lint/rules/dependencies_test.go index b80e4b8a9..31fd0ef33 100644 --- a/internal/chart/v3/lint/rules/dependencies_test.go +++ b/internal/chart/v3/lint/rules/dependencies_test.go @@ -72,7 +72,7 @@ func TestValidateDependencyInMetadata(t *testing.T) { c := chartWithBadDependencies() if err := validateDependencyInMetadata(&c); err == nil { - t.Errorf("chart should have been flagged for missing deps in chart metadata") + t.Error("chart should have been flagged for missing deps in chart metadata") } } @@ -132,7 +132,7 @@ func TestValidateDependenciesUnique(t *testing.T) { for _, tt := range tests { if err := validateDependenciesUnique(&tt.chart); err == nil { - t.Errorf("chart should have been flagged for dependency shadowing") + t.Error("chart should have been flagged for dependency shadowing") } } } diff --git a/internal/chart/v3/lint/rules/deprecations_test.go b/internal/chart/v3/lint/rules/deprecations_test.go index 35e541e5c..2b12a88f9 100644 --- a/internal/chart/v3/lint/rules/deprecations_test.go +++ b/internal/chart/v3/lint/rules/deprecations_test.go @@ -36,6 +36,6 @@ func TestValidateNoDeprecations(t *testing.T) { APIVersion: "v1", Kind: "Pod", }, nil); err != nil { - t.Errorf("Expected a v1 Pod to not be deprecated") + t.Error("Expected a v1 Pod to not be deprecated") } } diff --git a/internal/chart/v3/lint/rules/template.go b/internal/chart/v3/lint/rules/template.go index 38e602b7e..a8ae910eb 100644 --- 
a/internal/chart/v3/lint/rules/template.go +++ b/internal/chart/v3/lint/rules/template.go @@ -28,8 +28,8 @@ import ( "slices" "strings" + "k8s.io/apimachinery/pkg/api/validate/content" "k8s.io/apimachinery/pkg/api/validation" - apipath "k8s.io/apimachinery/pkg/api/validation/path" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" @@ -42,17 +42,17 @@ import ( ) // Templates lints the templates in the Linter. -func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) { +func Templates(linter *support.Linter, values map[string]any, namespace string, _ bool) { TemplatesWithKubeVersion(linter, values, namespace, nil) } // TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version. -func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) { +func TemplatesWithKubeVersion(linter *support.Linter, values map[string]any, namespace string, kubeVersion *common.KubeVersion) { TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false) } // TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not. 
-func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) { +func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]any, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) { fpath := "templates/" templatesPath := filepath.Join(linter.ChartDir, fpath) @@ -292,7 +292,7 @@ func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc { case "role", "clusterrole", "rolebinding", "clusterrolebinding": // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34 return func(name string, _ bool) []string { - return apipath.IsValidPathSegmentName(name) + return content.IsPathSegmentName(name) } default: return validation.NameIsDNSSubdomain diff --git a/internal/chart/v3/lint/rules/template_test.go b/internal/chart/v3/lint/rules/template_test.go index 0ffc92002..b1371659f 100644 --- a/internal/chart/v3/lint/rules/template_test.go +++ b/internal/chart/v3/lint/rules/template_test.go @@ -49,7 +49,7 @@ func TestValidateAllowedExtension(t *testing.T) { } } -var values = map[string]interface{}{"nameOverride": "", "httpPort": 80} +var values = map[string]any{"nameOverride": "", "httpPort": 80} const namespace = "testNamespace" const strict = false @@ -249,7 +249,7 @@ func TestStrictTemplateParsingMapError(t *testing.T) { APIVersion: "v2", Version: "0.1.0", }, - Values: map[string]interface{}{ + Values: map[string]any{ "mymap": map[string]string{ "key1": "val1", }, diff --git a/internal/chart/v3/lint/rules/values.go b/internal/chart/v3/lint/rules/values.go index 0af9765dd..b4a2edb0c 100644 --- a/internal/chart/v3/lint/rules/values.go +++ b/internal/chart/v3/lint/rules/values.go @@ -17,6 +17,7 @@ limitations under the License. package rules import ( + "errors" "fmt" "os" "path/filepath" @@ -32,7 +33,7 @@ import ( // they are only tested for well-formedness. 
// // If additional values are supplied, they are coalesced into the values in values.yaml. -func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) { +func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]any, skipSchemaValidation bool) { file := "values.yaml" vf := filepath.Join(linter.ChartDir, file) fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf)) @@ -47,12 +48,12 @@ func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]inter func validateValuesFileExistence(valuesPath string) error { _, err := os.Stat(valuesPath) if err != nil { - return fmt.Errorf("file does not exist") + return errors.New("file does not exist") } return nil } -func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error { +func validateValuesFile(valuesPath string, overrides map[string]any, skipSchemaValidation bool) error { values, err := common.ReadValuesFile(valuesPath) if err != nil { return fmt.Errorf("unable to parse YAML: %w", err) @@ -63,7 +64,7 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}, ski // We could change that. 
For now, though, we retain that strategy, and thus can // coalesce tables (like reuse-values does) instead of doing the full chart // CoalesceValues - coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues := util.CoalesceTables(make(map[string]any, len(overrides)), overrides) coalescedValues = util.CoalesceTables(coalescedValues, values) ext := filepath.Ext(valuesPath) diff --git a/internal/chart/v3/lint/rules/values_test.go b/internal/chart/v3/lint/rules/values_test.go index 288b77436..54c7e6457 100644 --- a/internal/chart/v3/lint/rules/values_test.go +++ b/internal/chart/v3/lint/rules/values_test.go @@ -57,7 +57,7 @@ func TestValidateValuesYamlNotDirectory(t *testing.T) { err := validateValuesFileExistence(nonExistingValuesFilePath) if err == nil { - t.Errorf("validateValuesFileExistence to return a linter error, got no error") + t.Error("validateValuesFileExistence to return a linter error, got no error") } } @@ -67,7 +67,7 @@ func TestValidateValuesFileWellFormed(t *testing.T) { ` tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml)) valfile := filepath.Join(tmpdir, "values.yaml") - if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil { + if err := validateValuesFile(valfile, map[string]any{}, false); err == nil { t.Fatal("expected values file to fail parsing") } } @@ -78,7 +78,7 @@ func TestValidateValuesFileSchema(t *testing.T) { createTestingSchema(t, tmpdir) valfile := filepath.Join(tmpdir, "values.yaml") - if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil { + if err := validateValuesFile(valfile, map[string]any{}, false); err != nil { t.Fatalf("Failed validation with %s", err) } } @@ -91,7 +91,7 @@ func TestValidateValuesFileSchemaFailure(t *testing.T) { valfile := filepath.Join(tmpdir, "values.yaml") - err := validateValuesFile(valfile, map[string]interface{}{}, false) + err := validateValuesFile(valfile, map[string]any{}, 
false) if err == nil { t.Fatal("expected values file to fail parsing") } @@ -107,7 +107,7 @@ func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T valfile := filepath.Join(tmpdir, "values.yaml") - err := validateValuesFile(valfile, map[string]interface{}{}, true) + err := validateValuesFile(valfile, map[string]any{}, true) if err != nil { t.Fatal("expected values file to pass parsing because of skipSchemaValidation") } @@ -115,7 +115,7 @@ func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T func TestValidateValuesFileSchemaOverrides(t *testing.T) { yaml := "username: admin" - overrides := map[string]interface{}{ + overrides := map[string]any{ "password": "swordfish", } tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml)) @@ -131,24 +131,24 @@ func TestValidateValuesFile(t *testing.T) { tests := []struct { name string yaml string - overrides map[string]interface{} + overrides map[string]any errorMessage string }{ { name: "value added", yaml: "username: admin", - overrides: map[string]interface{}{"password": "swordfish"}, + overrides: map[string]any{"password": "swordfish"}, }, { name: "value not overridden", yaml: "username: admin\npassword:", - overrides: map[string]interface{}{"username": "anotherUser"}, + overrides: map[string]any{"username": "anotherUser"}, errorMessage: "- at '/password': got null, want string", }, { name: "value overridden", yaml: "username: admin\npassword:", - overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"}, + overrides: map[string]any{"username": "anotherUser", "password": "swordfish"}, }, } diff --git a/internal/chart/v3/loader/archive.go b/internal/chart/v3/loader/archive.go index a9d4faf8f..442b8871d 100644 --- a/internal/chart/v3/loader/archive.go +++ b/internal/chart/v3/loader/archive.go @@ -57,7 +57,7 @@ func LoadFile(name string) (*chart.Chart, error) { c, err := LoadArchive(raw) if err != nil { if errors.Is(err, gzip.ErrHeader) { - 
return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) + return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %w)", name, err) } } return c, err diff --git a/internal/chart/v3/loader/load.go b/internal/chart/v3/loader/load.go index 373c4659f..48f346ccf 100644 --- a/internal/chart/v3/loader/load.go +++ b/internal/chart/v3/loader/load.go @@ -182,11 +182,11 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { // // The reader is expected to contain one or more YAML documents, the values of which are merged. // And the values can be either a chart's default values or user-supplied values. -func LoadValues(data io.Reader) (map[string]interface{}, error) { - values := map[string]interface{}{} +func LoadValues(data io.Reader) (map[string]any, error) { + values := map[string]any{} reader := utilyaml.NewYAMLReader(bufio.NewReader(data)) for { - currentMap := map[string]interface{}{} + currentMap := map[string]any{} raw, err := reader.Read() if err != nil { if errors.Is(err, io.EOF) { @@ -204,13 +204,13 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) { // MergeMaps merges two maps. If a key exists in both maps, the value from b will be used. // If the value is a map, the maps will be merged recursively. 
-func MergeMaps(a, b map[string]interface{}) map[string]interface{} { - out := make(map[string]interface{}, len(a)) +func MergeMaps(a, b map[string]any) map[string]any { + out := make(map[string]any, len(a)) maps.Copy(out, a) for k, v := range b { - if v, ok := v.(map[string]interface{}); ok { + if v, ok := v.(map[string]any); ok { if bv, ok := out[k]; ok { - if bv, ok := bv.(map[string]interface{}); ok { + if bv, ok := bv.(map[string]any); ok { out[k] = MergeMaps(bv, v) continue } diff --git a/internal/chart/v3/loader/load_test.go b/internal/chart/v3/loader/load_test.go index 12403f9c2..c4c252407 100644 --- a/internal/chart/v3/loader/load_test.go +++ b/internal/chart/v3/loader/load_test.go @@ -61,7 +61,7 @@ func TestLoadDirWithDevNull(t *testing.T) { t.Fatalf("Failed to load testdata: %s", err) } if _, err := l.Load(); err == nil { - t.Errorf("packages with an irregular file (/dev/null) should not load") + t.Error("packages with an irregular file (/dev/null) should not load") } } @@ -455,7 +455,7 @@ func TestLoadInvalidArchive(t *testing.T) { func TestLoadValues(t *testing.T) { testCases := map[string]struct { data []byte - expctedValues map[string]interface{} + expctedValues map[string]any }{ "It should load values correctly": { data: []byte(` @@ -464,11 +464,11 @@ foo: bar: version: v2 `), - expctedValues: map[string]interface{}{ - "foo": map[string]interface{}{ + expctedValues: map[string]any{ + "foo": map[string]any{ "image": "foo:v1", }, - "bar": map[string]interface{}{ + "bar": map[string]any{ "version": "v2", }, }, @@ -483,11 +483,11 @@ bar: foo: image: foo:v2 `), - expctedValues: map[string]interface{}{ - "foo": map[string]interface{}{ + expctedValues: map[string]any{ + "foo": map[string]any{ "image": "foo:v2", }, - "bar": map[string]interface{}{ + "bar": map[string]any{ "version": "v2", }, }, @@ -507,24 +507,24 @@ foo: } func TestMergeValuesV3(t *testing.T) { - nestedMap := map[string]interface{}{ + nestedMap := map[string]any{ "foo": "bar", "baz": 
map[string]string{ "cool": "stuff", }, } - anotherNestedMap := map[string]interface{}{ + anotherNestedMap := map[string]any{ "foo": "bar", "baz": map[string]string{ "cool": "things", "awesome": "stuff", }, } - flatMap := map[string]interface{}{ + flatMap := map[string]any{ "foo": "bar", "baz": "stuff", } - anotherFlatMap := map[string]interface{}{ + anotherFlatMap := map[string]any{ "testing": "fun", } @@ -547,7 +547,7 @@ func TestMergeValuesV3(t *testing.T) { } testMap = MergeMaps(anotherFlatMap, anotherNestedMap) - expectedMap := map[string]interface{}{ + expectedMap := map[string]any{ "testing": "fun", "foo": "bar", "baz": map[string]string{ diff --git a/internal/chart/v3/metadata.go b/internal/chart/v3/metadata.go index 4629d571b..5f7cea897 100644 --- a/internal/chart/v3/metadata.go +++ b/internal/chart/v3/metadata.go @@ -112,6 +112,9 @@ func (md *Metadata) Validate() error { return ValidationError("chart.metadata.name is required") } + if md.Name == "." || md.Name == ".." { + return ValidationErrorf("chart.metadata.name %q is not allowed", md.Name) + } if md.Name != filepath.Base(md.Name) { return ValidationErrorf("chart.metadata.name %q is invalid", md.Name) } diff --git a/internal/chart/v3/metadata_test.go b/internal/chart/v3/metadata_test.go index 596a03695..5f88552e9 100644 --- a/internal/chart/v3/metadata_test.go +++ b/internal/chart/v3/metadata_test.go @@ -16,6 +16,7 @@ limitations under the License. 
package v3 import ( + "errors" "testing" ) @@ -40,6 +41,16 @@ func TestValidate(t *testing.T) { &Metadata{APIVersion: "v3", Version: "1.0"}, ValidationError("chart.metadata.name is required"), }, + { + "chart with dot name", + &Metadata{Name: ".", APIVersion: "v3", Version: "1.0"}, + ValidationError("chart.metadata.name \".\" is not allowed"), + }, + { + "chart with dotdot name", + &Metadata{Name: "..", APIVersion: "v3", Version: "1.0"}, + ValidationError("chart.metadata.name \"..\" is not allowed"), + }, { "chart without name", &Metadata{Name: "../../test", APIVersion: "v3", Version: "1.0"}, @@ -181,7 +192,7 @@ func TestValidate(t *testing.T) { for _, tt := range tests { result := tt.md.Validate() - if result != tt.err { + if !errors.Is(result, tt.err) { t.Errorf("expected %q, got %q in test %q", tt.err, result, tt.name) } } diff --git a/internal/chart/v3/util/chartfile_test.go b/internal/chart/v3/util/chartfile_test.go index c3d19c381..65cb27923 100644 --- a/internal/chart/v3/util/chartfile_test.go +++ b/internal/chart/v3/util/chartfile_test.go @@ -35,7 +35,7 @@ func TestLoadChartfile(t *testing.T) { func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { t.Helper() - if f == nil { //nolint:staticcheck + if f == nil { t.Fatal("Failed verifyChartfile because f is nil") } @@ -56,15 +56,15 @@ func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { } if f.Maintainers[0].Name != "The Helm Team" { - t.Errorf("Unexpected maintainer name.") + t.Error("Unexpected maintainer name.") } if f.Maintainers[1].Email != "nobody@example.com" { - t.Errorf("Unexpected maintainer email.") + t.Error("Unexpected maintainer email.") } if len(f.Sources) != 1 { - t.Fatalf("Unexpected number of sources") + t.Fatal("Unexpected number of sources") } if f.Sources[0] != "https://example.com/foo/bar" { @@ -84,7 +84,7 @@ func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { } if len(f.Annotations) != 2 { - t.Fatalf("Unexpected annotations") + 
t.Fatal("Unexpected annotations") } if want, got := "extravalue", f.Annotations["extrakey"]; want != got { @@ -111,7 +111,7 @@ func TestIsChartDir(t *testing.T) { } validChartDir, err = IsChartDir("testdata") if validChartDir || err == nil { - t.Errorf("expected error but did not get any") + t.Error("expected error but did not get any") return } } diff --git a/internal/chart/v3/util/create.go b/internal/chart/v3/util/create.go index 0dfa30995..48d2120e5 100644 --- a/internal/chart/v3/util/create.go +++ b/internal/chart/v3/util/create.go @@ -670,7 +670,7 @@ func CreateFrom(chartfile *chart.Metadata, dest, src string) error { return fmt.Errorf("reading values file: %w", err) } - var m map[string]interface{} + var m map[string]any if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil { return fmt.Errorf("transforming values file: %w", err) } diff --git a/internal/chart/v3/util/create_test.go b/internal/chart/v3/util/create_test.go index b3b58cc5a..abdd52a82 100644 --- a/internal/chart/v3/util/create_test.go +++ b/internal/chart/v3/util/create_test.go @@ -140,7 +140,7 @@ func TestCreate_Overwrite(t *testing.T) { } if errlog.Len() == 0 { - t.Errorf("Expected warnings about overwriting files.") + t.Error("Expected warnings about overwriting files.") } } diff --git a/internal/chart/v3/util/dependencies.go b/internal/chart/v3/util/dependencies.go index 4ef9e6961..9c4d8e80f 100644 --- a/internal/chart/v3/util/dependencies.go +++ b/internal/chart/v3/util/dependencies.go @@ -16,6 +16,7 @@ limitations under the License. 
package util import ( + "errors" "fmt" "log/slog" "strings" @@ -44,6 +45,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, if len(c) > 0 { // retrieve value vv, err := cvals.PathValue(cpath + c) + var errNoValue common.ErrNoValue if err == nil { // if not bool, warn if bv, ok := vv.(bool); ok { @@ -51,7 +53,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, break } slog.Warn("returned non-bool value", "path", c, "chart", r.Name) - } else if _, ok := err.(common.ErrNoValue); !ok { + } else if errors.As(err, &errNoValue) { // this is a real error slog.Warn("the method PathValue returned error", slog.Any("error", err)) } @@ -140,7 +142,7 @@ func copyMetadata(metadata *chart.Metadata) *chart.Metadata { } // processDependencyEnabled removes disabled charts from dependencies -func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { +func processDependencyEnabled(c *chart.Chart, v map[string]any, path string) error { if c.Metadata.Dependencies == nil { return nil } @@ -226,7 +228,7 @@ Loop: } // pathToMap creates a nested map given a YAML path in dot notation. -func pathToMap(path string, data map[string]interface{}) map[string]interface{} { +func pathToMap(path string, data map[string]any) map[string]any { if path == "." 
{ return data } @@ -235,13 +237,13 @@ func pathToMap(path string, data map[string]interface{}) map[string]interface{} func parsePath(key string) []string { return strings.Split(key, ".") } -func set(path []string, data map[string]interface{}) map[string]interface{} { +func set(path []string, data map[string]any) map[string]any { if len(path) == 0 { return nil } cur := data for i := len(path) - 1; i >= 0; i-- { - cur = map[string]interface{}{path[i]: cur} + cur = map[string]any{path[i]: cur} } return cur } @@ -262,13 +264,13 @@ func processImportValues(c *chart.Chart, merge bool) error { if err != nil { return err } - b := make(map[string]interface{}) + b := make(map[string]any) // import values from each dependency if specified in import-values for _, r := range c.Metadata.Dependencies { - var outiv []interface{} + var outiv []any for _, riv := range r.ImportValues { switch iv := riv.(type) { - case map[string]interface{}: + case map[string]any: child := fmt.Sprintf("%v", iv["child"]) parent := fmt.Sprintf("%v", iv["parent"]) @@ -336,27 +338,27 @@ func processImportValues(c *chart.Chart, merge bool) error { return nil } -func deepCopyMap(vals map[string]interface{}) map[string]interface{} { +func deepCopyMap(vals map[string]any) map[string]any { valsCopy, err := copystructure.Copy(vals) if err != nil { return vals } - return valsCopy.(map[string]interface{}) + return valsCopy.(map[string]any) } -func trimNilValues(vals map[string]interface{}) map[string]interface{} { +func trimNilValues(vals map[string]any) map[string]any { valsCopy, err := copystructure.Copy(vals) if err != nil { return vals } - valsCopyMap := valsCopy.(map[string]interface{}) + valsCopyMap := valsCopy.(map[string]any) for key, val := range valsCopyMap { if val == nil { // Iterate over the values and remove nil keys delete(valsCopyMap, key) } else if istable(val) { // Recursively call into ourselves to remove keys from inner tables - valsCopyMap[key] = trimNilValues(val.(map[string]interface{})) + 
valsCopyMap[key] = trimNilValues(val.(map[string]any)) } } @@ -364,8 +366,8 @@ func trimNilValues(vals map[string]interface{}) map[string]interface{} { } // istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) +func istable(v any) bool { + _, ok := v.(map[string]any) return ok } diff --git a/internal/chart/v3/util/dependencies_test.go b/internal/chart/v3/util/dependencies_test.go index 3c5bb96f7..c8a176725 100644 --- a/internal/chart/v3/util/dependencies_test.go +++ b/internal/chart/v3/util/dependencies_test.go @@ -63,7 +63,7 @@ func TestLoadDependency(t *testing.T) { } func TestDependencyEnabled(t *testing.T) { - type M = map[string]interface{} + type M = map[string]any tests := []struct { name string v M @@ -385,7 +385,7 @@ func TestGetAliasDependency(t *testing.T) { req := c.Metadata.Dependencies if len(req) == 0 { - t.Fatalf("there are no dependencies to test") + t.Fatal("there are no dependencies to test") } // Success case @@ -403,7 +403,7 @@ func TestGetAliasDependency(t *testing.T) { if req[0].Version != "" { if !IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) { - t.Fatalf("dependency chart version is not in the compatible range") + t.Fatal("dependency chart version is not in the compatible range") } } @@ -415,7 +415,7 @@ func TestGetAliasDependency(t *testing.T) { req[0].Version = "something else which is not in the compatible range" if IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) { - t.Fatalf("dependency chart version which is not in the compatible range should cause a failure other than a success ") + t.Fatal("dependency chart version outside the compatible range should not be considered compatible") } } diff --git a/internal/chart/v3/util/doc.go b/internal/chart/v3/util/doc.go index 002d5babc..dc5a07462 100644 --- a/internal/chart/v3/util/doc.go +++ b/internal/chart/v3/util/doc.go @@ -42,4 +42,4 
@@ into a Chart. When creating charts in memory, use the 'helm.sh/helm/pkg/chart' package directly. */ -package util // import chartutil "helm.sh/helm/v4/internal/chart/v3/util" +package util // import "helm.sh/helm/v4/internal/chart/v3/util" diff --git a/internal/chart/v3/util/expand.go b/internal/chart/v3/util/expand.go index 1a10fce3c..5b057b8e0 100644 --- a/internal/chart/v3/util/expand.go +++ b/internal/chart/v3/util/expand.go @@ -52,6 +52,17 @@ func Expand(dir string, r io.Reader) error { return errors.New("chart name not specified") } + // Reject chart names that are POSIX path dot-segments or dot-dot segments or contain path separators. + // A dot-segment name (e.g. ".") causes SecureJoin to resolve to the root + // directory and extraction then to write files directly into that extraction root + // instead of a per-chart subdirectory. + if chartName == "." || chartName == ".." { + return fmt.Errorf("chart name %q is not allowed", chartName) + } + if chartName != filepath.Base(chartName) { + return fmt.Errorf("chart name %q must not contain path separators", chartName) + } + // Find the base directory // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up // being wrong or returning an error. This was introduced in v0.4.0. @@ -61,6 +72,12 @@ func Expand(dir string, r io.Reader) error { return err } + // Defense-in-depth: the chart directory must be a subdirectory of dir, + // never dir itself. + if chartdir == dir { + return fmt.Errorf("chart name %q resolves to the extraction root", chartName) + } + // Copy all files verbatim. We don't parse these files because parsing can remove // comments. for _, file := range files { diff --git a/internal/chart/v3/util/expand_test.go b/internal/chart/v3/util/expand_test.go index 280995f7e..e9e298b81 100644 --- a/internal/chart/v3/util/expand_test.go +++ b/internal/chart/v3/util/expand_test.go @@ -17,11 +17,73 @@ limitations under the License. 
package util import ( + "archive/tar" + "bytes" + "compress/gzip" + "io/fs" "os" "path/filepath" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +// makeTestChartArchive builds a gzipped tar archive from the given sourceDir directory, file entries are prefixed with the given chartName +func makeTestChartArchive(t *testing.T, chartName, sourceDir string) *bytes.Buffer { + t.Helper() + + var result bytes.Buffer + gw := gzip.NewWriter(&result) + tw := tar.NewWriter(gw) + + dir := os.DirFS(sourceDir) + + writeFile := func(relPath string) { + t.Helper() + f, err := dir.Open(relPath) + require.NoError(t, err) + + fStat, err := f.Stat() + require.NoError(t, err) + + err = tw.WriteHeader(&tar.Header{ + Name: filepath.Join(chartName, relPath), + Mode: int64(fStat.Mode()), + Size: fStat.Size(), + }) + require.NoError(t, err) + + data, err := fs.ReadFile(dir, relPath) + require.NoError(t, err) + tw.Write(data) + } + + err := fs.WalkDir(dir, ".", func(path string, d os.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + + if d.IsDir() { + return nil + } + + writeFile(path) + + return nil + }) + if err != nil { + t.Fatal(err) + } + + err = tw.Close() + require.NoError(t, err) + err = gw.Close() + require.NoError(t, err) + + return &result +} + func TestExpand(t *testing.T) { dest := t.TempDir() @@ -75,6 +137,28 @@ func TestExpand(t *testing.T) { } } +func TestExpandError(t *testing.T) { + tests := map[string]struct { + chartName string + chartDir string + wantErr string + }{ + "dot name": {"dotname", "testdata/dotname", "not allowed"}, + "dotdot name": {"dotdotname", "testdata/dotdotname", "not allowed"}, + "slash in name": {"slashinname", "testdata/slashinname", "must not contain path separators"}, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + archive := makeTestChartArchive(t, tt.chartName, tt.chartDir) + + dest := t.TempDir() + err := Expand(dest, archive) + 
assert.ErrorContains(t, err, tt.wantErr) + }) + } +} + func TestExpandFile(t *testing.T) { dest := t.TempDir() diff --git a/internal/chart/v3/util/save_test.go b/internal/chart/v3/util/save_test.go index 7a42a76af..34e7d898e 100644 --- a/internal/chart/v3/util/save_test.go +++ b/internal/chart/v3/util/save_test.go @@ -21,8 +21,8 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + "encoding/hex" "errors" - "fmt" "io" "os" "path" @@ -87,7 +87,7 @@ func TestSave(t *testing.T) { t.Fatalf("Schema data did not match.\nExpected:\n%s\nActual:\n%s", formattedExpected, formattedActual) } if _, err := Save(&chartWithInvalidJSON, dest); err == nil { - t.Fatalf("Invalid JSON was not caught while saving chart") + t.Fatal("Invalid JSON was not caught while saving chart") } c.Metadata.APIVersion = chart.APIVersionV3 @@ -153,7 +153,7 @@ func TestSavePreservesTimestamps(t *testing.T) { Version: "1.2.3", }, ModTime: initialCreateTime, - Values: map[string]interface{}{ + Values: map[string]any{ "imageName": "testimage", "imageId": 42, }, @@ -353,5 +353,5 @@ func sha256Sum(filePath string) (string, error) { return "", err } - return fmt.Sprintf("%x", h.Sum(nil)), nil + return hex.EncodeToString(h.Sum(nil)), nil } diff --git a/internal/chart/v3/util/testdata/dotdotname/Chart.yaml b/internal/chart/v3/util/testdata/dotdotname/Chart.yaml new file mode 100644 index 000000000..9b081f27b --- /dev/null +++ b/internal/chart/v3/util/testdata/dotdotname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: .. +description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/internal/chart/v3/util/testdata/dotname/Chart.yaml b/internal/chart/v3/util/testdata/dotname/Chart.yaml new file mode 100644 index 000000000..597c16290 --- /dev/null +++ b/internal/chart/v3/util/testdata/dotname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: . 
+description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/internal/chart/v3/util/testdata/slashinname/Chart.yaml b/internal/chart/v3/util/testdata/slashinname/Chart.yaml new file mode 100644 index 000000000..0c522a4b6 --- /dev/null +++ b/internal/chart/v3/util/testdata/slashinname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: a/../b +description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/internal/copystructure/copystructure.go b/internal/copystructure/copystructure.go index aa5510298..c55897aaa 100644 --- a/internal/copystructure/copystructure.go +++ b/internal/copystructure/copystructure.go @@ -89,7 +89,15 @@ func copyValue(original reflect.Value) (any, error) { } copied := reflect.MakeSlice(original.Type(), original.Len(), original.Cap()) for i := 0; i < original.Len(); i++ { - val, err := copyValue(original.Index(i)) + elem := original.Index(i) + + // Handle nil values in slices (e.g., interface{} elements that are nil) + if elem.Kind() == reflect.Interface && elem.IsNil() { + copied.Index(i).Set(elem) + continue + } + + val, err := copyValue(elem) if err != nil { return nil, err } diff --git a/internal/copystructure/copystructure_test.go b/internal/copystructure/copystructure_test.go index d1708dc75..b21af6460 100644 --- a/internal/copystructure/copystructure_test.go +++ b/internal/copystructure/copystructure_test.go @@ -113,6 +113,21 @@ func TestCopy_Slice(t *testing.T) { input[0]["key1"] = "modified" assert.Equal(t, "value1", resultSlice[0]["key1"]) }) + + t.Run("slice with nil elements", func(t *testing.T) { + input := []any{ + "value1", + nil, + "value2", + } + result, err := Copy(input) + require.NoError(t, err) + + resultSlice, ok := result.([]any) + require.True(t, ok) + assert.Equal(t, input, resultSlice) + assert.Nil(t, resultSlice[1]) + }) } func TestCopy_Map(t *testing.T) { diff --git a/internal/fileutil/fileutil_test.go b/internal/fileutil/fileutil_test.go index 
881fbb49d..71fcae177 100644 --- a/internal/fileutil/fileutil_test.go +++ b/internal/fileutil/fileutil_test.go @@ -119,3 +119,29 @@ func TestAtomicWriteFile_LargeContent(t *testing.T) { t.Fatalf("expected large content to match, got different length: %d vs %d", len(largeContent), len(got)) } } + +// TestPlatformAtomicWriteFile_OverwritesExisting verifies that the platform +// helper replaces existing files instead of silently skipping them. +func TestPlatformAtomicWriteFile_OverwritesExisting(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "overwrite_test") + + first := bytes.NewReader([]byte("first")) + if err := PlatformAtomicWriteFile(path, first, 0644); err != nil { + t.Fatalf("first write failed: %v", err) + } + + second := bytes.NewReader([]byte("second")) + if err := PlatformAtomicWriteFile(path, second, 0644); err != nil { + t.Fatalf("second write failed: %v", err) + } + + contents, err := os.ReadFile(path) + if err != nil { + t.Fatalf("failed reading result: %v", err) + } + + if string(contents) != "second" { + t.Fatalf("expected file to be overwritten, got %q", string(contents)) + } +} diff --git a/internal/fileutil/fileutil_unix.go b/internal/fileutil/fileutil_unix.go new file mode 100644 index 000000000..bbacb10bf --- /dev/null +++ b/internal/fileutil/fileutil_unix.go @@ -0,0 +1,32 @@ +//go:build !windows + +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fileutil + +import ( + "io" + "os" +) + +// PlatformAtomicWriteFile atomically writes a file to disk. +// +// On non-Windows platforms we don't need extra coordination, so this simply +// delegates to AtomicWriteFile to preserve the existing overwrite behaviour. +func PlatformAtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error { + return AtomicWriteFile(filename, reader, mode) +} diff --git a/internal/fileutil/fileutil_windows.go b/internal/fileutil/fileutil_windows.go new file mode 100644 index 000000000..179237860 --- /dev/null +++ b/internal/fileutil/fileutil_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fileutil + +import ( + "io" + "os" + + "github.com/gofrs/flock" +) + +// PlatformAtomicWriteFile atomically writes a file to disk with file locking to +// prevent concurrent writes. This is particularly useful on Windows where +// concurrent writes to the same file can cause "Access Denied" errors. +// +// The function acquires a lock on the target file and performs an atomic write, +// preserving the existing behaviour of overwriting any previous content once +// the lock is obtained. 
+func PlatformAtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error { + // Use a separate lock file to coordinate access between processes + // We cannot lock the target file directly as it would prevent the atomic rename + lockFileName := filename + ".lock" + fileLock := flock.New(lockFileName) + + // Lock() ensures serialized access - if another process is writing, this will wait + if err := fileLock.Lock(); err != nil { + return err + } + defer func() { + fileLock.Unlock() + // Clean up the lock file + // Ignore errors as the file might not exist or be in use by another process + os.Remove(lockFileName) + }() + + // Perform the atomic write while holding the lock + return AtomicWriteFile(filename, reader, mode) +} diff --git a/internal/gates/doc.go b/internal/gates/doc.go new file mode 100644 index 000000000..09501e005 --- /dev/null +++ b/internal/gates/doc.go @@ -0,0 +1,19 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gates contains internal feature gates that can be used to enable or disable experimental features. +// This is a separate internal package instead of using the pkg/gates package to avoid circular dependencies. +package gates diff --git a/internal/gates/gates.go b/internal/gates/gates.go new file mode 100644 index 000000000..e071c54ea --- /dev/null +++ b/internal/gates/gates.go @@ -0,0 +1,21 @@ +/* +Copyright The Helm Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gates + +import "helm.sh/helm/v4/pkg/gates" + +// ChartV3 is the feature gate for chart API version v3. +const ChartV3 gates.Gate = "HELM_EXPERIMENTAL_CHART_V3" diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go index bb96314f4..5a2912d2e 100644 --- a/internal/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -17,6 +17,7 @@ package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "bytes" + "errors" "fmt" "log/slog" "os" @@ -143,7 +144,7 @@ func (i *HTTPInstaller) Install() error { // Update updates a local repository // Not implemented for now since tarball most likely will be packaged by version func (i *HTTPInstaller) Update() error { - return fmt.Errorf("method Update() not implemented for HttpInstaller") + return errors.New("method Update() not implemented for HttpInstaller") } // Path is overridden because we want to join on the plugin name not the file name @@ -163,7 +164,7 @@ func (i *HTTPInstaller) SupportsVerification() bool { // GetVerificationData returns cached plugin and provenance data for verification func (i *HTTPInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) { if !i.SupportsVerification() { - return nil, nil, "", fmt.Errorf("verification not supported for this source") + return nil, nil, "", errors.New("verification not supported for this source") } // 
Download plugin data once and cache it diff --git a/internal/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go index 7f7e6cef6..006b7a7b3 100644 --- a/internal/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -53,16 +53,16 @@ var fakePluginB64 = "H4sIAAAAAAAAA+3SQUvDMBgG4Jz7K0LwapdvSxrwJig6mCKC5xHabBaXdDS func TestStripName(t *testing.T) { if stripPluginName("fake-plugin-0.0.1.tar.gz") != "fake-plugin" { - t.Errorf("name does not match expected value") + t.Error("name does not match expected value") } if stripPluginName("fake-plugin-0.0.1.tgz") != "fake-plugin" { - t.Errorf("name does not match expected value") + t.Error("name does not match expected value") } if stripPluginName("fake-plugin.tgz") != "fake-plugin" { - t.Errorf("name does not match expected value") + t.Error("name does not match expected value") } if stripPluginName("fake-plugin.tar.gz") != "fake-plugin" { - t.Errorf("name does not match expected value") + t.Error("name does not match expected value") } } @@ -150,7 +150,7 @@ func TestHTTPInstallerNonExistentVersion(t *testing.T) { // inject fake http client responding with error httpInstaller.getter = &TestHTTPGetter{ - MockError: fmt.Errorf("failed to download plugin for some reason"), + MockError: errors.New("failed to download plugin for some reason"), } // attempt to install the plugin @@ -345,7 +345,7 @@ func TestMediaTypeToExtension(t *testing.T) { t.Errorf("Media type %q failed test", mt) } if shouldPass && ext == "" { - t.Errorf("Expected an extension but got empty string") + t.Error("Expected an extension but got empty string") } if !shouldPass && len(ext) != 0 { t.Error("Expected extension to be empty for unrecognized type") diff --git a/internal/plugin/installer/installer.go b/internal/plugin/installer/installer.go index e3975c2d7..69a797ad9 100644 --- a/internal/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -87,7 
+87,7 @@ func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) if opts.Verify { verifier, ok := i.(Verifier) if !ok || !verifier.SupportsVerification() { - return nil, fmt.Errorf("--verify is only supported for plugin tarballs (.tgz files)") + return nil, errors.New("--verify is only supported for plugin tarballs (.tgz files)") } // Get verification data (works for both memory and file-based installers) @@ -98,24 +98,23 @@ func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) // Check if provenance data exists if len(provData) == 0 { - // No .prov file found - emit warning but continue installation - fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. Plugin is not signed and cannot be verified.\n") - } else { - // Provenance data exists - verify the plugin - verification, err := plugin.VerifyPlugin(archiveData, provData, filename, opts.Keyring) - if err != nil { - return nil, fmt.Errorf("plugin verification failed: %w", err) - } + return nil, errors.New("plugin verification failed: no provenance file (.prov) found") + } - // Collect verification info - result = &VerificationResult{ - SignedBy: make([]string, 0), - Fingerprint: fmt.Sprintf("%X", verification.SignedBy.PrimaryKey.Fingerprint), - FileHash: verification.FileHash, - } - for name := range verification.SignedBy.Identities { - result.SignedBy = append(result.SignedBy, name) - } + // Provenance data exists - verify the plugin + verification, err := plugin.VerifyPlugin(archiveData, provData, filename, opts.Keyring) + if err != nil { + return nil, fmt.Errorf("plugin verification failed: %w", err) + } + + // Collect verification info + result = &VerificationResult{ + SignedBy: make([]string, 0), + Fingerprint: fmt.Sprintf("%X", verification.SignedBy.PrimaryKey.Fingerprint), + FileHash: verification.FileHash, + } + for name := range verification.SignedBy.Identities { + result.SignedBy = append(result.SignedBy, name) } } @@ -137,7 +136,7 @@ 
func Update(i Installer) error { // NewForSource determines the correct Installer for the given source. func NewForSource(source, version string) (installer Installer, err error) { - if strings.HasPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) { + if strings.HasPrefix(source, registry.OCIScheme+"://") { // Source is an OCI registry reference installer, err = NewOCIInstaller(source) } else if isLocalReference(source) { diff --git a/internal/plugin/installer/installer_test.go b/internal/plugin/installer/installer_test.go index dcd76fe9c..50e8e1ee2 100644 --- a/internal/plugin/installer/installer_test.go +++ b/internal/plugin/installer/installer_test.go @@ -23,18 +23,18 @@ func TestIsRemoteHTTPArchive(t *testing.T) { source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz" if isRemoteHTTPArchive("/not/a/URL") { - t.Errorf("Expected non-URL to return false") + t.Error("Expected non-URL to return false") } // URLs with valid archive extensions are considered valid archives // even if the server is unreachable (optimization to avoid unnecessary HTTP requests) if !isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") { - t.Errorf("URL with .tgz extension should be considered a valid archive") + t.Error("URL with .tgz extension should be considered a valid archive") } // Test with invalid extension and unreachable server if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.notanarchive") { - t.Errorf("Bad URL without valid extension should not succeed") + t.Error("Bad URL without valid extension should not succeed") } if !isRemoteHTTPArchive(source) { diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go index 1c8314282..71407380f 100644 --- a/internal/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -188,7 +188,7 @@ func (i *LocalInstaller) SupportsVerification() bool { // GetVerificationData loads plugin and provenance data from local files for 
verification func (i *LocalInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) { if !i.SupportsVerification() { - return nil, nil, "", fmt.Errorf("verification not supported for directories") + return nil, nil, "", errors.New("verification not supported for directories") } // Read and cache the plugin archive file diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index 3ee8ab6d0..510c2880b 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -19,6 +19,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "errors" "os" "path/filepath" "testing" @@ -64,7 +65,7 @@ func TestLocalInstallerNotAFolder(t *testing.T) { if err == nil { t.Fatal("expected error") } - if err != ErrPluginNotADirectory { + if !errors.Is(err, ErrPluginNotADirectory) { t.Fatalf("expected error to equal: %q", err) } } diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go index 67f99b6f8..50d01522a 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -130,7 +130,7 @@ func (i *OCIInstaller) Install() error { // Check if this is a gzip compressed file if len(i.pluginData) < 2 || i.pluginData[0] != 0x1f || i.pluginData[1] != 0x8b { - return fmt.Errorf("plugin data is not a gzip compressed archive") + return errors.New("plugin data is not a gzip compressed archive") } // Create cache directory diff --git a/internal/plugin/installer/oci_installer_test.go b/internal/plugin/installer/oci_installer_test.go index 1280cf97d..1f25f4e76 100644 --- a/internal/plugin/installer/oci_installer_test.go +++ b/internal/plugin/installer/oci_installer_test.go @@ -82,7 +82,7 @@ command: "$HELM_PLUGIN_DIR/bin/%s" // Add executable execContent := fmt.Sprintf("#!/bin/sh\necho '%s test plugin'", pluginName) execHeader := &tar.Header{ - Name: 
fmt.Sprintf("bin/%s", pluginName), + Name: "bin/" + pluginName, Mode: 0755, Size: int64(len(execContent)), Typeflag: tar.TypeReg, @@ -239,7 +239,7 @@ func TestNewOCIInstaller(t *testing.T) { if tt.expectError { if err == nil { - t.Errorf("expected error but got none") + t.Error("expected error but got none") } return } @@ -416,7 +416,7 @@ func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) { err = Install(installer) if tc.wantErr { if err == nil { - t.Errorf("Expected installation to fail, but it succeeded") + t.Error("Expected installation to fail, but it succeeded") } } else { if err != nil { @@ -526,13 +526,13 @@ func TestOCIInstaller_Install_ComponentExtraction(t *testing.T) { // Verify plugin.yaml exists pluginYAMLPath := filepath.Join(tempDir, "plugin.yaml") if _, err := os.Stat(pluginYAMLPath); os.IsNotExist(err) { - t.Errorf("plugin.yaml not found after extraction") + t.Error("plugin.yaml not found after extraction") } // Verify bin directory exists binPath := filepath.Join(tempDir, "bin") if _, err := os.Stat(binPath); os.IsNotExist(err) { - t.Errorf("bin directory not found after extraction") + t.Error("bin directory not found after extraction") } // Verify executable exists and has correct permissions @@ -540,12 +540,12 @@ func TestOCIInstaller_Install_ComponentExtraction(t *testing.T) { if info, err := os.Stat(execPath); err != nil { t.Errorf("executable not found: %v", err) } else if info.Mode()&0111 == 0 { - t.Errorf("file is not executable") + t.Error("file is not executable") } // Verify this would be recognized as a plugin if !isPlugin(tempDir) { - t.Errorf("extracted directory is not a valid plugin") + t.Error("extracted directory is not a valid plugin") } } @@ -798,7 +798,7 @@ func TestOCIInstaller_Install_ValidationErrors(t *testing.T) { t.Error("expected valid gzip data") } if !strings.Contains(tt.errorMsg, "is not a gzip compressed archive") { - t.Errorf("expected error message to contain 'is not a gzip compressed archive'") + 
t.Error("expected error message to contain 'is not a gzip compressed archive'") } } }) diff --git a/internal/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go index d542a0f75..7fe627b59 100644 --- a/internal/plugin/installer/vcs_installer_test.go +++ b/internal/plugin/installer/vcs_installer_test.go @@ -90,14 +90,14 @@ func TestVCSInstaller(t *testing.T) { // Install again to test plugin exists error if err := Install(i); err == nil { - t.Fatalf("expected error for plugin exists, got none") + t.Fatal("expected error for plugin exists, got none") } else if err.Error() != "plugin already exists" { t.Fatalf("expected error for plugin exists, got (%v)", err) } // Testing FindSource method, expect error because plugin code is not a cloned repository if _, err := FindSource(i.Path()); err == nil { - t.Fatalf("expected error for inability to find plugin source, got none") + t.Fatal("expected error for inability to find plugin source, got none") } else if err.Error() != "cannot get information about plugin source" { t.Fatalf("expected error for inability to find plugin source, got (%v)", err) } @@ -120,7 +120,7 @@ func TestVCSInstallerNonExistentVersion(t *testing.T) { } if err := Install(i); err == nil { - t.Fatalf("expected error for version does not exists, got none") + t.Fatal("expected error for version does not exists, got none") } else if strings.Contains(err.Error(), "Could not resolve host: github.com") { t.Skip("Unable to run test without Internet access") } else if err.Error() != fmt.Sprintf("requested version %q does not exist for plugin %q", version, source) { @@ -181,7 +181,7 @@ func TestVCSInstallerUpdate(t *testing.T) { } // Testing update for error if err := Update(vcsInstaller); err == nil { - t.Fatalf("expected error for plugin modified, got none") + t.Fatal("expected error for plugin modified, got none") } else if err.Error() != "plugin repo was modified" { t.Fatalf("expected error for plugin modified, got (%v)", 
err) } diff --git a/internal/plugin/installer/verification_test.go b/internal/plugin/installer/verification_test.go index 22f0a8308..e05cda7fd 100644 --- a/internal/plugin/installer/verification_test.go +++ b/internal/plugin/installer/verification_test.go @@ -16,10 +16,8 @@ limitations under the License. package installer import ( - "bytes" "crypto/sha256" "fmt" - "io" "os" "path/filepath" "strings" @@ -44,33 +42,49 @@ func TestInstallWithOptions_VerifyMissingProvenance(t *testing.T) { } defer os.RemoveAll(installer.Path()) - // Capture stderr to check warning message - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - - // Install with verification enabled (should warn but succeed) + // Install with verification enabled should fail when .prov is missing result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"}) - // Restore stderr and read captured output - w.Close() - os.Stderr = oldStderr - var buf bytes.Buffer - io.Copy(&buf, r) - output := buf.String() - - // Should succeed with nil result (no verification performed) - if err != nil { - t.Fatalf("Expected installation to succeed despite missing .prov file, got error: %v", err) + // Should fail with a missing provenance error + if err == nil { + t.Fatal("Expected installation to fail when .prov file is missing and verification is enabled") + } + if !strings.Contains(err.Error(), "no provenance file") { + t.Errorf("Expected 'no provenance file' in error message, got: %v", err) } if result != nil { t.Errorf("Expected nil verification result when .prov file is missing, got: %+v", result) } - // Should contain warning message - expectedWarning := "WARNING: No provenance file found for plugin" - if !strings.Contains(output, expectedWarning) { - t.Errorf("Expected warning message '%s' in output, got: %s", expectedWarning, output) + // Plugin should NOT be installed + if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) { + t.Error("Plugin should not be installed when 
verification fails due to missing .prov") + } +} + +func TestInstallWithOptions_NoVerifyMissingProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a temporary plugin tarball without .prov file + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + defer os.Remove(pluginTgz) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification explicitly disabled should succeed without .prov + result, err := InstallWithOptions(installer, Options{Verify: false}) + + if err != nil { + t.Fatalf("Expected installation to succeed with --verify=false, got error: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification is disabled, got: %+v", result) } // Plugin should be installed @@ -107,7 +121,7 @@ func TestInstallWithOptions_VerifyWithValidProvenance(t *testing.T) { // Should fail due to invalid signature (empty keyring) but we test that it gets past the hash check if err == nil { - t.Fatalf("Expected installation to fail with empty keyring") + t.Fatal("Expected installation to fail with empty keyring") } if !strings.Contains(err.Error(), "plugin verification failed") { t.Errorf("Expected plugin verification failed error, got: %v", err) @@ -118,7 +132,7 @@ func TestInstallWithOptions_VerifyWithValidProvenance(t *testing.T) { // Plugin should not be installed due to verification failure if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) { - t.Errorf("Plugin should not be installed when verification fails") + t.Error("Plugin should not be installed when verification fails") } } @@ -150,7 +164,7 @@ func TestInstallWithOptions_VerifyWithInvalidProvenance(t *testing.T) { // Should fail with verification error if err == nil { - t.Fatalf("Expected installation with invalid .prov file to fail") + t.Fatal("Expected installation 
with invalid .prov file to fail") } if result != nil { t.Errorf("Expected nil verification result when verification fails, got: %+v", result) @@ -164,7 +178,7 @@ func TestInstallWithOptions_VerifyWithInvalidProvenance(t *testing.T) { // Plugin should not be installed if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) { - t.Errorf("Plugin should not be installed when verification fails") + t.Error("Plugin should not be installed when verification fails") } } @@ -218,7 +232,7 @@ func TestInstallWithOptions_VerifyDirectoryNotSupported(t *testing.T) { // Should fail with verification not supported error if err == nil { - t.Fatalf("Expected installation to fail with verification not supported error") + t.Fatal("Expected installation to fail with verification not supported error") } if !strings.Contains(err.Error(), "--verify is only supported for plugin tarballs") { t.Errorf("Expected verification not supported error, got: %v", err) @@ -257,7 +271,7 @@ func TestInstallWithOptions_VerifyMismatchedProvenance(t *testing.T) { // Should fail with verification error if err == nil { - t.Fatalf("Expected installation to fail with hash mismatch") + t.Fatal("Expected installation to fail with hash mismatch") } if !strings.Contains(err.Error(), "plugin verification failed") { t.Errorf("Expected plugin verification failed error, got: %v", err) @@ -298,7 +312,7 @@ func TestInstallWithOptions_VerifyProvenanceAccessError(t *testing.T) { // Should fail with access error (either at stat level or during verification) if err == nil { - t.Fatalf("Expected installation to fail with provenance file access error") + t.Fatal("Expected installation to fail with provenance file access error") } // The error could be either "failed to access provenance file" or "plugin verification failed" // depending on when the permission error occurs diff --git a/internal/plugin/loader.go b/internal/plugin/loader.go index 2f051b99e..d99395641 100644 --- a/internal/plugin/loader.go +++ 
b/internal/plugin/loader.go @@ -19,6 +19,7 @@ import ( "bytes" "fmt" "io" + "log/slog" "os" "path/filepath" @@ -158,18 +159,27 @@ func LoadDir(dirname string) (Plugin, error) { return pm.CreatePlugin(dirname, m) } -// LoadAll loads all plugins found beneath the base directory. +func LogIgnorePluginLoadErrorFilterFunc(pluginYAML string, err error) error { + slog.Warn("failed to load plugin (ignoring)", slog.String("plugin_yaml", pluginYAML), slog.Any("error", err)) + return nil +} + +// ErrorFilterFunc is a function that can filter errors during plugin loading +type ErrorFilterFunc func(string, error) error + +// LoadAllDir loads all plugins found beneath the base directory, using the provided error filter to determine whether to fail on individual plugin load errors. // // This scans only one directory level. -func LoadAll(basedir string) ([]Plugin, error) { - var plugins []Plugin - // We want basedir/*/plugin.yaml +func LoadAllDir(basedir string, errorFilter ErrorFilterFunc) ([]Plugin, error) { + // We want basedir/*/plugin.yaml scanpath := filepath.Join(basedir, "*", PluginFileName) matches, err := filepath.Glob(scanpath) if err != nil { return nil, fmt.Errorf("failed to search for plugins in %q: %w", scanpath, err) } + plugins := make([]Plugin, 0, len(matches)) + // empty dir should load if len(matches) == 0 { return plugins, nil @@ -179,9 +189,12 @@ func LoadAll(basedir string) ([]Plugin, error) { dir := filepath.Dir(yamlFile) p, err := LoadDir(dir) if err != nil { - return plugins, err + if errNew := errorFilter(yamlFile, err); errNew != nil { + return plugins, errNew + } + } else { + plugins = append(plugins, p) } - plugins = append(plugins, p) } return plugins, detectDuplicates(plugins) } @@ -193,8 +206,12 @@ type findFunc func(pluginsDir string) ([]Plugin, error) type filterFunc func(Plugin) bool // FindPlugins returns a list of plugins that match the descriptor +// Errors loading a plugin are ignored with a warning func FindPlugins(pluginsDirs []string, descriptor 
Descriptor) ([]Plugin, error) { - return findPlugins(pluginsDirs, LoadAll, makeDescriptorFilter(descriptor)) + loadAllIgnoreErrors := func(pluginsDir string) ([]Plugin, error) { + return LoadAllDir(pluginsDir, LogIgnorePluginLoadErrorFilterFunc) + } + return findPlugins(pluginsDirs, loadAllIgnoreErrors, makeDescriptorFilter(descriptor)) } // findPlugins is the internal implementation that uses the find and filter functions @@ -237,7 +254,11 @@ func makeDescriptorFilter(descriptor Descriptor) filterFunc { // FindPlugin returns a single plugin that matches the descriptor func FindPlugin(dirs []string, descriptor Descriptor) (Plugin, error) { - plugins, err := FindPlugins(dirs, descriptor) + loadAllIgnoreErrors := func(pluginsDir string) ([]Plugin, error) { + return LoadAllDir(pluginsDir, LogIgnorePluginLoadErrorFilterFunc) + } + + plugins, err := findPlugins(dirs, loadAllIgnoreErrors, makeDescriptorFilter(descriptor)) if err != nil { return nil, err } diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go index e84905248..b98f87a07 100644 --- a/internal/plugin/loader_test.go +++ b/internal/plugin/loader_test.go @@ -17,7 +17,6 @@ package plugin import ( "bytes" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -71,7 +70,7 @@ func TestLoadDir(t *testing.T) { } return Metadata{ APIVersion: apiVersion, - Name: fmt.Sprintf("hello-%s", apiVersion), + Name: "hello-" + apiVersion, Version: "0.1.0", Type: "cli/v1", Runtime: "subprocess", @@ -205,16 +204,16 @@ func TestDetectDuplicates(t *testing.T) { } } -func TestLoadAll(t *testing.T) { - // Verify that empty dir loads: - { - plugs, err := LoadAll("testdata") - require.NoError(t, err) - assert.Len(t, plugs, 0) - } +func TestLoadAllDir_Empty(t *testing.T) { + emptyDir := t.TempDir() + plugs, err := LoadAllDir(emptyDir, func(_ string, err error) error { return err }) + require.NoError(t, err) + assert.Len(t, plugs, 0) +} +func TestLoadAllPluginsDir(t *testing.T) { basedir := 
"testdata/plugdir/good" - plugs, err := LoadAll(basedir) + plugs, err := LoadAllDir(basedir, func(_ string, err error) error { return err }) require.NoError(t, err) require.NotEmpty(t, plugs, "expected plugins to be loaded from %s", basedir) @@ -233,7 +232,7 @@ func TestLoadAll(t *testing.T) { assert.Contains(t, plugsMap, "postrenderer-v1") } -func TestFindPlugins(t *testing.T) { +func TestLoadAllPluginsDir_Zero(t *testing.T) { cases := []struct { name string plugdirs string @@ -241,28 +240,20 @@ func TestFindPlugins(t *testing.T) { }{ { name: "plugdirs is empty", - plugdirs: "", - expected: 0, + plugdirs: t.TempDir(), }, { name: "plugdirs isn't dir", plugdirs: "./plugin_test.go", - expected: 0, }, { name: "plugdirs doesn't have plugin", plugdirs: ".", - expected: 0, - }, - { - name: "normal", - plugdirs: "./testdata/plugdir/good", - expected: 7, }, } for _, c := range cases { t.Run(t.Name(), func(t *testing.T) { - plugin, err := LoadAll(c.plugdirs) + plugin, err := LoadAllDir(c.plugdirs, func(_ string, err error) error { return err }) require.NoError(t, err) assert.Len(t, plugin, c.expected, "expected %d plugins, got %d", c.expected, len(plugin)) }) @@ -338,6 +329,7 @@ runtime: subprocess "correct name field": { yaml: `apiVersion: v1 name: my-plugin +version: 1.0.0 type: cli/v1 runtime: subprocess `, diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index 4e019f0b3..562861b1c 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -19,9 +19,17 @@ import ( "errors" "fmt" + "github.com/Masterminds/semver/v3" + "helm.sh/helm/v4/internal/plugin/schema" ) +// isValidSemver checks if the given string is a valid semantic version +func isValidSemver(v string) bool { + _, err := semver.StrictNewVersion(v) + return err == nil +} + // Metadata of a plugin, converted from the "on-disk" legacy or v1 plugin.yaml // Specifically, Config and RuntimeConfig are converted to their respective types based on the plugin type and runtime type 
Metadata struct { @@ -57,24 +65,29 @@ func (m Metadata) Validate() error { errs = append(errs, fmt.Errorf("invalid plugin name %q: must contain only a-z, A-Z, 0-9, _ and -", m.Name)) } + // Require version to be valid semver if specified + if m.Version != "" && !isValidSemver(m.Version) { + errs = append(errs, fmt.Errorf("invalid plugin version %q: must be valid semver", m.Version)) + } + if m.APIVersion == "" { - errs = append(errs, fmt.Errorf("empty APIVersion")) + errs = append(errs, errors.New("empty APIVersion")) } if m.Type == "" { - errs = append(errs, fmt.Errorf("empty type field")) + errs = append(errs, errors.New("empty type field")) } if m.Runtime == "" { - errs = append(errs, fmt.Errorf("empty runtime field")) + errs = append(errs, errors.New("empty runtime field")) } if m.Config == nil { - errs = append(errs, fmt.Errorf("missing config field")) + errs = append(errs, errors.New("missing config field")) } if m.RuntimeConfig == nil { - errs = append(errs, fmt.Errorf("missing runtimeConfig field")) + errs = append(errs, errors.New("missing runtimeConfig field")) } // Validate the config itself diff --git a/internal/plugin/metadata_legacy.go b/internal/plugin/metadata_legacy.go index 3cd1a50cd..cdde475bb 100644 --- a/internal/plugin/metadata_legacy.go +++ b/internal/plugin/metadata_legacy.go @@ -16,6 +16,7 @@ limitations under the License. 
package plugin import ( + "errors" "fmt" "strings" "unicode" @@ -71,14 +72,19 @@ func (m *MetadataLegacy) Validate() error { if !validPluginName.MatchString(m.Name) { return fmt.Errorf("invalid plugin name %q: must contain only a-z, A-Z, 0-9, _ and -", m.Name) } + + if m.Version != "" && !isValidSemver(m.Version) { + return fmt.Errorf("invalid plugin version %q: must be valid semver", m.Version) + } + m.Usage = sanitizeString(m.Usage) if len(m.PlatformCommand) > 0 && len(m.Command) > 0 { - return fmt.Errorf("both platformCommand and command are set") + return errors.New("both platformCommand and command are set") } if len(m.PlatformHooks) > 0 && len(m.Hooks) > 0 { - return fmt.Errorf("both platformHooks and hooks are set") + return errors.New("both platformHooks and hooks are set") } // Validate downloader plugins diff --git a/internal/plugin/metadata_legacy_test.go b/internal/plugin/metadata_legacy_test.go index 9421e98b5..0ecb7e619 100644 --- a/internal/plugin/metadata_legacy_test.go +++ b/internal/plugin/metadata_legacy_test.go @@ -26,6 +26,10 @@ func TestMetadataLegacyValidate(t *testing.T) { "valid metadata": { Name: "myplugin", }, + "valid metadata (empty version)": { + Name: "myplugin", + Version: "", + }, "valid with command": { Name: "myplugin", Command: "echo hello", @@ -59,6 +63,13 @@ func TestMetadataLegacyValidate(t *testing.T) { }, }, }, + "valid with version": { + Name: "myplugin", + Version: "1.0.0", + }, + "valid with empty version": { + Name: "myplugin", + }, } for testName, metadata := range testsValid { @@ -116,6 +127,14 @@ func TestMetadataLegacyValidate(t *testing.T) { }, }, }, + "path traversal version": { + Name: "myplugin", + Version: "../../../../tmp/evil", + }, + "invalid version": { + Name: "myplugin", + Version: "not-a-version", + }, } for testName, metadata := range testsInvalid { diff --git a/internal/plugin/metadata_test.go b/internal/plugin/metadata_test.go index 145ef5101..a2acd7925 100644 --- a/internal/plugin/metadata_test.go +++ 
b/internal/plugin/metadata_test.go @@ -18,6 +18,8 @@ package plugin import ( "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestValidatePluginData(t *testing.T) { @@ -72,6 +74,43 @@ func TestValidatePluginData(t *testing.T) { } } +func TestMetadataValidateVersion(t *testing.T) { + testValid := map[string]struct { + version string + }{ + "valid semver": {version: "1.0.0"}, + "valid semver with prerelease": {version: "1.2.3-alpha.1+build.123"}, + "empty version": {version: ""}, + } + + testInvalid := map[string]struct { + version string + }{ + "valid semver with v prefix": {version: "v1.0.0"}, + "path traversal": {version: "../../../../tmp/evil"}, + "path traversal in version": {version: "1.0.0/../../etc"}, + "not a version": {version: "not-a-version"}, + } + + for name, tc := range testValid { + t.Run(name, func(t *testing.T) { + m := mockSubprocessCLIPlugin(t, "testplugin") + m.metadata.Version = tc.version + err := m.Metadata().Validate() + assert.NoError(t, err) + }) + } + + for name, tc := range testInvalid { + t.Run(name, func(t *testing.T) { + m := mockSubprocessCLIPlugin(t, "testplugin") + m.metadata.Version = tc.version + err := m.Metadata().Validate() + assert.ErrorContains(t, err, "invalid plugin version") + }) + } +} + func TestMetadataValidateMultipleErrors(t *testing.T) { // Create metadata with multiple validation issues metadata := Metadata{ diff --git a/internal/plugin/metadata_v1.go b/internal/plugin/metadata_v1.go index 81dbc2e20..81d4a8a70 100644 --- a/internal/plugin/metadata_v1.go +++ b/internal/plugin/metadata_v1.go @@ -16,6 +16,7 @@ limitations under the License. 
package plugin import ( + "errors" "fmt" ) @@ -48,7 +49,14 @@ type MetadataV1 struct { func (m *MetadataV1) Validate() error { if !validPluginName.MatchString(m.Name) { - return fmt.Errorf("invalid plugin `name`") + return errors.New("invalid plugin `name`") + } + + if m.Version == "" { + return errors.New("plugin `version` is required") + } + if !isValidSemver(m.Version) { + return fmt.Errorf("invalid plugin `version` %q: must be valid semver", m.Version) } if m.APIVersion != "v1" { @@ -56,11 +64,11 @@ func (m *MetadataV1) Validate() error { } if m.Type == "" { - return fmt.Errorf("`type` missing") + return errors.New("`type` missing") } if m.Runtime == "" { - return fmt.Errorf("`runtime` missing") + return errors.New("`runtime` missing") } return nil diff --git a/internal/plugin/metadata_v1_test.go b/internal/plugin/metadata_v1_test.go new file mode 100644 index 000000000..17a02dac0 --- /dev/null +++ b/internal/plugin/metadata_v1_test.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetadataV1ValidateVersion(t *testing.T) { + base := func() MetadataV1 { + return MetadataV1{ + APIVersion: "v1", + Name: "myplugin", + Type: "cli/v1", + Runtime: "subprocess", + Version: "1.0.0", + } + } + + testsValid := map[string]string{ + "simple version": "1.0.0", + "with prerelease": "1.2.3-alpha.1", + "with build meta": "1.2.3+build.123", + "full prerelease": "1.2.3-alpha.1+build.123", + } + + for name, version := range testsValid { + t.Run("valid/"+name, func(t *testing.T) { + m := base() + m.Version = version + assert.NoError(t, m.Validate()) + }) + } + + testsInvalid := map[string]struct { + version string + errMsg string + }{ + "empty version": { + version: "", + errMsg: "plugin `version` is required", + }, + "v prefix": { + version: "v1.0.0", + errMsg: "invalid plugin `version` \"v1.0.0\": must be valid semver", + }, + "path traversal": { + version: "../../../../tmp/evil", + errMsg: "invalid plugin `version`", + }, + "path traversal etc": { + version: "../../../etc/passwd", + errMsg: "invalid plugin `version`", + }, + "not a version": { + version: "not-a-version", + errMsg: "invalid plugin `version`", + }, + } + + for name, tc := range testsInvalid { + t.Run("invalid/"+name, func(t *testing.T) { + m := base() + m.Version = tc.version + err := m.Validate() + assert.Error(t, err) + assert.Contains(t, err.Error(), tc.errMsg) + }) + } +} diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index ae0b343f3..2e3f274f2 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -82,7 +82,7 @@ func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginR return &SubprocessPluginRuntime{ metadata: Metadata{ Name: pluginName, - Version: "v0.1.2", + Version: "0.1.2", Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go index 
b2ff0b7ca..7d39a9a43 100644 --- a/internal/plugin/runtime.go +++ b/internal/plugin/runtime.go @@ -53,13 +53,13 @@ func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (Runtim return config, nil } -// parseEnv takes a list of "KEY=value" environment variable strings +// ParseEnv takes a list of "KEY=value" environment variable strings // and transforms the result into a map[KEY]=value // // - empty input strings are ignored // - input strings with no value are stored as empty strings // - duplicate keys overwrite earlier values -func parseEnv(env []string) map[string]string { +func ParseEnv(env []string) map[string]string { result := make(map[string]string, len(env)) for _, envVar := range env { parts := strings.SplitN(envVar, "=", 2) @@ -75,7 +75,9 @@ func parseEnv(env []string) map[string]string { return result } -func formatEnv(env map[string]string) []string { +// FormatEnv takes a map[KEY]=value and transforms it into +// a list of "KEY=value" environment variable strings +func FormatEnv(env map[string]string) []string { result := make([]string, 0, len(env)) for key, value := range env { result = append(result, fmt.Sprintf("%s=%s", key, value)) diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go index b5cc79a6f..cd9a02535 100644 --- a/internal/plugin/runtime_extismv1.go +++ b/internal/plugin/runtime_extismv1.go @@ -259,7 +259,7 @@ func buildPluginConfig(input *Input, r *RuntimeExtismV1) extism.PluginConfig { mc = mc.WithStderr(input.Stderr) } if len(input.Env) > 0 { - env := parseEnv(input.Env) + env := ParseEnv(input.Env) for k, v := range env { mc = mc.WithEnv(k, v) } diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index 802732b14..cd1a0842c 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -117,8 +117,8 @@ func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env cmd.Env = 
slices.Clone(os.Environ()) cmd.Env = append( cmd.Env, - fmt.Sprintf("HELM_PLUGIN_NAME=%s", r.metadata.Name), - fmt.Sprintf("HELM_PLUGIN_DIR=%s", r.pluginDir)) + "HELM_PLUGIN_NAME="+r.metadata.Name, + "HELM_PLUGIN_DIR="+r.pluginDir) cmd.Env = append(cmd.Env, env...) cmd.Stdin = stdin @@ -139,7 +139,7 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { return nil } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -150,7 +150,7 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { } cmd := exec.Command(main, argv...) - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -198,9 +198,9 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { cmds := r.RuntimeConfig.PlatformCommand - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -210,7 +210,7 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { } cmd := exec.Command(command, args...) 
- cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdin = input.Stdin cmd.Stdout = input.Stdout @@ -231,9 +231,9 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name) } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir @@ -261,7 +261,7 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) postRendered := &bytes.Buffer{} stderr := &bytes.Buffer{} - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = postRendered cmd.Stderr = stderr diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go index 6a41b149f..c7262e0dd 100644 --- a/internal/plugin/runtime_subprocess_getter.go +++ b/internal/plugin/runtime_subprocess_getter.go @@ -24,6 +24,7 @@ import ( "os/exec" "path/filepath" "slices" + "strconv" "helm.sh/helm/v4/internal/plugin/schema" ) @@ -56,14 +57,14 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol) } - env := parseEnv(os.Environ()) + env := ParseEnv(os.Environ()) maps.Insert(env, maps.All(r.EnvVars)) - maps.Insert(env, maps.All(parseEnv(input.Env))) + maps.Insert(env, maps.All(ParseEnv(input.Env))) env["HELM_PLUGIN_NAME"] = r.metadata.Name env["HELM_PLUGIN_DIR"] = r.pluginDir env["HELM_PLUGIN_USERNAME"] = msg.Options.Username env["HELM_PLUGIN_PASSWORD"] = msg.Options.Password - env["HELM_PLUGIN_PASS_CREDENTIALS_ALL"] = fmt.Sprintf("%t", msg.Options.PassCredentialsAll) + env["HELM_PLUGIN_PASS_CREDENTIALS_ALL"] = strconv.FormatBool(msg.Options.PassCredentialsAll) command, args, err := 
PrepareCommands(d.PlatformCommand, false, []string{}, env) if err != nil { @@ -83,7 +84,7 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { cmd := exec.Command( pluginCommand, args...) - cmd.Env = formatEnv(env) + cmd.Env = FormatEnv(env) cmd.Stdout = &buf cmd.Stderr = os.Stderr diff --git a/internal/plugin/runtime_subprocess_test.go b/internal/plugin/runtime_subprocess_test.go index ed251d28b..c651dd3f7 100644 --- a/internal/plugin/runtime_subprocess_test.go +++ b/internal/plugin/runtime_subprocess_test.go @@ -16,6 +16,7 @@ limitations under the License. package plugin import ( + "errors" "fmt" "os" "path/filepath" @@ -41,7 +42,7 @@ func mockSubprocessCLIPluginErrorExit(t *testing.T, pluginName string, exitCode md := Metadata{ Name: pluginName, - Version: "v0.1.2", + Version: "0.1.2", Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", @@ -76,7 +77,8 @@ func TestSubprocessPluginRuntime(t *testing.T) { }) require.Error(t, err) - ieerr, ok := err.(*InvokeExecError) + ieerr := &InvokeExecError{} + ok := errors.As(err, &ieerr) require.True(t, ok, "expected InvokeExecError, got %T", err) assert.Equal(t, 56, ieerr.ExitCode) diff --git a/internal/plugin/runtime_test.go b/internal/plugin/runtime_test.go index f8fe481c1..5552af08e 100644 --- a/internal/plugin/runtime_test.go +++ b/internal/plugin/runtime_test.go @@ -56,7 +56,7 @@ func TestParseEnv(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := parseEnv(tc.env) + result := ParseEnv(tc.env) assert.Equal(t, tc.expected, result) }) } @@ -93,7 +93,7 @@ func TestFormatEnv(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := formatEnv(tc.env) + result := FormatEnv(tc.env) assert.ElementsMatch(t, tc.expected, result) }) } diff --git a/internal/plugin/schema/getter.go b/internal/plugin/schema/getter.go index 2c5e81df1..f53ae29bf 100644 --- a/internal/plugin/schema/getter.go +++ 
b/internal/plugin/schema/getter.go @@ -14,6 +14,7 @@ package schema import ( + "errors" "fmt" "time" ) @@ -55,7 +56,7 @@ type ConfigGetterV1 struct { func (c *ConfigGetterV1) Validate() error { if len(c.Protocols) == 0 { - return fmt.Errorf("getter has no protocols") + return errors.New("getter has no protocols") } for i, protocol := range c.Protocols { if protocol == "" { diff --git a/internal/plugin/signing_info.go b/internal/plugin/signing_info.go index 61ee9cd15..ff27cc02d 100644 --- a/internal/plugin/signing_info.go +++ b/internal/plugin/signing_info.go @@ -23,7 +23,7 @@ import ( "path/filepath" "strings" - "github.com/ProtonMail/go-crypto/openpgp/clearsign" //nolint + "github.com/ProtonMail/go-crypto/openpgp/clearsign" "helm.sh/helm/v4/pkg/helmpath" ) diff --git a/internal/plugin/subprocess_commands.go b/internal/plugin/subprocess_commands.go index 9a57ed891..211ce5ebf 100644 --- a/internal/plugin/subprocess_commands.go +++ b/internal/plugin/subprocess_commands.go @@ -16,7 +16,7 @@ limitations under the License. 
package plugin import ( - "fmt" + "errors" "os" "runtime" "strings" @@ -80,7 +80,7 @@ func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) { func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string, env map[string]string) (string, []string, error) { cmdParts, args := getPlatformCommand(cmds) if len(cmdParts) == 0 || cmdParts[0] == "" { - return "", nil, fmt.Errorf("no plugin command is applicable") + return "", nil, errors.New("no plugin command is applicable") } envMappingFunc := func(key string) string { return env[key] diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go index 8e9c1663e..df854b4ca 100644 --- a/internal/plugin/subprocess_commands_test.go +++ b/internal/plugin/subprocess_commands_test.go @@ -210,7 +210,7 @@ func TestPrepareCommandsNoMatch(t *testing.T) { env := map[string]string{} if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil { - t.Fatalf("Expected error to be returned") + t.Fatal("Expected error to be returned") } } @@ -219,7 +219,7 @@ func TestPrepareCommandsNoCommands(t *testing.T) { env := map[string]string{} if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil { - t.Fatalf("Expected error to be returned") + t.Fatal("Expected error to be returned") } } diff --git a/internal/plugin/verify.go b/internal/plugin/verify.go index 760a56e67..df31fc4ce 100644 --- a/internal/plugin/verify.go +++ b/internal/plugin/verify.go @@ -33,7 +33,7 @@ func VerifyPlugin(archiveData, provData []byte, filename, keyring string) (*prov return sig.Verify(archiveData, provData, filename) } -// isTarball checks if a file has a tarball extension +// IsTarball checks if a file has a tarball extension func IsTarball(filename string) bool { return filepath.Ext(filename) == ".gz" || filepath.Ext(filename) == ".tgz" } diff --git a/internal/release/v2/doc.go b/internal/release/v2/doc.go new file mode 100644 index 000000000..5b8cebee3 --- 
/dev/null +++ b/internal/release/v2/doc.go @@ -0,0 +1,17 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v2 provides release handling for apiVersion v3 charts. +package v2 diff --git a/internal/release/v2/hook.go b/internal/release/v2/hook.go new file mode 100644 index 000000000..5009ffbd0 --- /dev/null +++ b/internal/release/v2/hook.go @@ -0,0 +1,189 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2 + +import ( + "encoding/json" + "time" +) + +// HookEvent specifies the hook event +type HookEvent string + +// Hook event types +const ( + HookPreInstall HookEvent = "pre-install" + HookPostInstall HookEvent = "post-install" + HookPreDelete HookEvent = "pre-delete" + HookPostDelete HookEvent = "post-delete" + HookPreUpgrade HookEvent = "pre-upgrade" + HookPostUpgrade HookEvent = "post-upgrade" + HookPreRollback HookEvent = "pre-rollback" + HookPostRollback HookEvent = "post-rollback" + HookTest HookEvent = "test" +) + +func (x HookEvent) String() string { return string(x) } + +// HookDeletePolicy specifies the hook delete policy +type HookDeletePolicy string + +// Hook delete policy types +const ( + HookSucceeded HookDeletePolicy = "hook-succeeded" + HookFailed HookDeletePolicy = "hook-failed" + HookBeforeHookCreation HookDeletePolicy = "before-hook-creation" +) + +func (x HookDeletePolicy) String() string { return string(x) } + +// HookOutputLogPolicy specifies the hook output log policy +type HookOutputLogPolicy string + +// Hook output log policy types +const ( + HookOutputOnSucceeded HookOutputLogPolicy = "hook-succeeded" + HookOutputOnFailed HookOutputLogPolicy = "hook-failed" +) + +func (x HookOutputLogPolicy) String() string { return string(x) } + +// HookAnnotation is the label name for a hook +const HookAnnotation = "helm.sh/hook" + +// HookWeightAnnotation is the label name for a hook weight +const HookWeightAnnotation = "helm.sh/hook-weight" + +// HookDeleteAnnotation is the label name for the delete policy for a hook +const HookDeleteAnnotation = "helm.sh/hook-delete-policy" + +// HookOutputLogAnnotation is the label name for the output log policy for a hook +const HookOutputLogAnnotation = "helm.sh/hook-output-log-policy" + +// Hook defines a hook object. +type Hook struct { + Name string `json:"name,omitempty"` + // Kind is the Kubernetes kind. 
+ Kind string `json:"kind,omitempty"` + // Path is the chart-relative path to the template. + Path string `json:"path,omitempty"` + // Manifest is the manifest contents. + Manifest string `json:"manifest,omitempty"` + // Events are the events that this hook fires on. + Events []HookEvent `json:"events,omitempty"` + // LastRun indicates the date/time this was last run. + LastRun HookExecution `json:"last_run"` + // Weight indicates the sort order for execution among similar Hook type + Weight int `json:"weight,omitempty"` + // DeletePolicies are the policies that indicate when to delete the hook + DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"` + // OutputLogPolicies defines whether we should copy hook logs back to main process + OutputLogPolicies []HookOutputLogPolicy `json:"output_log_policies,omitempty"` +} + +// A HookExecution records the result for the last execution of a hook for a given release. +type HookExecution struct { + // StartedAt indicates the date/time this hook was started + StartedAt time.Time `json:"started_at,omitzero"` + // CompletedAt indicates the date/time this hook was completed. 
+ CompletedAt time.Time `json:"completed_at,omitzero"` + // Phase indicates whether the hook completed successfully + Phase HookPhase `json:"phase"` +} + +// A HookPhase indicates the state of a hook execution +type HookPhase string + +const ( + // HookPhaseUnknown indicates that a hook is in an unknown state + HookPhaseUnknown HookPhase = "Unknown" + // HookPhaseRunning indicates that a hook is currently executing + HookPhaseRunning HookPhase = "Running" + // HookPhaseSucceeded indicates that hook execution succeeded + HookPhaseSucceeded HookPhase = "Succeeded" + // HookPhaseFailed indicates that hook execution failed + HookPhaseFailed HookPhase = "Failed" +) + +// String converts a hook phase to a printable string +func (x HookPhase) String() string { return string(x) } + +// hookExecutionJSON is used for custom JSON marshaling/unmarshaling +type hookExecutionJSON struct { + StartedAt *time.Time `json:"started_at,omitempty"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + Phase HookPhase `json:"phase"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// It handles empty string time fields by treating them as zero values. 
+func (h *HookExecution) UnmarshalJSON(data []byte) error { + // First try to unmarshal into a map to handle empty string time fields + var raw map[string]any + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + // Replace empty string time fields with nil + for _, field := range []string{"started_at", "completed_at"} { + if val, ok := raw[field]; ok { + if str, ok := val.(string); ok && str == "" { + raw[field] = nil + } + } + } + + // Re-marshal with cleaned data + cleaned, err := json.Marshal(raw) + if err != nil { + return err + } + + // Unmarshal into temporary struct with pointer time fields + var tmp hookExecutionJSON + if err := json.Unmarshal(cleaned, &tmp); err != nil { + return err + } + + // Copy values to HookExecution struct + if tmp.StartedAt != nil { + h.StartedAt = *tmp.StartedAt + } + if tmp.CompletedAt != nil { + h.CompletedAt = *tmp.CompletedAt + } + h.Phase = tmp.Phase + + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +// It omits zero-value time fields from the JSON output. +func (h HookExecution) MarshalJSON() ([]byte, error) { + tmp := hookExecutionJSON{ + Phase: h.Phase, + } + + if !h.StartedAt.IsZero() { + tmp.StartedAt = &h.StartedAt + } + if !h.CompletedAt.IsZero() { + tmp.CompletedAt = &h.CompletedAt + } + + return json.Marshal(tmp) +} diff --git a/internal/release/v2/hook_test.go b/internal/release/v2/hook_test.go new file mode 100644 index 000000000..5a0867398 --- /dev/null +++ b/internal/release/v2/hook_test.go @@ -0,0 +1,231 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHookExecutionMarshalJSON(t *testing.T) { + started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC) + + tests := []struct { + name string + exec HookExecution + expected string + }{ + { + name: "all fields populated", + exec: HookExecution{ + StartedAt: started, + CompletedAt: completed, + Phase: HookPhaseSucceeded, + }, + expected: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Succeeded"}`, + }, + { + name: "only phase", + exec: HookExecution{ + Phase: HookPhaseRunning, + }, + expected: `{"phase":"Running"}`, + }, + { + name: "with started time only", + exec: HookExecution{ + StartedAt: started, + Phase: HookPhaseRunning, + }, + expected: `{"started_at":"2025-10-08T12:00:00Z","phase":"Running"}`, + }, + { + name: "failed phase", + exec: HookExecution{ + StartedAt: started, + CompletedAt: completed, + Phase: HookPhaseFailed, + }, + expected: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Failed"}`, + }, + { + name: "unknown phase", + exec: HookExecution{ + Phase: HookPhaseUnknown, + }, + expected: `{"phase":"Unknown"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(&tt.exec) + require.NoError(t, err) + assert.JSONEq(t, tt.expected, string(data)) + }) + } +} + +func TestHookExecutionUnmarshalJSON(t *testing.T) { + started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC) + + tests := []struct { + name string + input string + expected HookExecution + wantErr bool + }{ + { + name: "all fields populated", + input: 
`{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Succeeded"}`, + expected: HookExecution{ + StartedAt: started, + CompletedAt: completed, + Phase: HookPhaseSucceeded, + }, + }, + { + name: "only phase", + input: `{"phase":"Running"}`, + expected: HookExecution{ + Phase: HookPhaseRunning, + }, + }, + { + name: "empty string time fields", + input: `{"started_at":"","completed_at":"","phase":"Succeeded"}`, + expected: HookExecution{ + Phase: HookPhaseSucceeded, + }, + }, + { + name: "missing time fields", + input: `{"phase":"Failed"}`, + expected: HookExecution{ + Phase: HookPhaseFailed, + }, + }, + { + name: "null time fields", + input: `{"started_at":null,"completed_at":null,"phase":"Unknown"}`, + expected: HookExecution{ + Phase: HookPhaseUnknown, + }, + }, + { + name: "mixed empty and valid time fields", + input: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"","phase":"Running"}`, + expected: HookExecution{ + StartedAt: started, + Phase: HookPhaseRunning, + }, + }, + { + name: "with started time only", + input: `{"started_at":"2025-10-08T12:00:00Z","phase":"Running"}`, + expected: HookExecution{ + StartedAt: started, + Phase: HookPhaseRunning, + }, + }, + { + name: "failed phase with times", + input: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Failed"}`, + expected: HookExecution{ + StartedAt: started, + CompletedAt: completed, + Phase: HookPhaseFailed, + }, + }, + { + name: "invalid time format", + input: `{"started_at":"invalid-time","phase":"Running"}`, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var exec HookExecution + err := json.Unmarshal([]byte(tt.input), &exec) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.expected.StartedAt.Unix(), exec.StartedAt.Unix()) + assert.Equal(t, tt.expected.CompletedAt.Unix(), exec.CompletedAt.Unix()) + assert.Equal(t, tt.expected.Phase, 
exec.Phase) + }) + } +} + +func TestHookExecutionRoundTrip(t *testing.T) { + started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC) + + original := HookExecution{ + StartedAt: started, + CompletedAt: completed, + Phase: HookPhaseSucceeded, + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded HookExecution + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, original.StartedAt.Unix(), decoded.StartedAt.Unix()) + assert.Equal(t, original.CompletedAt.Unix(), decoded.CompletedAt.Unix()) + assert.Equal(t, original.Phase, decoded.Phase) +} + +func TestHookExecutionEmptyStringRoundTrip(t *testing.T) { + // This test specifically verifies that empty string time fields + // are handled correctly during parsing + input := `{"started_at":"","completed_at":"","phase":"Succeeded"}` + + var exec HookExecution + err := json.Unmarshal([]byte(input), &exec) + require.NoError(t, err) + + // Verify time fields are zero values + assert.True(t, exec.StartedAt.IsZero()) + assert.True(t, exec.CompletedAt.IsZero()) + assert.Equal(t, HookPhaseSucceeded, exec.Phase) + + // Marshal back and verify empty time fields are omitted + data, err := json.Marshal(&exec) + require.NoError(t, err) + + var result map[string]any + err = json.Unmarshal(data, &result) + require.NoError(t, err) + + // Zero time values should be omitted + assert.NotContains(t, result, "started_at") + assert.NotContains(t, result, "completed_at") + assert.Equal(t, "Succeeded", result["phase"]) +} diff --git a/internal/release/v2/info.go b/internal/release/v2/info.go new file mode 100644 index 000000000..6b17c0edd --- /dev/null +++ b/internal/release/v2/info.go @@ -0,0 +1,125 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "encoding/json" + "time" + + "helm.sh/helm/v4/pkg/release/common" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Info describes release information. +type Info struct { + // FirstDeployed is when the release was first deployed. + FirstDeployed time.Time `json:"first_deployed,omitzero"` + // LastDeployed is when the release was last deployed. + LastDeployed time.Time `json:"last_deployed,omitzero"` + // Deleted tracks when this object was deleted. + Deleted time.Time `json:"deleted,omitzero"` + // Description is human-friendly "log entry" about this release. + Description string `json:"description,omitempty"` + // Status is the current state of the release + Status common.Status `json:"status,omitempty"` + // Contains the rendered templates/NOTES.txt if available + Notes string `json:"notes,omitempty"` + // Contains the deployed resources information + Resources map[string][]runtime.Object `json:"resources,omitempty"` +} + +// infoJSON is used for custom JSON marshaling/unmarshaling +type infoJSON struct { + FirstDeployed *time.Time `json:"first_deployed,omitempty"` + LastDeployed *time.Time `json:"last_deployed,omitempty"` + Deleted *time.Time `json:"deleted,omitempty"` + Description string `json:"description,omitempty"` + Status common.Status `json:"status,omitempty"` + Notes string `json:"notes,omitempty"` + Resources map[string][]runtime.Object `json:"resources,omitempty"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// It handles empty string time fields by treating them as zero values. 
+func (i *Info) UnmarshalJSON(data []byte) error { + // First try to unmarshal into a map to handle empty string time fields + var raw map[string]any + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + // Replace empty string time fields with nil + for _, field := range []string{"first_deployed", "last_deployed", "deleted"} { + if val, ok := raw[field]; ok { + if str, ok := val.(string); ok && str == "" { + raw[field] = nil + } + } + } + + // Re-marshal with cleaned data + cleaned, err := json.Marshal(raw) + if err != nil { + return err + } + + // Unmarshal into temporary struct with pointer time fields + var tmp infoJSON + if err := json.Unmarshal(cleaned, &tmp); err != nil { + return err + } + + // Copy values to Info struct + if tmp.FirstDeployed != nil { + i.FirstDeployed = *tmp.FirstDeployed + } + if tmp.LastDeployed != nil { + i.LastDeployed = *tmp.LastDeployed + } + if tmp.Deleted != nil { + i.Deleted = *tmp.Deleted + } + i.Description = tmp.Description + i.Status = tmp.Status + i.Notes = tmp.Notes + i.Resources = tmp.Resources + + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +// It omits zero-value time fields from the JSON output. +func (i Info) MarshalJSON() ([]byte, error) { + tmp := infoJSON{ + Description: i.Description, + Status: i.Status, + Notes: i.Notes, + Resources: i.Resources, + } + + if !i.FirstDeployed.IsZero() { + tmp.FirstDeployed = &i.FirstDeployed + } + if !i.LastDeployed.IsZero() { + tmp.LastDeployed = &i.LastDeployed + } + if !i.Deleted.IsZero() { + tmp.Deleted = &i.Deleted + } + + return json.Marshal(tmp) +} diff --git a/internal/release/v2/info_test.go b/internal/release/v2/info_test.go new file mode 100644 index 000000000..560861e06 --- /dev/null +++ b/internal/release/v2/info_test.go @@ -0,0 +1,285 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "encoding/json" + "testing" + "time" + + "helm.sh/helm/v4/pkg/release/common" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInfoMarshalJSON(t *testing.T) { + now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC) + deleted := time.Date(2025, 10, 8, 14, 0, 0, 0, time.UTC) + + tests := []struct { + name string + info Info + expected string + }{ + { + name: "all fields populated", + info: Info{ + FirstDeployed: now, + LastDeployed: later, + Deleted: deleted, + Description: "Test release", + Status: common.StatusDeployed, + Notes: "Test notes", + }, + expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Test release","status":"deployed","notes":"Test notes"}`, + }, + { + name: "only required fields", + info: Info{ + FirstDeployed: now, + LastDeployed: later, + Status: common.StatusDeployed, + }, + expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"deployed"}`, + }, + { + name: "zero time values omitted", + info: Info{ + Description: "Test release", + Status: common.StatusDeployed, + }, + expected: `{"description":"Test release","status":"deployed"}`, + }, + { + name: "with pending status", + info: Info{ + FirstDeployed: now, + LastDeployed: later, + Status: common.StatusPendingInstall, + Description: "Installing release", + }, + expected: 
`{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","description":"Installing release","status":"pending-install"}`, + }, + { + name: "uninstalled with deleted time", + info: Info{ + FirstDeployed: now, + LastDeployed: later, + Deleted: deleted, + Status: common.StatusUninstalled, + Description: "Uninstalled release", + }, + expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Uninstalled release","status":"uninstalled"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(&tt.info) + require.NoError(t, err) + assert.JSONEq(t, tt.expected, string(data)) + }) + } +} + +func TestInfoUnmarshalJSON(t *testing.T) { + now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC) + deleted := time.Date(2025, 10, 8, 14, 0, 0, 0, time.UTC) + + tests := []struct { + name string + input string + expected Info + wantErr bool + }{ + { + name: "all fields populated", + input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Test release","status":"deployed","notes":"Test notes"}`, + expected: Info{ + FirstDeployed: now, + LastDeployed: later, + Deleted: deleted, + Description: "Test release", + Status: common.StatusDeployed, + Notes: "Test notes", + }, + }, + { + name: "only required fields", + input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"deployed"}`, + expected: Info{ + FirstDeployed: now, + LastDeployed: later, + Status: common.StatusDeployed, + }, + }, + { + name: "empty string time fields", + input: `{"first_deployed":"","last_deployed":"","deleted":"","description":"Test release","status":"deployed"}`, + expected: Info{ + Description: "Test release", + Status: common.StatusDeployed, + }, + }, + { + name: "missing time fields", + 
input: `{"description":"Test release","status":"deployed"}`, + expected: Info{ + Description: "Test release", + Status: common.StatusDeployed, + }, + }, + { + name: "null time fields", + input: `{"first_deployed":null,"last_deployed":null,"deleted":null,"description":"Test release","status":"deployed"}`, + expected: Info{ + Description: "Test release", + Status: common.StatusDeployed, + }, + }, + { + name: "mixed empty and valid time fields", + input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"","deleted":"","status":"deployed"}`, + expected: Info{ + FirstDeployed: now, + Status: common.StatusDeployed, + }, + }, + { + name: "pending install status", + input: `{"first_deployed":"2025-10-08T12:00:00Z","status":"pending-install","description":"Installing"}`, + expected: Info{ + FirstDeployed: now, + Status: common.StatusPendingInstall, + Description: "Installing", + }, + }, + { + name: "uninstalled with deleted time", + input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","status":"uninstalled"}`, + expected: Info{ + FirstDeployed: now, + LastDeployed: later, + Deleted: deleted, + Status: common.StatusUninstalled, + }, + }, + { + name: "failed status", + input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"failed","description":"Deployment failed"}`, + expected: Info{ + FirstDeployed: now, + LastDeployed: later, + Status: common.StatusFailed, + Description: "Deployment failed", + }, + }, + { + name: "invalid time format", + input: `{"first_deployed":"invalid-time","status":"deployed"}`, + wantErr: true, + }, + { + name: "empty object", + input: `{}`, + expected: Info{ + Status: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var info Info + err := json.Unmarshal([]byte(tt.input), &info) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, 
tt.expected.FirstDeployed.Unix(), info.FirstDeployed.Unix()) + assert.Equal(t, tt.expected.LastDeployed.Unix(), info.LastDeployed.Unix()) + assert.Equal(t, tt.expected.Deleted.Unix(), info.Deleted.Unix()) + assert.Equal(t, tt.expected.Description, info.Description) + assert.Equal(t, tt.expected.Status, info.Status) + assert.Equal(t, tt.expected.Notes, info.Notes) + assert.Equal(t, tt.expected.Resources, info.Resources) + }) + } +} + +func TestInfoRoundTrip(t *testing.T) { + now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC) + later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC) + + original := Info{ + FirstDeployed: now, + LastDeployed: later, + Description: "Test release", + Status: common.StatusDeployed, + Notes: "Release notes", + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Info + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, original.FirstDeployed.Unix(), decoded.FirstDeployed.Unix()) + assert.Equal(t, original.LastDeployed.Unix(), decoded.LastDeployed.Unix()) + assert.Equal(t, original.Deleted.Unix(), decoded.Deleted.Unix()) + assert.Equal(t, original.Description, decoded.Description) + assert.Equal(t, original.Status, decoded.Status) + assert.Equal(t, original.Notes, decoded.Notes) +} + +func TestInfoEmptyStringRoundTrip(t *testing.T) { + // This test specifically verifies that empty string time fields + // are handled correctly during parsing + input := `{"first_deployed":"","last_deployed":"","deleted":"","status":"deployed","description":"test"}` + + var info Info + err := json.Unmarshal([]byte(input), &info) + require.NoError(t, err) + + // Verify time fields are zero values + assert.True(t, info.FirstDeployed.IsZero()) + assert.True(t, info.LastDeployed.IsZero()) + assert.True(t, info.Deleted.IsZero()) + assert.Equal(t, common.StatusDeployed, info.Status) + assert.Equal(t, "test", info.Description) + + // Marshal back and verify empty time fields are omitted + data, err := 
json.Marshal(&info) + require.NoError(t, err) + + var result map[string]any + err = json.Unmarshal(data, &result) + require.NoError(t, err) + + // Zero time values should be omitted due to omitzero tag + assert.NotContains(t, result, "first_deployed") + assert.NotContains(t, result, "last_deployed") + assert.NotContains(t, result, "deleted") + assert.Equal(t, "deployed", result["status"]) + assert.Equal(t, "test", result["description"]) +} diff --git a/internal/release/v2/mock.go b/internal/release/v2/mock.go new file mode 100644 index 000000000..295eb219f --- /dev/null +++ b/internal/release/v2/mock.go @@ -0,0 +1,143 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "math/rand" + "strconv" + "time" + + v3 "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/chart/common" + rcommon "helm.sh/helm/v4/pkg/release/common" +) + +// MockHookTemplate is the hook template used for all mock release objects. +var MockHookTemplate = `apiVersion: v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-install +` + +// MockManifest is the manifest used for all mock release objects. +var MockManifest = `apiVersion: v1 +kind: Secret +metadata: + name: fixture +` + +// MockReleaseOptions allows for user-configurable options on mock release objects. 
+type MockReleaseOptions struct { + Name string + Version int + Chart *v3.Chart + Status rcommon.Status + Namespace string + Labels map[string]string +} + +// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing. +func Mock(opts *MockReleaseOptions) *Release { + date := time.Unix(242085845, 0).UTC() + + name := opts.Name + if name == "" { + name = "testrelease-" + strconv.Itoa(rand.Intn(100)) + } + + version := 1 + if opts.Version != 0 { + version = opts.Version + } + + namespace := opts.Namespace + if namespace == "" { + namespace = "default" + } + var labels map[string]string + if len(opts.Labels) > 0 { + labels = opts.Labels + } + + ch := opts.Chart + if opts.Chart == nil { + ch = &v3.Chart{ + Metadata: &v3.Metadata{ + Name: "foo", + Version: "0.1.0-beta.1", + AppVersion: "1.0", + APIVersion: v3.APIVersionV3, + Annotations: map[string]string{ + "category": "web-apps", + "supported": "true", + }, + Dependencies: []*v3.Dependency{ + { + Name: "cool-plugin", + Version: "1.0.0", + Repository: "https://coolplugin.io/charts", + Condition: "coolPlugin.enabled", + Enabled: true, + }, + { + Name: "crds", + Version: "2.7.1", + Condition: "crds.enabled", + }, + }, + }, + Templates: []*common.File{ + {Name: "templates/foo.tpl", ModTime: time.Now(), Data: []byte(MockManifest)}, + }, + } + } + + scode := rcommon.StatusDeployed + if len(opts.Status) > 0 { + scode = opts.Status + } + + info := &Info{ + FirstDeployed: date, + LastDeployed: date, + Status: scode, + Description: "Release mock", + Notes: "Some mock release notes!", + } + + return &Release{ + Name: name, + Info: info, + Chart: ch, + Config: map[string]any{"name": "value"}, + Version: version, + Namespace: namespace, + Hooks: []*Hook{ + { + Name: "pre-install-hook", + Kind: "Job", + Path: "pre-install-hook.yaml", + Manifest: MockHookTemplate, + LastRun: HookExecution{}, + Events: []HookEvent{HookPreInstall}, + }, + }, + Manifest: 
MockManifest, + Labels: labels, + } +} diff --git a/internal/release/v2/release.go b/internal/release/v2/release.go new file mode 100644 index 000000000..8b8f2ee07 --- /dev/null +++ b/internal/release/v2/release.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/release/common" +) + +type ApplyMethod string + +const ApplyMethodClientSideApply ApplyMethod = "csa" +const ApplyMethodServerSideApply ApplyMethod = "ssa" + +// Release describes a deployment of a chart, together with the chart +// and the variables used to deploy that chart. +type Release struct { + // Name is the name of the release + Name string `json:"name,omitempty"` + // Info provides information about a release + Info *Info `json:"info,omitempty"` + // Chart is the chart that was released. + Chart *chart.Chart `json:"chart,omitempty"` + // Config is the set of extra Values added to the chart. + // These values override the default values inside of the chart. + Config map[string]any `json:"config,omitempty"` + // Manifest is the string representation of the rendered template. + Manifest string `json:"manifest,omitempty"` + // Hooks are all of the hooks declared for this release. + Hooks []*Hook `json:"hooks,omitempty"` + // Version is an int which represents the revision of the release. 
+ Version int `json:"version,omitempty"` + // Namespace is the kubernetes namespace of the release. + Namespace string `json:"namespace,omitempty"` + // Labels of the release. + // Disabled encoding into Json cause labels are stored in storage driver metadata field. + Labels map[string]string `json:"-"` + // ApplyMethod stores whether server-side or client-side apply was used for the release + // Unset (empty string) should be treated as the default of client-side apply + ApplyMethod string `json:"apply_method,omitempty"` // "ssa" | "csa" +} + +// SetStatus is a helper for setting the status on a release. +func (r *Release) SetStatus(status common.Status, msg string) { + r.Info.Status = status + r.Info.Description = msg +} diff --git a/internal/release/v2/util/filter.go b/internal/release/v2/util/filter.go new file mode 100644 index 000000000..bc568cbdc --- /dev/null +++ b/internal/release/v2/util/filter.go @@ -0,0 +1,81 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + v2 "helm.sh/helm/v4/internal/release/v2" + "helm.sh/helm/v4/pkg/release/common" +) + +// FilterFunc returns true if the release object satisfies +// the predicate of the underlying filter func. +type FilterFunc func(*v2.Release) bool + +// Check applies the FilterFunc to the release object. 
+func (fn FilterFunc) Check(rls *v2.Release) bool { + if rls == nil { + return false + } + return fn(rls) +} + +// Filter applies the filter(s) to the list of provided releases +// returning the list that satisfies the filtering predicate. +func (fn FilterFunc) Filter(rels []*v2.Release) (rets []*v2.Release) { + for _, rel := range rels { + if fn.Check(rel) { + rets = append(rets, rel) + } + } + return +} + +// Any returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 || f1 || ... || fn'. +func Any(filters ...FilterFunc) FilterFunc { + return func(rls *v2.Release) bool { + for _, filter := range filters { + if filter(rls) { + return true + } + } + return false + } +} + +// All returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 && f1 && ... && fn'. +func All(filters ...FilterFunc) FilterFunc { + return func(rls *v2.Release) bool { + for _, filter := range filters { + if !filter(rls) { + return false + } + } + return true + } +} + +// StatusFilter filters a set of releases by status code. +func StatusFilter(status common.Status) FilterFunc { + return FilterFunc(func(rls *v2.Release) bool { + if rls == nil { + return true + } + return rls.Info.Status == status + }) +} diff --git a/internal/release/v2/util/filter_test.go b/internal/release/v2/util/filter_test.go new file mode 100644 index 000000000..35236498a --- /dev/null +++ b/internal/release/v2/util/filter_test.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/internal/release/v2/util"
+
+import (
+	"testing"
+
+	rspb "helm.sh/helm/v4/internal/release/v2"
+	"helm.sh/helm/v4/pkg/release/common"
+)
+
+func TestFilterAny(t *testing.T) {
+	ls := Any(StatusFilter(common.StatusUninstalled)).Filter(releases)
+	if len(ls) != 2 {
+		t.Fatalf("expected 2 results, got '%d'", len(ls))
+	}
+
+	r0, r1 := ls[0], ls[1]
+	switch {
+	case r0.Info.Status != common.StatusUninstalled:
+		t.Fatalf("expected UNINSTALLED result, got '%s'", r0.Info.Status.String())
+	case r1.Info.Status != common.StatusUninstalled:
+		t.Fatalf("expected UNINSTALLED result, got '%s'", r1.Info.Status.String())
+	}
+}
+
+func TestFilterAll(t *testing.T) {
+	fn := FilterFunc(func(rls *rspb.Release) bool {
+		// true if not uninstalled and version < 4
+		v0 := !StatusFilter(common.StatusUninstalled).Check(rls)
+		v1 := rls.Version < 4
+		return v0 && v1
+	})
+
+	ls := All(fn).Filter(releases)
+	if len(ls) != 1 {
+		t.Fatalf("expected 1 result, got '%d'", len(ls))
+	}
+
+	switch r0 := ls[0]; {
+	case r0.Version == 4:
+		t.Fatal("got release with status revision 4")
+	case r0.Info.Status == common.StatusUninstalled:
+		t.Fatal("got release with status UNINSTALLED")
+	}
+}
diff --git a/internal/release/v2/util/kind_sorter.go b/internal/release/v2/util/kind_sorter.go
new file mode 100644
index 000000000..dba35b6d2
--- /dev/null
+++ b/internal/release/v2/util/kind_sorter.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "sort" + + release "helm.sh/helm/v4/internal/release/v2" +) + +// KindSortOrder is an ordering of Kinds. +type KindSortOrder []string + +// InstallOrder is the order in which manifests should be installed (by Kind). +// +// Those occurring earlier in the list get installed before those occurring later in the list. +var InstallOrder KindSortOrder = []string{ + "PriorityClass", + "Namespace", + "NetworkPolicy", + "ResourceQuota", + "LimitRange", + "PodSecurityPolicy", + "PodDisruptionBudget", + "ServiceAccount", + "Secret", + "SecretList", + "ConfigMap", + "StorageClass", + "PersistentVolume", + "PersistentVolumeClaim", + "CustomResourceDefinition", + "ClusterRole", + "ClusterRoleList", + "ClusterRoleBinding", + "ClusterRoleBindingList", + "Role", + "RoleList", + "RoleBinding", + "RoleBindingList", + "Service", + "DaemonSet", + "Pod", + "ReplicationController", + "ReplicaSet", + "Deployment", + "HorizontalPodAutoscaler", + "StatefulSet", + "Job", + "CronJob", + "IngressClass", + "Ingress", + "APIService", + "MutatingWebhookConfiguration", + "ValidatingWebhookConfiguration", +} + +// UninstallOrder is the order in which manifests should be uninstalled (by Kind). +// +// Those occurring earlier in the list get uninstalled before those occurring later in the list. 
+var UninstallOrder KindSortOrder = []string{ + // For uninstall, we remove validation before mutation to ensure webhooks don't block removal + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration", + "APIService", + "Ingress", + "IngressClass", + "Service", + "CronJob", + "Job", + "StatefulSet", + "HorizontalPodAutoscaler", + "Deployment", + "ReplicaSet", + "ReplicationController", + "Pod", + "DaemonSet", + "RoleBindingList", + "RoleBinding", + "RoleList", + "Role", + "ClusterRoleBindingList", + "ClusterRoleBinding", + "ClusterRoleList", + "ClusterRole", + "CustomResourceDefinition", + "PersistentVolumeClaim", + "PersistentVolume", + "StorageClass", + "ConfigMap", + "SecretList", + "Secret", + "ServiceAccount", + "PodDisruptionBudget", + "PodSecurityPolicy", + "LimitRange", + "ResourceQuota", + "NetworkPolicy", + "Namespace", + "PriorityClass", +} + +// sort manifests by kind. +// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest { + sort.SliceStable(manifests, func(i, j int) bool { + return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering) + }) + + return manifests +} + +// sort hooks by kind, using an out-of-place sort to preserve the input parameters. 
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook {
+	h := append([]*release.Hook(nil), hooks...)
+	sort.SliceStable(h, func(i, j int) bool {
+		return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering)
+	})
+
+	return h
+}
+
+func lessByKind(_ any, _ any, kindA string, kindB string, o KindSortOrder) bool {
+	ordering := make(map[string]int, len(o))
+	for v, k := range o {
+		ordering[k] = v
+	}
+
+	first, aok := ordering[kindA]
+	second, bok := ordering[kindB]
+
+	if !aok && !bok {
+		// if both are unknown then sort alphabetically by kind, keep original order if same kind
+		if kindA != kindB {
+			return kindA < kindB
+		}
+		return first < second
+	}
+	// unknown kind is last
+	if !aok {
+		return false
+	}
+	if !bok {
+		return true
+	}
+	// sort different kinds, keep original order if same priority
+	return first < second
+}
diff --git a/internal/release/v2/util/kind_sorter_test.go b/internal/release/v2/util/kind_sorter_test.go
new file mode 100644
index 000000000..06418a5fc
--- /dev/null
+++ b/internal/release/v2/util/kind_sorter_test.go
@@ -0,0 +1,347 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "bytes" + "testing" + + release "helm.sh/helm/v4/internal/release/v2" +) + +func TestKindSorter(t *testing.T) { + manifests := []Manifest{ + { + Name: "U", + Head: &SimpleHead{Kind: "IngressClass"}, + }, + { + Name: "E", + Head: &SimpleHead{Kind: "SecretList"}, + }, + { + Name: "i", + Head: &SimpleHead{Kind: "ClusterRole"}, + }, + { + Name: "I", + Head: &SimpleHead{Kind: "ClusterRoleList"}, + }, + { + Name: "j", + Head: &SimpleHead{Kind: "ClusterRoleBinding"}, + }, + { + Name: "J", + Head: &SimpleHead{Kind: "ClusterRoleBindingList"}, + }, + { + Name: "f", + Head: &SimpleHead{Kind: "ConfigMap"}, + }, + { + Name: "u", + Head: &SimpleHead{Kind: "CronJob"}, + }, + { + Name: "2", + Head: &SimpleHead{Kind: "CustomResourceDefinition"}, + }, + { + Name: "n", + Head: &SimpleHead{Kind: "DaemonSet"}, + }, + { + Name: "r", + Head: &SimpleHead{Kind: "Deployment"}, + }, + { + Name: "!", + Head: &SimpleHead{Kind: "HonkyTonkSet"}, + }, + { + Name: "v", + Head: &SimpleHead{Kind: "Ingress"}, + }, + { + Name: "t", + Head: &SimpleHead{Kind: "Job"}, + }, + { + Name: "c", + Head: &SimpleHead{Kind: "LimitRange"}, + }, + { + Name: "a", + Head: &SimpleHead{Kind: "Namespace"}, + }, + { + Name: "A", + Head: &SimpleHead{Kind: "NetworkPolicy"}, + }, + { + Name: "g", + Head: &SimpleHead{Kind: "PersistentVolume"}, + }, + { + Name: "h", + Head: &SimpleHead{Kind: "PersistentVolumeClaim"}, + }, + { + Name: "o", + Head: &SimpleHead{Kind: "Pod"}, + }, + { + Name: "3", + Head: &SimpleHead{Kind: "PodDisruptionBudget"}, + }, + { + Name: "C", + Head: &SimpleHead{Kind: "PodSecurityPolicy"}, + }, + { + Name: "q", + Head: &SimpleHead{Kind: "ReplicaSet"}, + }, + { + Name: "p", + Head: &SimpleHead{Kind: "ReplicationController"}, + }, + { + Name: "b", + Head: &SimpleHead{Kind: "ResourceQuota"}, + }, + { + Name: "k", + Head: &SimpleHead{Kind: "Role"}, + }, + { + Name: "K", + Head: &SimpleHead{Kind: "RoleList"}, + }, + { + 
Name: "l", + Head: &SimpleHead{Kind: "RoleBinding"}, + }, + { + Name: "L", + Head: &SimpleHead{Kind: "RoleBindingList"}, + }, + { + Name: "e", + Head: &SimpleHead{Kind: "Secret"}, + }, + { + Name: "m", + Head: &SimpleHead{Kind: "Service"}, + }, + { + Name: "d", + Head: &SimpleHead{Kind: "ServiceAccount"}, + }, + { + Name: "s", + Head: &SimpleHead{Kind: "StatefulSet"}, + }, + { + Name: "1", + Head: &SimpleHead{Kind: "StorageClass"}, + }, + { + Name: "w", + Head: &SimpleHead{Kind: "APIService"}, + }, + { + Name: "x", + Head: &SimpleHead{Kind: "HorizontalPodAutoscaler"}, + }, + { + Name: "F", + Head: &SimpleHead{Kind: "PriorityClass"}, + }, + { + Name: "M", + Head: &SimpleHead{Kind: "MutatingWebhookConfiguration"}, + }, + { + Name: "V", + Head: &SimpleHead{Kind: "ValidatingWebhookConfiguration"}, + }, + } + + for _, test := range []struct { + description string + order KindSortOrder + expected string + }{ + {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"}, + {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"}, + } { + var buf bytes.Buffer + t.Run(test.description, func(t *testing.T) { + if got, want := len(test.expected), len(manifests); got != want { + t.Fatalf("Expected %d names in order, got %d", want, got) + } + defer buf.Reset() + orig := manifests + for _, r := range sortManifestsByKind(manifests, test.order) { + buf.WriteString(r.Name) + } + if got := buf.String(); got != test.expected { + t.Errorf("Expected %q, got %q", test.expected, got) + } + for i, manifest := range orig { + if manifest != manifests[i] { + t.Fatal("Expected input to sortManifestsByKind to stay the same") + } + } + }) + } +} + +// TestKindSorterKeepOriginalOrder verifies manifests of same kind are kept in original order +func TestKindSorterKeepOriginalOrder(t *testing.T) { + manifests := []Manifest{ + { + Name: "a", + Head: &SimpleHead{Kind: "ClusterRole"}, + }, + { + Name: "A", + Head: &SimpleHead{Kind: "ClusterRole"}, + }, + { + Name: "0", + Head: 
&SimpleHead{Kind: "ConfigMap"}, + }, + { + Name: "1", + Head: &SimpleHead{Kind: "ConfigMap"}, + }, + { + Name: "z", + Head: &SimpleHead{Kind: "ClusterRoleBinding"}, + }, + { + Name: "!", + Head: &SimpleHead{Kind: "ClusterRoleBinding"}, + }, + { + Name: "u2", + Head: &SimpleHead{Kind: "Unknown"}, + }, + { + Name: "u1", + Head: &SimpleHead{Kind: "Unknown"}, + }, + { + Name: "t3", + Head: &SimpleHead{Kind: "Unknown2"}, + }, + } + for _, test := range []struct { + description string + order KindSortOrder + expected string + }{ + // expectation is sorted by kind (unknown is last) and within each group of same kind, the order is kept + {"cm,clusterRole,clusterRoleBinding,Unknown,Unknown2", InstallOrder, "01aAz!u2u1t3"}, + } { + var buf bytes.Buffer + t.Run(test.description, func(t *testing.T) { + defer buf.Reset() + for _, r := range sortManifestsByKind(manifests, test.order) { + buf.WriteString(r.Name) + } + if got := buf.String(); got != test.expected { + t.Errorf("Expected %q, got %q", test.expected, got) + } + }) + } +} + +func TestKindSorterNamespaceAgainstUnknown(t *testing.T) { + unknown := Manifest{ + Name: "a", + Head: &SimpleHead{Kind: "Unknown"}, + } + namespace := Manifest{ + Name: "b", + Head: &SimpleHead{Kind: "Namespace"}, + } + + manifests := []Manifest{unknown, namespace} + manifests = sortManifestsByKind(manifests, InstallOrder) + + expectedOrder := []Manifest{namespace, unknown} + for i, manifest := range manifests { + if expectedOrder[i].Name != manifest.Name { + t.Errorf("Expected %s, got %s", expectedOrder[i].Name, manifest.Name) + } + } +} + +// test hook sorting with a small subset of kinds, since it uses the same algorithm as sortManifestsByKind +func TestKindSorterForHooks(t *testing.T) { + hooks := []*release.Hook{ + { + Name: "i", + Kind: "ClusterRole", + }, + { + Name: "j", + Kind: "ClusterRoleBinding", + }, + { + Name: "c", + Kind: "LimitRange", + }, + { + Name: "a", + Kind: "Namespace", + }, + } + + for _, test := range []struct { + 
description string + order KindSortOrder + expected string + }{ + {"install", InstallOrder, "acij"}, + {"uninstall", UninstallOrder, "jica"}, + } { + var buf bytes.Buffer + t.Run(test.description, func(t *testing.T) { + if got, want := len(test.expected), len(hooks); got != want { + t.Fatalf("Expected %d names in order, got %d", want, got) + } + defer buf.Reset() + orig := hooks + for _, r := range sortHooksByKind(hooks, test.order) { + buf.WriteString(r.Name) + } + for i, hook := range orig { + if hook != hooks[i] { + t.Fatal("Expected input to sortHooksByKind to stay the same") + } + } + if got := buf.String(); got != test.expected { + t.Errorf("Expected %q, got %q", test.expected, got) + } + }) + } +} diff --git a/internal/release/v2/util/manifest.go b/internal/release/v2/util/manifest.go new file mode 100644 index 000000000..5dbcdaea5 --- /dev/null +++ b/internal/release/v2/util/manifest.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "unicode" +) + +// SimpleHead defines what the structure of the head of a manifest file +type SimpleHead struct { + Version string `json:"apiVersion"` + Kind string `json:"kind,omitempty"` + Metadata *struct { + Name string `json:"name"` + Annotations map[string]string `json:"annotations"` + } `json:"metadata,omitempty"` +} + +var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") + +// SplitManifests takes a manifest string and returns a map containing individual manifests. +// +// **Note for Chart API v3**: This function (due to the regex above) has allowed _WRONG_ +// Go templates to be defined inside charts across the years. The generated text from Go +// templates may contain `---apiVersion: v1`, and this function magically splits this back +// to `---\napiVersion: v1`. This has caused issues recently after Helm 4 introduced +// kio.ParseAll to inject annotations when post-renderers are used. In Chart API v3, +// we should kill this regex with fire (or change it) and expose charts doing the wrong +// thing Go template-wise. Helm should say a big _NO_ to charts doing the wrong thing, +// with or without post-renderers. +func SplitManifests(bigFile string) map[string]string { + // Basically, we're quickly splitting a stream of YAML documents into an + // array of YAML docs. The file name is just a place holder, but should be + // integer-sortable so that manifests get output in the same order as the + // input (see `BySplitManifestsOrder`). + tpl := "manifest-%d" + res := map[string]string{} + // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly. 
+ bigFileTmp := strings.TrimLeftFunc(bigFile, unicode.IsSpace) + docs := sep.Split(bigFileTmp, -1) + var count int + for _, d := range docs { + if strings.TrimSpace(d) == "" { + continue + } + + d = strings.TrimLeftFunc(d, unicode.IsSpace) + res[fmt.Sprintf(tpl, count)] = d + count = count + 1 + } + return res +} + +// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests` +type BySplitManifestsOrder []string + +func (a BySplitManifestsOrder) Len() int { return len(a) } +func (a BySplitManifestsOrder) Less(i, j int) bool { + // Split `manifest-%d` + anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0) + bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0) + return anum < bnum +} +func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/internal/release/v2/util/manifest_sorter.go b/internal/release/v2/util/manifest_sorter.go new file mode 100644 index 000000000..f269dda6d --- /dev/null +++ b/internal/release/v2/util/manifest_sorter.go @@ -0,0 +1,244 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "fmt" + "log/slog" + "path" + "sort" + "strconv" + "strings" + + "sigs.k8s.io/yaml" + + v2 "helm.sh/helm/v4/internal/release/v2" + "helm.sh/helm/v4/pkg/chart/common" +) + +// Manifest represents a manifest file, which has a name and some content. 
+type Manifest struct { + Name string + Content string + Head *SimpleHead +} + +// manifestFile represents a file that contains a manifest. +type manifestFile struct { + entries map[string]string + path string +} + +// result is an intermediate structure used during sorting. +type result struct { + hooks []*v2.Hook + generic []Manifest +} + +// TODO: Refactor this out. It's here because naming conventions were not followed through. +// So fix the Test hook names and then remove this. +var events = map[string]v2.HookEvent{ + v2.HookPreInstall.String(): v2.HookPreInstall, + v2.HookPostInstall.String(): v2.HookPostInstall, + v2.HookPreDelete.String(): v2.HookPreDelete, + v2.HookPostDelete.String(): v2.HookPostDelete, + v2.HookPreUpgrade.String(): v2.HookPreUpgrade, + v2.HookPostUpgrade.String(): v2.HookPostUpgrade, + v2.HookPreRollback.String(): v2.HookPreRollback, + v2.HookPostRollback.String(): v2.HookPostRollback, + v2.HookTest.String(): v2.HookTest, + // Support test-success for backward compatibility with Helm 2 tests + "test-success": v2.HookTest, +} + +// SortManifests takes a map of filename/YAML contents, splits the file +// by manifest entries, and sorts the entries into hook types. +// +// The resulting hooks struct will be populated with all of the generated hooks. +// Any file that does not declare one of the hook types will be placed in the +// 'generic' bucket. +// +// Files that do not parse into the expected format are simply placed into a map and +// returned. +func SortManifests(files map[string]string, _ common.VersionSet, ordering KindSortOrder) ([]*v2.Hook, []Manifest, error) { + result := &result{} + + var sortedFilePaths []string + for filePath := range files { + sortedFilePaths = append(sortedFilePaths, filePath) + } + sort.Strings(sortedFilePaths) + + for _, filePath := range sortedFilePaths { + content := files[filePath] + + // Skip partials. 
We could return these as a separate map, but there doesn't + // seem to be any need for that at this time. + if strings.HasPrefix(path.Base(filePath), "_") { + continue + } + // Skip empty files and log this. + if strings.TrimSpace(content) == "" { + continue + } + + manifestFile := &manifestFile{ + entries: SplitManifests(content), + path: filePath, + } + + if err := manifestFile.sort(result); err != nil { + return result.hooks, result.generic, err + } + } + + return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil +} + +// sort takes a manifestFile object which may contain multiple resource definition +// entries and sorts each entry by hook types, and saves the resulting hooks and +// generic manifests (or non-hooks) to the result struct. +// +// To determine hook type, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook: pre-install +// +// To determine the policy to delete the hook, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook-delete-policy: hook-succeeded +// +// To determine the policy to output logs of the hook (for Pod and Job only), it looks for a YAML structure like this: +// +// kind: Pod +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook-output-log-policy: hook-succeeded,hook-failed +func (file *manifestFile) sort(result *result) error { + // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys) + var sortedEntryKeys []string + for entryKey := range file.entries { + sortedEntryKeys = append(sortedEntryKeys, entryKey) + } + sort.Sort(BySplitManifestsOrder(sortedEntryKeys)) + + for _, entryKey := range sortedEntryKeys { + m := file.entries[entryKey] + + var entry SimpleHead + if err := yaml.Unmarshal([]byte(m), &entry); err != nil { + return fmt.Errorf("YAML parse error on %s: %w", 
file.path, err) + } + + if !hasAnyAnnotation(entry) { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hookTypes, ok := entry.Metadata.Annotations[v2.HookAnnotation] + if !ok { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hw := calculateHookWeight(entry) + + h := &v2.Hook{ + Name: entry.Metadata.Name, + Kind: entry.Kind, + Path: file.path, + Manifest: m, + Events: []v2.HookEvent{}, + Weight: hw, + DeletePolicies: []v2.HookDeletePolicy{}, + OutputLogPolicies: []v2.HookOutputLogPolicy{}, + } + + isUnknownHook := false + for hookType := range strings.SplitSeq(hookTypes, ",") { + hookType = strings.ToLower(strings.TrimSpace(hookType)) + e, ok := events[hookType] + if !ok { + isUnknownHook = true + break + } + h.Events = append(h.Events, e) + } + + if isUnknownHook { + slog.Info("skipping unknown hooks", "hookTypes", hookTypes) + continue + } + + result.hooks = append(result.hooks, h) + + operateAnnotationValues(entry, v2.HookDeleteAnnotation, func(value string) { + h.DeletePolicies = append(h.DeletePolicies, v2.HookDeletePolicy(value)) + }) + + operateAnnotationValues(entry, v2.HookOutputLogAnnotation, func(value string) { + h.OutputLogPolicies = append(h.OutputLogPolicies, v2.HookOutputLogPolicy(value)) + }) + } + + return nil +} + +// hasAnyAnnotation returns true if the given entry has any annotations at all. +func hasAnyAnnotation(entry SimpleHead) bool { + return entry.Metadata != nil && + entry.Metadata.Annotations != nil && + len(entry.Metadata.Annotations) != 0 +} + +// calculateHookWeight finds the weight in the hook weight annotation. 
+// +// If no weight is found, the assigned weight is 0 +func calculateHookWeight(entry SimpleHead) int { + hws := entry.Metadata.Annotations[v2.HookWeightAnnotation] + hw, err := strconv.Atoi(hws) + if err != nil { + hw = 0 + } + return hw +} + +// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation +func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) { + if dps, ok := entry.Metadata.Annotations[annotation]; ok { + for dp := range strings.SplitSeq(dps, ",") { + dp = strings.ToLower(strings.TrimSpace(dp)) + operate(dp) + } + } +} diff --git a/internal/release/v2/util/manifest_sorter_test.go b/internal/release/v2/util/manifest_sorter_test.go new file mode 100644 index 000000000..28f0b34cc --- /dev/null +++ b/internal/release/v2/util/manifest_sorter_test.go @@ -0,0 +1,227 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "reflect" + "testing" + + "sigs.k8s.io/yaml" + + release "helm.sh/helm/v4/internal/release/v2" +) + +func TestSortManifests(t *testing.T) { + + data := []struct { + name []string + path string + kind []string + hooks map[string][]release.HookEvent + manifest string + }{ + { + name: []string{"first"}, + path: "one", + kind: []string{"Job"}, + hooks: map[string][]release.HookEvent{"first": {release.HookPreInstall}}, + manifest: `apiVersion: v1 +kind: Job +metadata: + name: first + labels: + doesnot: matter + annotations: + "helm.sh/hook": pre-install +`, + }, + { + name: []string{"second"}, + path: "two", + kind: []string{"ReplicaSet"}, + hooks: map[string][]release.HookEvent{"second": {release.HookPostInstall}}, + manifest: `kind: ReplicaSet +apiVersion: v1beta1 +metadata: + name: second + annotations: + "helm.sh/hook": post-install +`, + }, { + name: []string{"third"}, + path: "three", + kind: []string{"ReplicaSet"}, + hooks: map[string][]release.HookEvent{"third": nil}, + manifest: `kind: ReplicaSet +apiVersion: v1beta1 +metadata: + name: third + annotations: + "helm.sh/hook": no-such-hook +`, + }, { + name: []string{"fourth"}, + path: "four", + kind: []string{"Pod"}, + hooks: map[string][]release.HookEvent{"fourth": nil}, + manifest: `kind: Pod +apiVersion: v1 +metadata: + name: fourth + annotations: + nothing: here`, + }, { + name: []string{"fifth"}, + path: "five", + kind: []string{"ReplicaSet"}, + hooks: map[string][]release.HookEvent{"fifth": {release.HookPostDelete, release.HookPostInstall}}, + manifest: `kind: ReplicaSet +apiVersion: v1beta1 +metadata: + name: fifth + annotations: + "helm.sh/hook": post-delete, post-install +`, + }, { + // Regression test: files with an underscore in the base name should be skipped. 
+ name: []string{"sixth"}, + path: "six/_six", + kind: []string{"ReplicaSet"}, + hooks: map[string][]release.HookEvent{"sixth": nil}, + manifest: `invalid manifest`, // This will fail if partial is not skipped. + }, { + // Regression test: files with no content should be skipped. + name: []string{"seventh"}, + path: "seven", + kind: []string{"ReplicaSet"}, + hooks: map[string][]release.HookEvent{"seventh": nil}, + manifest: "", + }, + { + name: []string{"eighth", "example-test"}, + path: "eight", + kind: []string{"ConfigMap", "Pod"}, + hooks: map[string][]release.HookEvent{"eighth": nil, "example-test": {release.HookTest}}, + manifest: `kind: ConfigMap +apiVersion: v1 +metadata: + name: eighth +data: + name: value +--- +apiVersion: v1 +kind: Pod +metadata: + name: example-test + annotations: + "helm.sh/hook": test +`, + }, + } + + manifests := make(map[string]string, len(data)) + for _, o := range data { + manifests[o.path] = o.manifest + } + + hs, generic, err := SortManifests(manifests, nil, InstallOrder) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + // This test will fail if 'six' or 'seven' was added. 
+ if len(generic) != 2 { + t.Errorf("Expected 2 generic manifests, got %d", len(generic)) + } + + if len(hs) != 4 { + t.Errorf("Expected 4 hooks, got %d", len(hs)) + } + + for _, out := range hs { + found := false + for _, expect := range data { + if out.Path == expect.path { + found = true + if out.Path != expect.path { + t.Errorf("Expected path %s, got %s", expect.path, out.Path) + } + nameFound := false + for _, expectedName := range expect.name { + if out.Name == expectedName { + nameFound = true + } + } + if !nameFound { + t.Errorf("Got unexpected name %s", out.Name) + } + kindFound := false + for _, expectedKind := range expect.kind { + if out.Kind == expectedKind { + kindFound = true + } + } + if !kindFound { + t.Errorf("Got unexpected kind %s", out.Kind) + } + + expectedHooks := expect.hooks[out.Name] + if !reflect.DeepEqual(expectedHooks, out.Events) { + t.Errorf("expected events: %v but got: %v", expectedHooks, out.Events) + } + + } + } + if !found { + t.Errorf("Result not found: %v", out) + } + } + + // Verify the sort order + sorted := []Manifest{} + for _, s := range data { + manifests := SplitManifests(s.manifest) + + for _, m := range manifests { + var sh SimpleHead + if err := yaml.Unmarshal([]byte(m), &sh); err != nil { + // This is expected for manifests that are corrupt or empty. 
+ t.Log(err) + continue + } + + name := sh.Metadata.Name + + // only keep track of non-hook manifests + if s.hooks[name] == nil { + another := Manifest{ + Content: m, + Name: name, + Head: &sh, + } + sorted = append(sorted, another) + } + } + } + + sorted = sortManifestsByKind(sorted, InstallOrder) + for i, m := range generic { + if m.Content != sorted[i].Content { + t.Errorf("Expected %q, got %q", m.Content, sorted[i].Content) + } + } +} diff --git a/internal/release/v2/util/manifest_test.go b/internal/release/v2/util/manifest_test.go new file mode 100644 index 000000000..72b095390 --- /dev/null +++ b/internal/release/v2/util/manifest_test.go @@ -0,0 +1,517 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "reflect" + "testing" +) + +func TestSplitManifests(t *testing.T) { + tests := []struct { + name string + input string + expected map[string]string + }{ + { + name: "single doc with leading separator and whitespace", + input: ` + +--- +apiVersion: v1 +kind: Pod +metadata: + name: finding-nemo, + annotations: + "helm.sh/hook": test +spec: + containers: + - name: nemo-test + image: fake-image + cmd: fake-command +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: Pod +metadata: + name: finding-nemo, + annotations: + "helm.sh/hook": test +spec: + containers: + - name: nemo-test + image: fake-image + cmd: fake-command +`, + }, + }, + { + name: "empty input", + input: "", + expected: map[string]string{}, + }, + { + name: "whitespace only", + input: " \n\n \n", + expected: map[string]string{}, + }, + { + name: "whitespace-only doc after separator is skipped", + input: "---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm1\n---\n \n", + expected: map[string]string{ + "manifest-0": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm1", + }, + }, + { + name: "single doc no separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +`, + }, + }, + { + name: "two docs with proper separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + }, + }, + + // Block scalar chomping indicator tests using | (clip), |- (strip), and |+ (keep) + // inputs with 0, 1, and 2 trailing newlines after the block content. 
+ // Note: the emitter may normalize the output chomping indicator when the + // trailing newline count makes another indicator equivalent for the result. + + // | (clip) input — clips trailing newlines to exactly one, though with + // 0 trailing newlines the emitted output may normalize to |-. + { + name: "block scalar clip (|) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + }, + }, + { + name: "block scalar clip (|) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +`, + }, + }, + { + name: "block scalar clip (|) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +`, + }, + }, + + // |- (strip) + { + name: "block scalar strip (|-) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello`, + }, + }, + { + name: "block scalar strip (|-) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +`, + }, + }, + { + name: "block scalar strip (|-) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +`, + expected: 
map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +`, + }, + }, + + // |+ (keep) + { + name: "block scalar keep (|+) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + }, + }, + { + name: "block scalar keep (|+) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello +`, + }, + }, + { + name: "block scalar keep (|+) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + +`, + }, + }, + + // Multi-doc with block scalars: the regex consumes \s*\n before ---, + // so trailing newlines from non-last docs are stripped. 
+ { + name: "multi-doc block scalar clip (|) before separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + }, + }, + { + name: "multi-doc block scalar keep (|+) with 2 trailing newlines before separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + }, + }, + + // **Note for Chart API v3**: The following tests exercise the lenient + // regex that splits `---apiVersion` back into separate documents. + // In Chart API v3, these inputs should return an _ERROR_ instead. + // See the comment on the SplitManifests function for more details. 
+ { + name: "leading glued separator (---apiVersion)", + input: ` +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +`, + }, + }, + { + name: "mid-content glued separator (---apiVersion)", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + }, + }, + { + name: "multiple glued separators", + input: ` +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2`, + "manifest-2": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + }, + { + name: "mixed glued and proper separators", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2`, + "manifest-2": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := SplitManifests(tt.input) + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("SplitManifests() =\n%v\nwant:\n%v", result, tt.expected) + } + }) + } +} diff --git a/internal/release/v2/util/sorter.go b/internal/release/v2/util/sorter.go new file mode 100644 index 
000000000..5f13569c1 --- /dev/null +++ b/internal/release/v2/util/sorter.go @@ -0,0 +1,61 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "sort" + + rspb "helm.sh/helm/v4/internal/release/v2" +) + +// Reverse reverses the list of releases sorted by the sort func. +func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { + sortFn(list) + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +// SortByName returns the list of releases sorted +// in lexicographical order. +func SortByName(list []*rspb.Release) { + sort.Slice(list, func(i, j int) bool { + return list[i].Name < list[j].Name + }) +} + +// SortByDate returns the list of releases sorted by a +// release's last deployed time (in seconds). +func SortByDate(list []*rspb.Release) { + sort.Slice(list, func(i, j int) bool { + ti := list[i].Info.LastDeployed.Unix() + tj := list[j].Info.LastDeployed.Unix() + if ti != tj { + return ti < tj + } + // Use name as tie-breaker for stable sorting + return list[i].Name < list[j].Name + }) +} + +// SortByRevision returns the list of releases sorted by a +// release's revision number (release.Version). 
+func SortByRevision(list []*rspb.Release) { + sort.Slice(list, func(i, j int) bool { + return list[i].Version < list[j].Version + }) +} diff --git a/internal/release/v2/util/sorter_test.go b/internal/release/v2/util/sorter_test.go new file mode 100644 index 000000000..6cb876f69 --- /dev/null +++ b/internal/release/v2/util/sorter_test.go @@ -0,0 +1,109 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "helm.sh/helm/v4/internal/release/v2/util" + +import ( + "testing" + "time" + + rspb "helm.sh/helm/v4/internal/release/v2" + "helm.sh/helm/v4/pkg/release/common" +) + +// note: this test data is shared with filter_test.go. 
+ +var releases = []*rspb.Release{ + tsRelease("quiet-bear", 2, 2000, common.StatusSuperseded), + tsRelease("angry-bird", 4, 3000, common.StatusDeployed), + tsRelease("happy-cats", 1, 4000, common.StatusUninstalled), + tsRelease("vocal-dogs", 3, 6000, common.StatusUninstalled), +} + +func tsRelease(name string, vers int, dur time.Duration, status common.Status) *rspb.Release { + info := &rspb.Info{Status: status, LastDeployed: time.Now().Add(dur)} + return &rspb.Release{ + Name: name, + Version: vers, + Info: info, + } +} + +func check(t *testing.T, by string, fn func(int, int) bool) { + t.Helper() + for i := len(releases) - 1; i > 0; i-- { + if fn(i, i-1) { + t.Errorf("release at positions '(%d,%d)' not sorted by %s", i-1, i, by) + } + } +} + +func TestSortByName(t *testing.T) { + SortByName(releases) + + check(t, "ByName", func(i, j int) bool { + ni := releases[i].Name + nj := releases[j].Name + return ni < nj + }) +} + +func TestSortByDate(t *testing.T) { + SortByDate(releases) + + check(t, "ByDate", func(i, j int) bool { + ti := releases[i].Info.LastDeployed.Second() + tj := releases[j].Info.LastDeployed.Second() + return ti < tj + }) +} + +func TestSortByRevision(t *testing.T) { + SortByRevision(releases) + + check(t, "ByRevision", func(i, j int) bool { + vi := releases[i].Version + vj := releases[j].Version + return vi < vj + }) +} + +func TestReverseSortByName(t *testing.T) { + Reverse(releases, SortByName) + check(t, "ByName", func(i, j int) bool { + ni := releases[i].Name + nj := releases[j].Name + return ni > nj + }) +} + +func TestReverseSortByDate(t *testing.T) { + Reverse(releases, SortByDate) + check(t, "ByDate", func(i, j int) bool { + ti := releases[i].Info.LastDeployed.Second() + tj := releases[j].Info.LastDeployed.Second() + return ti > tj + }) +} + +func TestReverseSortByRevision(t *testing.T) { + Reverse(releases, SortByRevision) + check(t, "ByRevision", func(i, j int) bool { + vi := releases[i].Version + vj := releases[j].Version + return vi > 
vj + }) +} diff --git a/internal/resolver/resolver.go b/internal/resolver/resolver.go index 3efe94f10..184c8404b 100644 --- a/internal/resolver/resolver.go +++ b/internal/resolver/resolver.go @@ -149,7 +149,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string } else { // Retrieve list of tags for repository - ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, fmt.Sprintf("%s://", registry.OCIScheme)), d.Name) + ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, registry.OCIScheme+"://"), d.Name) tags, err := r.registryClient.Tags(ref) if err != nil { return nil, fmt.Errorf("could not retrieve list of tags for repository %s: %w", d.Repository, err) diff --git a/internal/test/test.go b/internal/test/test.go index 632bc72fd..202e015ab 100644 --- a/internal/test/test.go +++ b/internal/test/test.go @@ -29,8 +29,8 @@ var updateGolden = flag.Bool("update", false, "update golden files") // TestingT describes a testing object compatible with the critical functions from the testing.T type type TestingT interface { - Fatal(...interface{}) - Fatalf(string, ...interface{}) + Fatal(...any) + Fatalf(string, ...any) HelperT } diff --git a/internal/third_party/dep/fs/fs.go b/internal/third_party/dep/fs/fs.go index 6e2720f3b..3140cf91e 100644 --- a/internal/third_party/dep/fs/fs.go +++ b/internal/third_party/dep/fs/fs.go @@ -164,7 +164,8 @@ func CopyFile(src, dst string) (err error) { // // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { + lerr := &os.LinkError{} + if errors.As(err, &lerr) && !errors.Is(lerr.Err, syscall.Errno(1314)) { return err } } else { diff --git a/internal/third_party/dep/fs/fs_test.go b/internal/third_party/dep/fs/fs_test.go index 610771bc3..8f28c3af7 100644 --- a/internal/third_party/dep/fs/fs_test.go +++ b/internal/third_party/dep/fs/fs_test.go @@ -32,6 
+32,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package fs import ( + "errors" "os" "path/filepath" "runtime" @@ -234,7 +235,7 @@ func TestCopyDirFail_SrcIsNotDir(t *testing.T) { t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) } - if err != errSrcNotDir { + if !errors.Is(err, errSrcNotDir) { t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) } @@ -260,7 +261,7 @@ func TestCopyDirFail_DstExists(t *testing.T) { t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) } - if err != errDstExist { + if !errors.Is(err, errDstExist) { t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) } } diff --git a/internal/third_party/dep/fs/rename.go b/internal/third_party/dep/fs/rename.go index 5f13b1ca3..77c93b7ad 100644 --- a/internal/third_party/dep/fs/rename.go +++ b/internal/third_party/dep/fs/rename.go @@ -34,6 +34,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package fs import ( + "errors" "fmt" "os" "syscall" @@ -46,10 +47,11 @@ func renameFallback(err error, src, dst string) error { // copy if we detect that case. syscall.EXDEV is the common name for the // cross device link error which has varying output text across different // operating systems. 
- terr, ok := err.(*os.LinkError) + terr := &os.LinkError{} + ok := errors.As(err, &terr) if !ok { return err - } else if terr.Err != syscall.EXDEV { + } else if !errors.Is(terr.Err, syscall.EXDEV) { return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) } diff --git a/internal/tlsutil/tls.go b/internal/tlsutil/tls.go index 88f26d47b..e986e6909 100644 --- a/internal/tlsutil/tls.go +++ b/internal/tlsutil/tls.go @@ -112,7 +112,7 @@ func NewTLSConfig(options ...TLSConfigOption) (*tls.Config, error) { if len(to.caPEMBlock) > 0 { cp := x509.NewCertPool() if !cp.AppendCertsFromPEM(to.caPEMBlock) { - return nil, fmt.Errorf("failed to append certificates from pem block") + return nil, errors.New("failed to append certificates from pem block") } config.RootCAs = cp diff --git a/internal/tlsutil/tls_test.go b/internal/tlsutil/tls_test.go index f16eb218f..e6859f99f 100644 --- a/internal/tlsutil/tls_test.go +++ b/internal/tlsutil/tls_test.go @@ -58,10 +58,10 @@ func TestNewTLSConfig(t *testing.T) { t.Fatalf("expecting 1 client certificates, got %d", got) } if cfg.InsecureSkipVerify { - t.Fatalf("insecure skip verify mismatch, expecting false") + t.Fatal("insecure skip verify mismatch, expecting false") } if cfg.RootCAs == nil { - t.Fatalf("mismatch tls RootCAs, expecting non-nil") + t.Fatal("mismatch tls RootCAs, expecting non-nil") } } { @@ -77,10 +77,10 @@ func TestNewTLSConfig(t *testing.T) { t.Fatalf("expecting 0 client certificates, got %d", got) } if cfg.InsecureSkipVerify { - t.Fatalf("insecure skip verify mismatch, expecting false") + t.Fatal("insecure skip verify mismatch, expecting false") } if cfg.RootCAs == nil { - t.Fatalf("mismatch tls RootCAs, expecting non-nil") + t.Fatal("mismatch tls RootCAs, expecting non-nil") } } @@ -97,10 +97,10 @@ func TestNewTLSConfig(t *testing.T) { t.Fatalf("expecting 1 client certificates, got %d", got) } if cfg.InsecureSkipVerify { - t.Fatalf("insecure skip verify mismatch, expecting false") + t.Fatal("insecure 
skip verify mismatch, expecting false") } if cfg.RootCAs != nil { - t.Fatalf("mismatch tls RootCAs, expecting nil") + t.Fatal("mismatch tls RootCAs, expecting nil") } } } diff --git a/internal/version/clientgo.go b/internal/version/clientgo.go index ab2a38fd5..50a4fd5cb 100644 --- a/internal/version/clientgo.go +++ b/internal/version/clientgo.go @@ -17,7 +17,7 @@ limitations under the License. package version import ( - "fmt" + "errors" "runtime/debug" "slices" @@ -27,7 +27,7 @@ import ( func K8sIOClientGoModVersion() (string, error) { info, ok := debug.ReadBuildInfo() if !ok { - return "", fmt.Errorf("failed to read build info") + return "", errors.New("failed to read build info") } idx := slices.IndexFunc(info.Deps, func(m *debug.Module) bool { @@ -35,7 +35,7 @@ func K8sIOClientGoModVersion() (string, error) { }) if idx == -1 { - return "", fmt.Errorf("k8s.io/client-go not found in build info") + return "", errors.New("k8s.io/client-go not found in build info") } m := info.Deps[idx] diff --git a/pkg/action/action.go b/pkg/action/action.go index c2a27940f..8c1888144 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -27,6 +27,7 @@ import ( "path" "path/filepath" "slices" + "sort" "strings" "sync" "text/template" @@ -87,6 +88,33 @@ const ( DryRunServer DryRunStrategy = "server" ) +// PostRenderStrategy determines how hooks and regular templates are passed +// to the configured post-renderer. +type PostRenderStrategy string + +const ( + // PostRenderStrategyCombined sends hooks and regular templates together + // as a single stream to the post-renderer. This is the default in Helm 4. + PostRenderStrategyCombined PostRenderStrategy = "combined" + + // PostRenderStrategySeparate sends hooks and regular templates to the + // post-renderer in independent invocations. 
This avoids duplicate-resource + // errors from post-renderers that de-duplicate by resource identity + // (for example Kustomize) when the same resource appears in both a hook + // and a regular template. Passing hooks to post-renderers was introduced + // in Helm 4; Helm 3 never did so, which is why the issue only surfaces + // with the Helm 4 combined default. + PostRenderStrategySeparate PostRenderStrategy = "separate" + + // PostRenderStrategyNoHooks sends only regular templates to the + // post-renderer and leaves hooks untouched. This matches the Helm 3 + // behavior and is useful for post-renderers that declare transforms + // targeting template-only resources (for example Kustomize patches + // against a Deployment that exists in templates but not in hooks), + // which would otherwise fail against the hook stream. + PostRenderStrategyNoHooks PostRenderStrategy = "nohooks" +) + // Configuration injects the dependencies that all actions share. type Configuration struct { // RESTClientGetter is an interface that loads Kubernetes clients. @@ -159,15 +187,32 @@ func annotateAndMerge(files map[string]string) (string, error) { continue } - manifests, err := kio.ParseAll(content) - if err != nil { - return "", fmt.Errorf("parsing %s: %w", fname, err) + // For consistency with the non-post-renderers code path, we need + // to use releaseutil.SplitManifests here to split the file into + // individual documents before feeding them to kio.ParseAll. In + // Chart API before v3 this function had leniency for badly-written + // Go templates, so this must be preserved for older charts. 
+ splitDocs := releaseutil.SplitManifests(content) + keys := make([]string, 0, len(splitDocs)) + for k := range splitDocs { + keys = append(keys, k) } - for _, manifest := range manifests { - if err := manifest.PipeE(kyaml.SetAnnotation(filenameAnnotation, fname)); err != nil { - return "", fmt.Errorf("annotating %s: %w", fname, err) + sort.Sort(releaseutil.BySplitManifestsOrder(keys)) + for _, key := range keys { + doc := splitDocs[key] + if strings.TrimSpace(doc) == "" { + continue + } + manifests, err := kio.ParseAll(doc) + if err != nil { + return "", fmt.Errorf("parsing %s: %w", fname, err) + } + for _, manifest := range manifests { + if err := manifest.PipeE(kyaml.SetAnnotation(filenameAnnotation, fname)); err != nil { + return "", fmt.Errorf("annotating %s: %w", fname, err) + } + combinedManifests = append(combinedManifests, manifest) } - combinedManifests = append(combinedManifests, manifest) } } @@ -180,7 +225,14 @@ func annotateAndMerge(files map[string]string) (string, error) { // splitAndDeannotate reconstructs individual files from a merged YAML stream, // removing filename annotations and grouping documents by their original filenames. -func splitAndDeannotate(postrendered string) (map[string]string, error) { +// Documents without a filename annotation are assigned a synthesized name of the +// form "generated-by-postrender--.yaml" (or +// "generated-by-postrender-.yaml" when fallbackPrefix is empty). The prefix +// disambiguates fallback filenames across multiple post-render invocations (for +// example when PostRenderStrategySeparate runs the post-renderer once per +// group), so that merging results from different invocations does not collide +// on the same synthetic key. 
+func splitAndDeannotate(postrendered, fallbackPrefix string) (map[string]string, error) { manifests, err := kio.ParseAll(postrendered) if err != nil { return nil, fmt.Errorf("error parsing YAML: %w", err) @@ -194,7 +246,11 @@ func splitAndDeannotate(postrendered string) (map[string]string, error) { } fname := meta.Annotations[filenameAnnotation] if fname == "" { - fname = fmt.Sprintf("generated-by-postrender-%d.yaml", i) + if fallbackPrefix == "" { + fname = fmt.Sprintf("generated-by-postrender-%d.yaml", i) + } else { + fname = fmt.Sprintf("generated-by-postrender-%s-%d.yaml", fallbackPrefix, i) + } } if err := manifest.PipeE(kyaml.ClearAnnotation(filenameAnnotation)); err != nil { return nil, fmt.Errorf("clearing filename annotation: %w", err) @@ -219,7 +275,7 @@ func splitAndDeannotate(postrendered string) (map[string]string, error) { // TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed // // This code has to do with writing files to disk. -func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { +func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool, postRenderStrategy PostRenderStrategy) ([]*release.Hook, *bytes.Buffer, string, error) { var hs []*release.Hook b := bytes.NewBuffer(nil) @@ -283,29 +339,122 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, notes := notesBuffer.String() if pr != nil { - // We need to send files to the post-renderer before sorting and splitting - // hooks from manifests. 
The post-renderer interface expects a stream of - // manifests (similar to what tools like Kustomize and kubectl expect), whereas - // the sorter uses filenames. - // Here, we merge the documents into a stream, post-render them, and then split - // them back into a map of filename -> content. - - // Merge files as stream of documents for sending to post renderer - merged, err := annotateAndMerge(files) - if err != nil { - return hs, b, notes, fmt.Errorf("error merging manifests: %w", err) - } + switch postRenderStrategy { + case PostRenderStrategySeparate, PostRenderStrategyNoHooks: + // Split hooks from manifests before post-rendering. For "separate", + // hooks and templates are sent to the post-renderer as independent + // streams to avoid duplicate-resource errors when the same resource + // appears in both (e.g. a ServiceAccount used by a pre-install hook + // that is also declared in the chart's regular templates). For + // "nohooks", hooks skip the post-renderer entirely, matching the + // Helm 3 behavior. + sortedHooks, sortedManifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder) + if err != nil { + for name, content := range files { + if strings.TrimSpace(content) == "" { + continue + } + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content) + } + return hs, b, "", err + } - // Run the post renderer - postRendered, err := pr.Run(bytes.NewBufferString(merged)) - if err != nil { - return hs, b, notes, fmt.Errorf("error while running post render on files: %w", err) - } + // Build separate files maps for hooks and manifests. 
+ hookFiles := make(map[string]string) + for _, h := range sortedHooks { + if existing, ok := hookFiles[h.Path]; ok { + hookFiles[h.Path] = existing + "\n---\n" + h.Manifest + } else { + hookFiles[h.Path] = h.Manifest + } + } + manifestFiles := make(map[string]string) + for _, m := range sortedManifests { + if existing, ok := manifestFiles[m.Name]; ok { + manifestFiles[m.Name] = existing + "\n---\n" + m.Content + } else { + manifestFiles[m.Name] = m.Content + } + } - // Use the file list and contents received from the post renderer - files, err = splitAndDeannotate(postRendered.String()) - if err != nil { - return hs, b, notes, fmt.Errorf("error while parsing post rendered output: %w", err) + // Decide which groups to post-render. "nohooks" passes hooks + // through untouched and only post-renders manifests. + groups := []struct { + name string + files map[string]string + postRender bool + }{ + {"hooks", hookFiles, postRenderStrategy == PostRenderStrategySeparate}, + {"manifests", manifestFiles, true}, + } + + files = make(map[string]string) + for _, group := range groups { + if len(group.files) == 0 { + continue + } + + if !group.postRender { + for k, v := range group.files { + if existing, ok := files[k]; ok { + files[k] = existing + "\n---\n" + v + } else { + files[k] = v + } + } + continue + } + + merged, err := annotateAndMerge(group.files) + if err != nil { + return hs, b, notes, fmt.Errorf("error merging %s: %w", group.name, err) + } + + postRendered, err := pr.Run(bytes.NewBufferString(merged)) + if err != nil { + return hs, b, notes, fmt.Errorf("error while running post render on %s: %w", group.name, err) + } + + rendered, err := splitAndDeannotate(postRendered.String(), group.name) + if err != nil { + return hs, b, notes, fmt.Errorf("error while parsing post rendered output for %s: %w", group.name, err) + } + + for k, v := range rendered { + if existing, ok := files[k]; ok { + files[k] = existing + "\n---\n" + v + } else { + files[k] = v + } + } + } + 
case PostRenderStrategyCombined, "": + // We need to send files to the post-renderer before sorting and splitting + // hooks from manifests. The post-renderer interface expects a stream of + // manifests (similar to what tools like Kustomize and kubectl expect), whereas + // the sorter uses filenames. + // Here, we merge the documents into a stream, post-render them, and then split + // them back into a map of filename -> content. + + // Merge files as stream of documents for sending to post renderer + merged, err := annotateAndMerge(files) + if err != nil { + return hs, b, notes, fmt.Errorf("error merging manifests: %w", err) + } + + // Run the post renderer + postRendered, err := pr.Run(bytes.NewBufferString(merged)) + if err != nil { + return hs, b, notes, fmt.Errorf("error while running post render on files: %w", err) + } + + // Use the file list and contents received from the post renderer + files, err = splitAndDeannotate(postRendered.String(), "") + if err != nil { + return hs, b, notes, fmt.Errorf("error while parsing post rendered output: %w", err) + } + default: + return hs, b, notes, fmt.Errorf("unknown post-render strategy: '%s'", postRenderStrategy) } } @@ -466,7 +615,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (common.VersionSet return common.DefaultVersionSet, nil } - versionMap := make(map[string]interface{}) + versionMap := make(map[string]any) var versions []string // Extract the groups diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 85ee42d64..54b07273b 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -157,12 +157,12 @@ func withName(name string) chartOption { } func withSampleValues() chartOption { - values := map[string]interface{}{ + values := map[string]any{ "someKey": "someValue", - "nestedKey": map[string]interface{}{ + "nestedKey": map[string]any{ "simpleKey": "simpleValue", - "anotherNestedKey": map[string]interface{}{ - "yetAnotherNestedKey": map[string]interface{}{ 
+ "anotherNestedKey": map[string]any{ + "yetAnotherNestedKey": map[string]any{ "youReadyForAnotherNestedKey": "No", }, }, @@ -173,7 +173,7 @@ func withSampleValues() chartOption { } } -func withValues(values map[string]interface{}) chartOption { +func withValues(values map[string]any) chartOption { return func(opts *chartOptions) { opts.Values = values } @@ -274,7 +274,7 @@ func namedReleaseStub(name string, status rcommon.Status) *release.Release { Description: "Named Release Stub", }, Chart: buildChart(withSampleTemplates()), - Config: map[string]interface{}{"name": "value"}, + Config: map[string]any{"name": "value"}, Version: 1, Hooks: []*release.Hook{ { @@ -304,7 +304,7 @@ func TestConfiguration_Init(t *testing.T) { tests := []struct { name string helmDriver string - expectedDriverType interface{} + expectedDriverType any expectErr bool errMsg string }{ @@ -377,7 +377,7 @@ func TestGetVersionSet(t *testing.T) { } if !vs.Has("v1") { - t.Errorf("Expected supported versions to at least include v1.") + t.Error("Expected supported versions to at least include v1.") } if vs.Has("nosuchversion/v1") { t.Error("Non-existent version is reported found.") @@ -418,131 +418,1156 @@ func TestAnnotateAndMerge(t *testing.T) { { name: "single file with single manifest", files: map[string]string{ - "templates/configmap.yaml": `apiVersion: v1 + "templates/configmap.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + key: value`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' +data: + key: value +`, + }, + { + name: "multiple files with multiple manifests", + files: map[string]string{ + "templates/configmap.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + key: value`, + "templates/secret.yaml": ` +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +data: + password: dGVzdA==`, + }, + expected: 
`apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' +data: + key: value +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/secret.yaml' +data: + password: dGVzdA== +`, + }, + { + name: "file with multiple manifests", + files: map[string]string{ + "templates/multi.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm1 +data: + key: value1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm2 +data: + key: value2`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm1 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' +data: + key: value1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' +data: + key: value2 +`, + }, + { + name: "partials and empty files are removed", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm1 +`, + "templates/_partial.tpl": ` +{{-define name}} + {{- "abracadabra"}} +{{- end -}}`, + "templates/empty.yaml": ``, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm1 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +`, + }, + { + name: "empty file", + files: map[string]string{ + "templates/empty.yaml": ` +`, + }, + expected: ``, + }, + { + name: "invalid yaml", + files: map[string]string{ + "templates/invalid.yaml": ` +invalid: yaml: content: + - malformed`, + }, + expectedError: "parsing templates/invalid.yaml", + }, + { + name: "leading doc separator glued to content by template whitespace trimming", + files: map[string]string{ + "templates/service.yaml": ` +---apiVersion: v1 +kind: Service +metadata: + name: test-svc +`, + }, + expected: `apiVersion: v1 +kind: Service 
+metadata: + name: test-svc + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/service.yaml' +`, + }, + { + name: "leading doc separator on its own line", + files: map[string]string{ + "templates/service.yaml": ` +--- +apiVersion: v1 +kind: Service +metadata: + name: test-svc +`, + }, + expected: `apiVersion: v1 +kind: Service +metadata: + name: test-svc + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/service.yaml' +`, + }, + { + name: "multiple leading doc separators", + files: map[string]string{ + "templates/service.yaml": ` +--- +--- +apiVersion: v1 +kind: Service +metadata: + name: test-svc +`, + }, + expected: `apiVersion: v1 +kind: Service +metadata: + name: test-svc + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/service.yaml' +`, + }, + { + name: "mid-content doc separator glued to content by template whitespace trimming", + files: map[string]string{ + "templates/all.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +---apiVersion: v1 +kind: Service +metadata: + name: test-svc +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/all.yaml' +--- +apiVersion: v1 +kind: Service +metadata: + name: test-svc + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/all.yaml' +`, + }, + { + name: "ConfigMap with embedded CA certificate", + files: map[string]string{ + "templates/configmap.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: ca-bundle +data: + ca.crt: | + ------BEGIN CERTIFICATE------ + MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zAKBggqhkjOPQQDAzASMRAw + DgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYwMDAw + WjASMRAwDgYDVQQKEwdBY21lIENvMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Rmm + ------END CERTIFICATE------ + ------BEGIN CERTIFICATE------ + MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zAKBggqhkjOPQQDAzASMRAw + 
DgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYwMDAw + WjASMRAwDgYDVQQKEwdBY21lIENvMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Rmm + ------END CERTIFICATE------ +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: ca-bundle + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' +data: + ca.crt: | + ------BEGIN CERTIFICATE------ + MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zAKBggqhkjOPQQDAzASMRAw + DgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYwMDAw + WjASMRAwDgYDVQQKEwdBY21lIENvMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Rmm + ------END CERTIFICATE------ + ------BEGIN CERTIFICATE------ + MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zAKBggqhkjOPQQDAzASMRAw + DgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYwMDAw + WjASMRAwDgYDVQQKEwdBY21lIENvMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Rmm + ------END CERTIFICATE------ +`, + }, + { + name: "consecutive dashes in YAML value are not treated as document separators", + files: map[string]string{ + "templates/configmap.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + config: | + # --------------------------------------------------------------------------- + [section] + key = value + # --------------------------------------------------------------------------- +`, + }, + expected: `apiVersion: v1 kind: ConfigMap metadata: name: test-cm + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' +data: + config: | + # --------------------------------------------------------------------------- + [section] + key = value + # --------------------------------------------------------------------------- +`, + }, + { + name: "JSON with dashes in values is not corrupted", + files: map[string]string{ + "templates/dashboard.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: dashboard +data: + dashboard.json: | + {"options":{"---------":{"color":"#292929","text":"N/A"}}} +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap 
+metadata: + name: dashboard + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/dashboard.yaml' +data: + dashboard.json: | + {"options":{"---------":{"color":"#292929","text":"N/A"}}} +`, + }, + + // **Note for Chart API v3**: This input should return an _ERROR_ in Chart API v3. + // See the comment on the releaseutil.SplitManifests function for more details. + { + name: "multiple glued separators in same file", + files: map[string]string{ + "templates/multi.yaml": ` +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' +`, + }, + + // **Note for Chart API v3**: This input should return an _ERROR_ in Chart API v3. + // See the comment on the releaseutil.SplitManifests function for more details. 
+ { + name: "mixed glued and proper separators", + files: map[string]string{ + "templates/mixed.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/mixed.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/mixed.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/mixed.yaml' +`, + }, + { + name: "12 documents preserve in-file order", + files: map[string]string{ + "templates/many.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-01 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-02 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-03 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-04 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-05 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-06 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-07 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-08 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-09 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-10 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-11 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-12 +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-01 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-02 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-03 + annotations: + 
postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-04 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-05 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-06 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-07 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-08 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-09 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-10 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-11 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm-12 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/many.yaml' +`, + }, + + // Block scalar chomping indicator tests using | (clip), |- (strip), and |+ (keep) + // inputs with 0, 1, and 2 trailing newlines after the block content. + // Note: the emitter may normalize the output chomping indicator when the + // trailing newline count makes another indicator equivalent for the result. + + // | (clip) input — clips trailing newlines to exactly one, though with + // 0 trailing newlines the emitted output may normalize to |-. 
+ { + name: "block scalar clip (|) with 0 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +`, + }, + { + name: "block scalar clip (|) with 1 trailing newline", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: | + hello +`, + }, + { + name: "block scalar clip (|) with 2 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: | + hello +`, + }, + + // |- (strip) — strips all trailing newlines + { + name: "block scalar strip (|-) with 0 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +`, + }, + { + name: "block scalar strip (|-) with 1 trailing newline", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +`, + }, + { + name: 
"block scalar strip (|-) with 2 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +`, + }, + + // |+ (keep) — preserves all trailing newlines + { + name: "block scalar keep (|+) with 0 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +`, + }, + { + name: "block scalar keep (|+) with 1 trailing newline", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: | + hello +`, + }, + { + name: "block scalar keep (|+) with 2 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + +`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |+ + hello + +`, + }, + + // Multi-doc tests: block scalar doc is NOT the last document. + // SplitManifests' regex consumes \s*\n before ---, so trailing + // newlines from non-last docs are always stripped. 
+ + // | (clip) in multi-doc (first doc) + { + name: "multi-doc block scalar clip (|) with 0 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple +`, + }, + { + name: "multi-doc block scalar clip (|) with 1 trailing newline", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple +`, + }, + { + name: "multi-doc block scalar clip (|) with 2 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple +`, + }, + + // |- (strip) in 
multi-doc (first doc) + { + name: "multi-doc block scalar strip (|-) with 0 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple +`, + }, + { + name: "multi-doc block scalar strip (|-) with 1 trailing newline", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple +`, + }, + { + name: "multi-doc block scalar strip (|-) with 2 trailing newlines", + files: map[string]string{ + "templates/cm.yaml": ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, + }, + expected: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - key: value`, - }, - expected: `apiVersion: v1 + key: |- + hello +--- +apiVersion: v1 kind: ConfigMap metadata: - name: test-cm + name: test2 annotations: - postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' + 
postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - key: value + val: simple `, }, + + // |+ (keep) in multi-doc (first doc) { - name: "multiple files with multiple manifests", + name: "multi-doc block scalar keep (|+) with 0 trailing newlines", files: map[string]string{ - "templates/configmap.yaml": `apiVersion: v1 + "templates/cm.yaml": ` +apiVersion: v1 kind: ConfigMap metadata: - name: test-cm + name: test data: - key: value`, - "templates/secret.yaml": `apiVersion: v1 -kind: Secret + key: |+ + hello +--- +apiVersion: v1 +kind: ConfigMap metadata: - name: test-secret + name: test2 data: - password: dGVzdA==`, + val: simple`, }, expected: `apiVersion: v1 kind: ConfigMap metadata: - name: test-cm + name: test annotations: - postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml' + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - key: value + key: |- + hello --- apiVersion: v1 -kind: Secret +kind: ConfigMap metadata: - name: test-secret + name: test2 annotations: - postrenderer.helm.sh/postrender-filename: 'templates/secret.yaml' + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - password: dGVzdA== + val: simple `, }, { - name: "file with multiple manifests", + name: "multi-doc block scalar keep (|+) with 1 trailing newline", files: map[string]string{ - "templates/multi.yaml": `apiVersion: v1 + "templates/cm.yaml": ` +apiVersion: v1 kind: ConfigMap metadata: - name: test-cm1 + name: test data: - key: value1 + key: |+ + hello + --- apiVersion: v1 kind: ConfigMap metadata: - name: test-cm2 + name: test2 data: - key: value2`, + val: simple`, }, expected: `apiVersion: v1 kind: ConfigMap metadata: - name: test-cm1 + name: test annotations: - postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - key: value1 + key: |- + hello --- apiVersion: v1 kind: ConfigMap metadata: - name: test-cm2 + name: test2 annotations: - 
postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml' + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' data: - key: value2 + val: simple `, }, { - name: "partials and empty files are removed", + name: "multi-doc block scalar keep (|+) with 2 trailing newlines", files: map[string]string{ - "templates/cm.yaml": `apiVersion: v1 + "templates/cm.yaml": ` +apiVersion: v1 kind: ConfigMap metadata: - name: test-cm1 -`, - "templates/_partial.tpl": ` -{{-define name}} - {{- "abracadabra"}} -{{- end -}}`, - "templates/empty.yaml": ``, + name: test +data: + key: |+ + hello + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +data: + val: simple`, }, expected: `apiVersion: v1 kind: ConfigMap metadata: - name: test-cm1 + name: test + annotations: + postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + key: |- + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 annotations: postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml' +data: + val: simple `, }, - { - name: "empty file", - files: map[string]string{ - "templates/empty.yaml": "", - }, - expected: ``, - }, - { - name: "invalid yaml", - files: map[string]string{ - "templates/invalid.yaml": `invalid: yaml: content: - - malformed`, - }, - expectedError: "parsing templates/invalid.yaml", - }, } for _, tt := range tests { @@ -697,7 +1722,7 @@ metadata: data: key: value`, expectedFiles: map[string]string{ - "generated-by-postrender-0.yaml": `apiVersion: v1 + "generated-by-postrender-test-0.yaml": `apiVersion: v1 kind: ConfigMap metadata: name: test-cm @@ -710,7 +1735,7 @@ data: for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - files, err := splitAndDeannotate(tt.input) + files, err := splitAndDeannotate(tt.input, "test") if tt.expectedError != "" { assert.Error(t, err) @@ -764,7 +1789,7 @@ data: require.NoError(t, err) // Split and deannotate - reconstructed, err := splitAndDeannotate(merged) + reconstructed, err := 
splitAndDeannotate(merged, "test") require.NoError(t, err) // Compare the results @@ -795,11 +1820,11 @@ func TestRenderResources_PostRenderer_Success(t *testing.T) { } ch := buildChart(withSampleTemplates()) - values := map[string]interface{}{} + values := map[string]any{} hooks, buf, notes, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - mockPR, false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, ) assert.NoError(t, err) @@ -809,12 +1834,15 @@ func TestRenderResources_PostRenderer_Success(t *testing.T) { expectedBuf := `--- # Source: yellow/templates/foodpie foodpie: world + --- # Source: yellow/templates/with-partials yellow: Earth + --- # Source: yellow/templates/yellow yellow: world + ` expectedHook := `kind: ConfigMap metadata: @@ -822,7 +1850,8 @@ metadata: annotations: "helm.sh/hook": post-install,pre-delete,post-upgrade data: - name: value` + name: value +` assert.Equal(t, expectedBuf, buf.String()) assert.Len(t, hooks, 1) @@ -838,11 +1867,11 @@ func TestRenderResources_PostRenderer_Error(t *testing.T) { } ch := buildChart(withSampleTemplates()) - values := map[string]interface{}{} + values := map[string]any{} _, _, _, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - mockPR, false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, ) assert.Error(t, err) @@ -866,11 +1895,11 @@ func TestRenderResources_PostRenderer_MergeError(t *testing.T) { {Name: "templates/invalid", ModTime: time.Now(), Data: []byte("invalid: yaml: content:")}, }, } - values := map[string]interface{}{} + values := map[string]any{} _, _, _, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - mockPR, false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, ) assert.Error(t, err) @@ -888,11 +1917,11 @@ func TestRenderResources_PostRenderer_SplitError(t *testing.T) { } ch := buildChart(withSampleTemplates()) - values := 
map[string]interface{}{} + values := map[string]any{} _, _, _, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - mockPR, false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, ) assert.Error(t, err) @@ -909,11 +1938,11 @@ func TestRenderResources_PostRenderer_Integration(t *testing.T) { } ch := buildChart(withSampleTemplates()) - values := map[string]interface{}{} + values := map[string]any{} hooks, buf, notes, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - mockPR, false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, ) assert.NoError(t, err) @@ -927,14 +1956,17 @@ func TestRenderResources_PostRenderer_Integration(t *testing.T) { # Source: hello/templates/goodbye goodbye: world color: blue + --- # Source: hello/templates/hello hello: world color: blue + --- # Source: hello/templates/with-partials hello: Earth color: blue + ` assert.Contains(t, output, "color: blue") assert.Equal(t, 3, strings.Count(output, "color: blue")) @@ -945,11 +1977,11 @@ func TestRenderResources_NoPostRenderer(t *testing.T) { cfg := actionConfigFixture(t) ch := buildChart(withSampleTemplates()) - values := map[string]interface{}{} + values := map[string]any{} hooks, buf, notes, err := cfg.renderResources( ch, values, "test-release", "", false, false, false, - nil, false, false, false, + nil, false, false, false, PostRenderStrategyCombined, ) assert.NoError(t, err) @@ -958,6 +1990,305 @@ func TestRenderResources_NoPostRenderer(t *testing.T) { assert.Equal(t, "", notes) } +func TestRenderResources_PostRenderer_DuplicateResourceInHookAndTemplate(t *testing.T) { + cfg := actionConfigFixture(t) + + // Simulate a chart where the same ServiceAccount appears both as a + // pre-install hook and as a regular template. 
This is a valid Helm pattern + // but previously caused post-renderers like Kustomize to fail with + // "may not add resource with an already registered id" because hooks and + // templates were merged into a single stream before post-rendering. + saHook := `apiVersion: v1 +kind: ServiceAccount +metadata: + name: my-app + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded` + + saTemplate := `apiVersion: v1 +kind: ServiceAccount +metadata: + name: my-app` + + deployment := `apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-app +spec: + template: + spec: + serviceAccountName: my-app` + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/sa-hook.yaml", ModTime: modTime, Data: []byte(saHook)}, + {Name: "templates/sa.yaml", ModTime: modTime, Data: []byte(saTemplate)}, + {Name: "templates/deployment.yaml", ModTime: modTime, Data: []byte(deployment)}, + }) + + // Use a post-renderer that rejects duplicate resource IDs, similar to + // how Kustomize behaves. We verify that no single post-render call + // receives the ServiceAccount twice. 
+ mockPR := &mockPostRenderer{ + transform: func(content string) string { + count := strings.Count(content, "kind: ServiceAccount") + if count > 1 { + t.Errorf("post-renderer received %d ServiceAccount resources in a single stream, expected at most 1", count) + } + return content + }, + } + + hooks, buf, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategySeparate, + ) + + assert.NoError(t, err) + assert.Len(t, hooks, 1) + assert.Equal(t, "my-app", hooks[0].Name) + assert.Contains(t, buf.String(), "kind: Deployment") + assert.Contains(t, buf.String(), "kind: ServiceAccount") +} + +func TestRenderResources_PostRenderer_CombinedInvokesOnceWithEverything(t *testing.T) { + cfg := actionConfigFixture(t) + + hookManifest := `apiVersion: v1 +kind: ConfigMap +metadata: + name: hook-cm + annotations: + "helm.sh/hook": pre-install` + templateManifest := `apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm` + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/hook.yaml", ModTime: modTime, Data: []byte(hookManifest)}, + {Name: "templates/cm.yaml", ModTime: modTime, Data: []byte(templateManifest)}, + }) + + var calls int + var lastInput string + mockPR := &mockPostRenderer{ + transform: func(content string) string { + calls++ + lastInput = content + return content + }, + } + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategyCombined, + ) + + assert.NoError(t, err) + assert.Equal(t, 1, calls, "combined strategy should invoke the post-renderer exactly once") + assert.Contains(t, lastInput, "hook-cm") + assert.Contains(t, lastInput, "template-cm") +} + +func TestRenderResources_PostRenderer_ZeroValueStrategyActsAsCombined(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: 
"templates/cm.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm`)}, + {Name: "templates/hook.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: hook-cm + annotations: + "helm.sh/hook": pre-install`)}, + }) + + var calls int + mockPR := &mockPostRenderer{ + transform: func(content string) string { + calls++ + return content + }, + } + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategy(""), + ) + + assert.NoError(t, err) + assert.Equal(t, 1, calls, "unset strategy must preserve backwards-compatible combined behavior") +} + +func TestRenderResources_PostRenderer_SeparateSplitsHooksAndTemplates(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/hook.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: hook-cm + annotations: + "helm.sh/hook": pre-install`)}, + {Name: "templates/cm.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm`)}, + }) + + var inputs []string + mockPR := &mockPostRenderer{ + transform: func(content string) string { + inputs = append(inputs, content) + return content + }, + } + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategySeparate, + ) + + assert.NoError(t, err) + assert.Len(t, inputs, 2, "separate strategy should invoke the post-renderer twice when both hooks and templates exist") + for _, in := range inputs { + hasHook := strings.Contains(in, "hook-cm") + hasTemplate := strings.Contains(in, "template-cm") + assert.False(t, hasHook && hasTemplate, "a single post-render invocation must not contain both hook and template resources") + assert.True(t, hasHook || hasTemplate, "each post-render invocation 
must contain either a hook or a template") + } +} + +func TestRenderResources_PostRenderer_SeparateWithOnlyTemplates(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/cm.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm`)}, + }) + + var calls int + mockPR := &mockPostRenderer{ + transform: func(content string) string { + calls++ + return content + }, + } + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategySeparate, + ) + + assert.NoError(t, err) + assert.Equal(t, 1, calls, "separate strategy should skip the empty hook group and invoke the post-renderer only once") +} + +func TestRenderResources_PostRenderer_NoHooksSkipsHooks(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/hook.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: hook-cm + annotations: + "helm.sh/hook": pre-install`)}, + {Name: "templates/cm.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm`)}, + }) + + var inputs []string + mockPR := &mockPostRenderer{ + transform: func(content string) string { + inputs = append(inputs, content) + return content + }, + } + + hooks, manifestDoc, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategyNoHooks, + ) + + assert.NoError(t, err) + assert.Len(t, inputs, 1, "nohooks strategy should invoke the post-renderer exactly once (for templates only)") + assert.NotContains(t, inputs[0], "hook-cm", "hooks must not be sent to the post-renderer") + assert.Contains(t, inputs[0], "template-cm", "templates must be sent to the post-renderer") + + // Hooks still round-trip through the release 
so they can execute. + require.Len(t, hooks, 1) + assert.Contains(t, hooks[0].Manifest, "hook-cm") + assert.Contains(t, manifestDoc.String(), "template-cm") +} + +func TestRenderResources_PostRenderer_NoHooksWithOnlyHooks(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/hook.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: hook-cm + annotations: + "helm.sh/hook": pre-install`)}, + }) + + var calls int + mockPR := &mockPostRenderer{ + transform: func(content string) string { + calls++ + return content + }, + } + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategyNoHooks, + ) + + assert.NoError(t, err) + assert.Equal(t, 0, calls, "nohooks strategy should not invoke the post-renderer when the chart only has hooks") +} + +func TestRenderResources_PostRenderer_UnknownStrategyErrors(t *testing.T) { + cfg := actionConfigFixture(t) + + modTime := time.Now() + ch := buildChartWithTemplates([]*common.File{ + {Name: "templates/cm.yaml", ModTime: modTime, Data: []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: template-cm`)}, + }) + + mockPR := &mockPostRenderer{} + + _, _, _, err := cfg.renderResources( + ch, nil, "test-release", "", false, false, false, + mockPR, false, false, false, PostRenderStrategy("bogus"), + ) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "unknown post-render strategy") + assert.Contains(t, err.Error(), "bogus") +} + func TestDetermineReleaseSSAApplyMethod(t *testing.T) { assert.Equal(t, release.ApplyMethodClientSideApply, determineReleaseSSApplyMethod(false)) assert.Equal(t, release.ApplyMethodServerSideApply, determineReleaseSSApplyMethod(true)) diff --git a/pkg/action/dependency.go b/pkg/action/dependency.go index b12887bde..5c87f7cba 100644 --- a/pkg/action/dependency.go +++ b/pkg/action/dependency.go @@ 
-92,7 +92,7 @@ func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, p for _, arc := range archives { // we need to trip the prefix dirs and the extension off. filename = strings.TrimSuffix(filepath.Base(arc), ".tgz") - maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name)) + maybeVersion := strings.TrimPrefix(filename, dep.Name+"-") if _, err := semver.StrictNewVersion(maybeVersion); err == nil { // If the version parsed without an error, it is possibly a valid diff --git a/pkg/action/get_values.go b/pkg/action/get_values.go index 6475a140b..4a5e6f589 100644 --- a/pkg/action/get_values.go +++ b/pkg/action/get_values.go @@ -42,7 +42,7 @@ func NewGetValues(cfg *Configuration) *GetValues { } // Run executes 'helm get values' against the given release. -func (g *GetValues) Run(name string) (map[string]interface{}, error) { +func (g *GetValues) Run(name string) (map[string]any, error) { if err := g.cfg.KubeClient.IsReachable(); err != nil { return nil, err } diff --git a/pkg/action/get_values_test.go b/pkg/action/get_values_test.go index 69a95a2e4..c4cbdf2d5 100644 --- a/pkg/action/get_values_test.go +++ b/pkg/action/get_values_test.go @@ -37,7 +37,7 @@ func TestNewGetValues(t *testing.T) { assert.NotNil(t, client) assert.Equal(t, cfg, client.cfg) assert.Equal(t, 0, client.Version) - assert.Equal(t, false, client.AllValues) + assert.False(t, client.AllValues) } func TestGetValues_Run_UserConfigOnly(t *testing.T) { @@ -45,12 +45,12 @@ func TestGetValues_Run_UserConfigOnly(t *testing.T) { client := NewGetValues(cfg) releaseName := "test-release" - userConfig := map[string]interface{}{ - "database": map[string]interface{}{ + userConfig := map[string]any{ + "database": map[string]any{ "host": "localhost", "port": 5432, }, - "app": map[string]interface{}{ + "app": map[string]any{ "name": "my-app", "replicas": 3, }, @@ -66,9 +66,9 @@ func TestGetValues_Run_UserConfigOnly(t *testing.T) { Name: "test-chart", Version: "1.0.0", }, - 
Values: map[string]interface{}{ + Values: map[string]any{ "defaultKey": "defaultValue", - "app": map[string]interface{}{ + "app": map[string]any{ "name": "default-app", "timeout": 30, }, @@ -92,19 +92,19 @@ func TestGetValues_Run_AllValues(t *testing.T) { client.AllValues = true releaseName := "test-release" - userConfig := map[string]interface{}{ - "database": map[string]interface{}{ + userConfig := map[string]any{ + "database": map[string]any{ "host": "localhost", "port": 5432, }, - "app": map[string]interface{}{ + "app": map[string]any{ "name": "my-app", }, } - chartDefaultValues := map[string]interface{}{ + chartDefaultValues := map[string]any{ "defaultKey": "defaultValue", - "app": map[string]interface{}{ + "app": map[string]any{ "name": "default-app", "timeout": 30, }, @@ -132,11 +132,11 @@ func TestGetValues_Run_AllValues(t *testing.T) { result, err := client.Run(releaseName) require.NoError(t, err) - assert.Equal(t, "my-app", result["app"].(map[string]interface{})["name"]) - assert.Equal(t, 30, result["app"].(map[string]interface{})["timeout"]) + assert.Equal(t, "my-app", result["app"].(map[string]any)["name"]) + assert.Equal(t, 30, result["app"].(map[string]any)["timeout"]) assert.Equal(t, "defaultValue", result["defaultKey"]) - assert.Equal(t, "localhost", result["database"].(map[string]interface{})["host"]) - assert.Equal(t, 5432, result["database"].(map[string]interface{})["port"]) + assert.Equal(t, "localhost", result["database"].(map[string]any)["host"]) + assert.Equal(t, 5432, result["database"].(map[string]any)["port"]) } func TestGetValues_Run_EmptyValues(t *testing.T) { @@ -156,7 +156,7 @@ func TestGetValues_Run_EmptyValues(t *testing.T) { Version: "1.0.0", }, }, - Config: map[string]interface{}{}, + Config: map[string]any{}, Version: 1, Namespace: "default", } @@ -165,7 +165,7 @@ func TestGetValues_Run_EmptyValues(t *testing.T) { result, err := client.Run(releaseName) require.NoError(t, err) - assert.Equal(t, map[string]interface{}{}, result) + 
assert.Equal(t, map[string]any{}, result) } func TestGetValues_Run_UnreachableKubeClient(t *testing.T) { diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 1e4fec9bd..a4a8da7a6 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -32,7 +32,34 @@ import ( ) // execHook executes all of the hooks for the given hook event. -func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, + timeout time.Duration, serverSideApply bool) error { + + shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, waitOptions, timeout, serverSideApply) + if shutdown == nil { + return err + } + if err != nil { + if err := shutdown(); err != nil { + return err + } + return err + } + return shutdown() +} + +type ExecuteShutdownFunc = func() error + +func shutdownNoOp() error { + return nil +} + +// execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. 
+func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration, + serverSideApply bool) (ExecuteShutdownFunc, error) { + executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -50,13 +77,13 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Set default delete policy to before-hook-creation cfg.hookSetDeletePolicy(h) - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { - return err + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, waitOptions, timeout); err != nil { + return shutdownNoOp, err } resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true) if err != nil { - return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) } // Record the time at which the hook was applied to the cluster @@ -77,12 +104,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil { h.LastRun.CompletedAt = time.Now() h.LastRun.Phase = release.HookPhaseFailed - return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) 
+ } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { - return fmt.Errorf("unable to get waiter: %w", err) + return shutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) } // Watch hook resources until they have completed err = waiter.WatchUntilReady(resources, timeout) @@ -98,36 +130,38 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { - // We log the error here as we want to propagate the hook failure upwards to the release object. - log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) - } - - // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks - // should be deleted under succeeded condition. - if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil { + return func() error { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, waitOptions, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) + } + + // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks + // should be deleted under succeeded condition. 
+ if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { + return err + } return err - } - - return err + }, err } h.LastRun.Phase = release.HookPhaseSucceeded } - // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook - for i := len(executingHooks) - 1; i >= 0; i-- { - h := executingHooks[i] - if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { - // We log here as we still want to attempt hook resource deletion even if output logging fails. - log.Printf("error outputting logs for hook failure: %v", err) - } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { - return err + return func() error { + // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook + for i := len(executingHooks) - 1; i >= 0; i-- { + h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. 
+ log.Printf("error outputting logs for hook failure: %v", err) + } + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { + return err + } } - } - - return nil + return nil + }, nil } // hookByWeight is a sorter for hooks @@ -143,7 +177,9 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. if h.Kind == "CustomResourceDefinition" { @@ -159,7 +195,12 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return joinErrors(errs, "; ") } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) 
+ } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { return err } @@ -171,9 +212,11 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo } // deleteHooksByPolicy deletes all hooks if the hook policy instructs it to -func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + for _, h := range hooks { - if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, waitOptions, timeout); err != nil { return err } } @@ -214,9 +257,9 @@ func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace s } switch h.Kind { case "Job": - return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)}) + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: "job-name=" + h.Name}) case "Pod": - return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)}) + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: "metadata.name=" + h.Name}) default: return nil } diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 02b70dda1..b017b1f11 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -18,6 +18,8 @@ package action import ( "bytes" + "context" + "errors" "fmt" "io" "reflect" @@ -172,7 +174,7 @@ func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace str t.Helper() var expectedOutput string if shouldOutput { - expectedOutput = fmt.Sprintf("attempted to output logs 
for namespace: %s", expectedNamespace) + expectedOutput = "attempted to output logs for namespace: " + expectedNamespace } is := assert.New(t) instAction := installAction(t) @@ -185,7 +187,7 @@ func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace str {Name: "templates/hello", ModTime: modTime, Data: []byte("hello: world")}, {Name: "templates/hooks", ModTime: modTime, Data: []byte(manifest)}, } - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChartWithTemplates(templates), vals) is.NoError(err) @@ -199,13 +201,13 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str t.Helper() var expectedOutput string if shouldOutput { - expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace) + expectedOutput = "attempted to output logs for namespace: " + expectedNamespace } is := assert.New(t) instAction := installAction(t) instAction.ReleaseName = "failed-hooks" failingClient := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failingClient.WatchUntilReadyError = fmt.Errorf("failed watch") + failingClient.WatchUntilReadyError = errors.New("failed watch") instAction.cfg.KubeClient = failingClient outBuffer := &bytes.Buffer{} failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} @@ -215,7 +217,7 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str {Name: "templates/hello", ModTime: modTime, Data: []byte("hello: world")}, {Name: "templates/hooks", ModTime: modTime, Data: []byte(manifest)}, } - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChartWithTemplates(templates), vals) is.Error(err) @@ -278,8 +280,8 @@ func (h *HookFailingKubeClient) Delete(resources kube.ResourceList, deletionProp return h.PrintingKubeClient.Delete(resources, deletionPropagation) } -func (h *HookFailingKubeClient) GetWaiter(strategy 
kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := h.PrintingKubeClient.GetWaiter(strategy) +func (h *HookFailingKubeClient) GetWaiterWithOptions(strategy kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + waiter, _ := h.PrintingKubeClient.GetWaiterWithOptions(strategy, opts...) return &HookFailingKubeWaiter{ PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter), failOn: h.failOn, @@ -394,18 +396,18 @@ data: } serverSideApply := true - err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply) + err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, nil, 600, serverSideApply) if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) { t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord) } if err != nil && !tc.expectError { - t.Fatalf("Got an unexpected error.") + t.Fatal("Got an unexpected error.") } if err == nil && tc.expectError { - t.Fatalf("Expected and error but did not get it.") + t.Fatal("Expected and error but did not get it.") } }) } @@ -442,3 +444,51 @@ func TestConfiguration_hookSetDeletePolicy(t *testing.T) { }) } } + +func TestExecHook_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + failer := &kubefake.FailingKubeClient{ + PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, + } + + configuration := &Configuration{ + Releases: storage.Init(driver.NewMemory()), + KubeClient: failer, + Capabilities: common.DefaultCapabilities, + } + + rel := &release.Release{ + Name: "test-release", + Namespace: "test", + Hooks: []*release.Hook{ + { + Name: "test-hook", + Kind: "ConfigMap", + Path: "templates/hook.yaml", + Manifest: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-hook + namespace: test +data: + foo: bar +`, + Weight: 0, + Events: []release.HookEvent{ + release.HookPreInstall, + }, + }, + }, + } + + // Use WithWaitContext 
as a marker WaitOption that we can track + ctx := context.Background() + waitOptions := []kube.WaitOption{kube.WithWaitContext(ctx)} + + err := configuration.execHook(rel, release.HookPreInstall, kube.StatusWatcherStrategy, waitOptions, 600, false) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/install.go b/pkg/action/install.go index b379d6873..580b8a0cb 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -95,6 +95,7 @@ type Install struct { DisableHooks bool Replace bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool Devel bool DependencyUpdate bool @@ -129,6 +130,10 @@ type Install struct { // TakeOwnership will ignore the check for helm annotations and take ownership of the resources. TakeOwnership bool PostRenderer postrenderer.PostRenderer + // PostRenderStrategy controls how hooks and regular templates are passed + // to the configured post-renderer. See PostRenderStrategy for the + // available modes. Defaults to PostRenderStrategyCombined. + PostRenderStrategy PostRenderStrategy // Lock to control raceconditions when the process receives a SIGTERM Lock sync.Mutex goroutineCount atomic.Int32 @@ -157,9 +162,10 @@ type ChartPathOptions struct { // NewInstall creates a new Install object with the given configuration. func NewInstall(cfg *Configuration) *Install { in := &Install{ - cfg: cfg, - ServerSideApply: true, - DryRunStrategy: DryRunNone, + cfg: cfg, + ServerSideApply: true, // Must always match the CLI default. + DryRunStrategy: DryRunNone, + PostRenderStrategy: PostRenderStrategyCombined, } in.registryClient = cfg.RegistryClient @@ -180,12 +186,24 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // We do these one file at a time in the order they were read. 
totalItems := []*resource.Info{} for _, obj := range crds { + if obj.File == nil { + return fmt.Errorf("failed to install CRD %s: file is empty", obj.Name) + } + + if obj.File.Data == nil { + return fmt.Errorf("failed to install CRD %s: file data is empty", obj.Name) + } + // Read in the resources res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false) if err != nil { return fmt.Errorf("failed to install CRD %s: %w", obj.Name, err) } + if len(res) == 0 { + return fmt.Errorf("failed to install CRD %s: resources are empty", obj.Name) + } + // Send them to Kube if _, err := i.cfg.KubeClient.Create( res, @@ -201,7 +219,13 @@ func (i *Install) installCRDs(crds []chart.CRD) error { totalItems = append(totalItems, res...) } if len(totalItems) > 0 { - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return fmt.Errorf("unable to get waiter: %w", err) } @@ -215,27 +239,30 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // the case when an action configuration is reused for multiple actions, // as otherwise it is later loaded by ourselves when getCapabilities // is called later on in the installation process. - if i.cfg.Capabilities != nil { - discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient() + if i.cfg.RESTClientGetter != nil { + if i.cfg.Capabilities != nil { + discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return err + } + + if discoveryClient != nil { + i.cfg.Logger().Debug("clearing discovery cache") + discoveryClient.Invalidate() + _, _ = discoveryClient.ServerGroups() + } + } + + // Invalidate the REST mapper, since it will not have the new CRDs + // present. 
+ restMapper, err := i.cfg.RESTClientGetter.ToRESTMapper() if err != nil { return err } - - i.cfg.Logger().Debug("clearing discovery cache") - discoveryClient.Invalidate() - - _, _ = discoveryClient.ServerGroups() - } - - // Invalidate the REST mapper, since it will not have the new CRDs - // present. - restMapper, err := i.cfg.RESTClientGetter.ToRESTMapper() - if err != nil { - return err - } - if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { - i.cfg.Logger().Debug("clearing REST mapper cache") - resettable.Reset() + if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok { + i.cfg.Logger().Debug("clearing REST mapper cache") + resettable.Reset() + } } } return nil @@ -245,7 +272,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // // If DryRun is set to true, this will prepare the release, but not install it -func (i *Install) Run(chrt ci.Charter, vals map[string]interface{}) (ri.Releaser, error) { +func (i *Install) Run(chrt ci.Charter, vals map[string]any) (ri.Releaser, error) { ctx := context.Background() return i.RunWithContext(ctx, chrt, vals) } @@ -254,7 +281,7 @@ func (i *Install) Run(chrt ci.Charter, vals map[string]interface{}) (ri.Releaser // // When the task is cancelled through ctx, the function returns and the install // proceeds in the background. 
-func (i *Install) RunWithContext(ctx context.Context, ch ci.Charter, vals map[string]interface{}) (ri.Releaser, error) { +func (i *Install) RunWithContext(ctx context.Context, ch ci.Charter, vals map[string]any) (ri.Releaser, error) { var chrt *chart.Chart switch c := ch.(type) { case *chart.Chart: @@ -348,14 +375,14 @@ func (i *Install) RunWithContext(ctx context.Context, ch ci.Charter, vals map[st rel := i.createRelease(chrt, vals, i.Labels) var manifestDoc *bytes.Buffer - rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, interactWithServer(i.DryRunStrategy), i.EnableDNS, i.HideSecret) + rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, interactWithServer(i.DryRunStrategy), i.EnableDNS, i.HideSecret, i.PostRenderStrategy) // Even for errors, attach this if available if manifestDoc != nil { rel.Manifest = manifestDoc.String() } // Check error from render if err != nil { - rel.SetStatus(rcommon.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error())) + rel.SetStatus(rcommon.StatusFailed, "failed to render resource: "+err.Error()) // Return a release with partial data so that the client can show debugging information. 
return rel, err } @@ -480,8 +507,8 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { - return rel, fmt.Errorf("failed pre-install: %s", err) + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { + return rel, fmt.Errorf("failed pre-install: %w", err) } } @@ -506,7 +533,12 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return rel, fmt.Errorf("failed to get waiter: %w", err) } @@ -521,8 +553,8 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { - return rel, fmt.Errorf("failed post-install: %s", err) + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { + return rel, fmt.Errorf("failed post-install: %w", err) } } @@ -555,6 +587,7 @@ func (i *Install) failRelease(rel *release.Release, err error) (*release.Release uninstall.KeepHistory = false uninstall.Timeout = i.Timeout uninstall.WaitStrategy = i.WaitStrategy + uninstall.WaitOptions = i.WaitOptions if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil { return rel, fmt.Errorf("an error occurred while uninstalling the release. 
original install error: %w: %w", err, uninstallErr) } @@ -625,7 +658,7 @@ func releaseV1ListToReleaserList(ls []*release.Release) ([]ri.Releaser, error) { } // createRelease creates a new release object -func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release { +func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]any, labels map[string]string) *release.Release { ts := i.cfg.Now() r := &release.Release{ diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 9a0ca8d22..05ca9a75e 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -111,6 +111,54 @@ func createDummyResourceList(owned bool) kube.ResourceList { return resourceList } +func createDummyCRDList(owned bool) kube.ResourceList { + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummyName", + Namespace: "spaced", + }, + } + + if owned { + obj.Labels = map[string]string{ + "app.kubernetes.io/managed-by": "Helm", + } + obj.Annotations = map[string]string{ + "meta.helm.sh/release-name": "test-install-release", + "meta.helm.sh/release-namespace": "spaced", + } + } + + resInfo := resource.Info{ + Name: "dummyName", + Namespace: "spaced", + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "test", Version: "v1", Resource: "crd"}, + GroupVersionKind: schema.GroupVersionKind{Group: "test", Version: "v1", Kind: "crd"}, + Scope: meta.RESTScopeNamespace, + }, + Object: obj, + } + body := io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(appsv1Codec, obj)))) + + resInfo.Client = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}, + NegotiatedSerializer: scheme.Codecs.WithoutConversion(), + Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", kuberuntime.ContentTypeJSON) + return &http.Response{ + StatusCode: http.StatusOK, + 
Header: header, + Body: body, + }, nil + }), + } + var resourceList kube.ResourceList + resourceList.Append(&resInfo) + return resourceList +} + func installActionWithConfig(config *Configuration) *Install { instAction := NewInstall(config) instAction.Namespace = "spaced" @@ -134,7 +182,7 @@ func TestInstallRelease(t *testing.T) { req := require.New(t) instAction := installAction(t) - vals := map[string]interface{}{} + vals := map[string]any{} ctx, done := context.WithCancel(t.Context()) resi, err := instAction.RunWithContext(ctx, buildChart(), vals) if err != nil { @@ -240,13 +288,13 @@ func TestInstallReleaseWithTakeOwnership_ResourceOwnedNoFlag(t *testing.T) { func TestInstallReleaseWithValues(t *testing.T) { is := assert.New(t) instAction := installAction(t) - userVals := map[string]interface{}{ - "nestedKey": map[string]interface{}{ + userVals := map[string]any{ + "nestedKey": map[string]any{ "simpleKey": "simpleValue", }, } - expectedUserValues := map[string]interface{}{ - "nestedKey": map[string]interface{}{ + expectedUserValues := map[string]any{ + "nestedKey": map[string]any{ "simpleKey": "simpleValue", }, } @@ -280,7 +328,7 @@ func TestInstallReleaseWithValues(t *testing.T) { func TestInstallRelease_NoName(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "" - vals := map[string]interface{}{} + vals := map[string]any{} _, err := instAction.Run(buildChart(), vals) if err == nil { t.Fatal("expected failure when no name is specified") @@ -292,7 +340,7 @@ func TestInstallRelease_WithNotes(t *testing.T) { is := assert.New(t) instAction := installAction(t) instAction.ReleaseName = "with-notes" - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withNotes("note here")), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -323,7 +371,7 @@ func TestInstallRelease_WithNotesRendered(t *testing.T) { is := assert.New(t) instAction := installAction(t) instAction.ReleaseName = "with-notes" - 
vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withNotes("got-{{.Release.Name}}")), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -336,7 +384,7 @@ func TestInstallRelease_WithNotesRendered(t *testing.T) { rel, err := releaserToV1Release(r) is.NoError(err) - expectedNotes := fmt.Sprintf("got-%s", res.Name) + expectedNotes := "got-" + res.Name is.Equal(expectedNotes, rel.Info.Notes) is.Equal(rel.Info.Description, "Install complete") } @@ -346,7 +394,7 @@ func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) { is := assert.New(t) instAction := installAction(t) instAction.ReleaseName = "with-notes" - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withNotes("parent"), withDependency(withNotes("child"))), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -369,7 +417,7 @@ func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "with-notes" instAction.SubNotes = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withNotes("parent"), withDependency(withNotes("child"))), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -395,7 +443,7 @@ func TestInstallRelease_DryRunClient(t *testing.T) { instAction := installAction(t) instAction.DryRunStrategy = dryRunStrategy - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withSampleTemplates()), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -423,7 +471,7 @@ func TestInstallRelease_DryRunHiddenSecret(t *testing.T) { // First perform a normal dry-run with the secret and confirm its presence. 
instAction.DryRunStrategy = DryRunClient - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -438,7 +486,7 @@ func TestInstallRelease_DryRunHiddenSecret(t *testing.T) { // Perform a dry-run where the secret should not be present instAction.HideSecret = true - vals = map[string]interface{}{} + vals = map[string]any{} res2i, err := instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -454,10 +502,10 @@ func TestInstallRelease_DryRunHiddenSecret(t *testing.T) { // Ensure there is an error when HideSecret True but not in a dry-run mode instAction.DryRunStrategy = DryRunNone - vals = map[string]interface{}{} + vals = map[string]any{} _, err = instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals) if err == nil { - t.Fatalf("Did not get expected an error when dry-run false and hide secret is true") + t.Fatal("Did not get the expected error when dry-run is false and hide secret is true") } } @@ -466,7 +514,7 @@ func TestInstallRelease_DryRun_Lookup(t *testing.T) { is := assert.New(t) instAction := installAction(t) instAction.DryRunStrategy = DryRunNone - vals := map[string]interface{}{} + vals := map[string]any{} mockChart := buildChart(withSampleTemplates()) mockChart.Templates = append(mockChart.Templates, &common.File{ @@ -489,7 +537,7 @@ func TestInstallReleaseIncorrectTemplate_DryRun(t *testing.T) { is := assert.New(t) instAction := installAction(t) instAction.DryRunStrategy = DryRunNone - vals := map[string]interface{}{} + vals := map[string]any{} _, err := instAction.Run(buildChart(withSampleIncludingIncorrectTemplates()), vals) expectedErr := `hello/templates/incorrect:1:10 executing "hello/templates/incorrect" at <.Values.bad.doh>: @@ -507,7 +555,7 @@ func TestInstallRelease_NoHooks(t *testing.T) { 
instAction.ReleaseName = "no-hooks" require.NoError(t, instAction.cfg.Releases.Create(releaseStub())) - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(), vals) if err != nil { t.Fatalf("Failed install: %s", err) @@ -523,12 +571,12 @@ func TestInstallRelease_FailedHooks(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "failed-hooks" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WatchUntilReadyError = fmt.Errorf("Failed watch") + failer.WatchUntilReadyError = errors.New("Failed watch") instAction.cfg.KubeClient = failer outBuffer := &bytes.Buffer{} failer.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(), vals) is.Error(err) res, err := releaserToV1Release(resi) @@ -548,7 +596,7 @@ func TestInstallRelease_ReplaceRelease(t *testing.T) { require.NoError(t, instAction.cfg.Releases.Create(rel)) instAction.ReleaseName = rel.Name - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(), vals) is.NoError(err) res, err := releaserToV1Release(resi) @@ -568,13 +616,13 @@ func TestInstallRelease_ReplaceRelease(t *testing.T) { func TestInstallRelease_KubeVersion(t *testing.T) { is := assert.New(t) instAction := installAction(t) - vals := map[string]interface{}{} + vals := map[string]any{} _, err := instAction.Run(buildChart(withKube(">=0.0.0")), vals) is.NoError(err) // This should fail for a few hundred years instAction.ReleaseName = "should-fail" - vals = map[string]interface{}{} + vals = map[string]any{} _, err = instAction.Run(buildChart(withKube(">=99.0.0")), vals) is.Error(err) is.Contains(err.Error(), "chart requires kubeVersion: >=99.0.0 which is incompatible with Kubernetes v1.20.") @@ -585,10 +633,10 @@ func TestInstallRelease_Wait(t *testing.T) { instAction := installAction(t) 
instAction.ReleaseName = "come-fail-away" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") + failer.WaitError = errors.New("I timed out") instAction.cfg.KubeClient = failer instAction.WaitStrategy = kube.StatusWatcherStrategy - vals := map[string]interface{}{} + vals := map[string]any{} goroutines := instAction.getGoroutineCount() @@ -609,7 +657,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer instAction.WaitStrategy = kube.StatusWatcherStrategy - vals := map[string]interface{}{} + vals := map[string]any{} ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) @@ -629,11 +677,11 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "come-fail-away" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") + failer.WaitError = errors.New("I timed out") instAction.cfg.KubeClient = failer instAction.WaitStrategy = kube.StatusWatcherStrategy instAction.WaitForJobs = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(), vals) is.Error(err) @@ -650,13 +698,13 @@ func TestInstallRelease_RollbackOnFailure(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "come-fail-away" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") + failer.WaitError = errors.New("I timed out") instAction.cfg.KubeClient = failer instAction.RollbackOnFailure = true // disabling hooks to avoid an early fail when // WaitForDelete is called on the pre-delete hook execution instAction.DisableHooks = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := instAction.Run(buildChart(), vals) is.Error(err) @@ -675,11 +723,11 @@ func 
TestInstallRelease_RollbackOnFailure(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "come-fail-away-with-me" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") - failer.DeleteError = fmt.Errorf("uninstall fail") + failer.WaitError = errors.New("I timed out") + failer.DeleteError = errors.New("uninstall fail") instAction.cfg.KubeClient = failer instAction.RollbackOnFailure = true - vals := map[string]interface{}{} + vals := map[string]any{} _, err := instAction.Run(buildChart(), vals) is.Error(err) @@ -697,7 +745,7 @@ func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) { failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer instAction.RollbackOnFailure = true - vals := map[string]interface{}{} + vals := map[string]any{} ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) @@ -793,7 +841,7 @@ func TestNameTemplate(t *testing.T) { func TestInstallReleaseOutputDir(t *testing.T) { is := assert.New(t) instAction := installAction(t) - vals := map[string]interface{}{} + vals := map[string]any{} dir := t.TempDir() @@ -825,7 +873,7 @@ func TestInstallReleaseOutputDir(t *testing.T) { func TestInstallOutputDirWithReleaseName(t *testing.T) { is := assert.New(t) instAction := installAction(t) - vals := map[string]interface{}{} + vals := map[string]any{} dir := t.TempDir() @@ -1084,8 +1132,8 @@ func TestInstallSetRegistryClient(t *testing.T) { assert.Equal(t, registryClient, instAction.GetRegistryClient()) } -func TestInstalLCRDs(t *testing.T) { - config := actionConfigFixture(t) +func TestInstallCRDs(t *testing.T) { + config := actionConfigFixtureWithDummyResources(t, createDummyCRDList(false)) instAction := NewInstall(config) mockFile := common.File{ @@ -1094,16 +1142,22 @@ func TestInstalLCRDs(t *testing.T) { } mockChart := buildChart(withFile(mockFile)) crdsToInstall := mockChart.CRDObjects() + assert.Len(t, crdsToInstall, 1) 
assert.Equal(t, crdsToInstall[0].File.Data, mockFile.Data) - require.NoError(t, instAction.installCRDs(crdsToInstall)) } -func TestInstalLCRDs_KubeClient_BuildError(t *testing.T) { - config := actionConfigFixture(t) - failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} - failingKubeClient.BuildError = errors.New("build error") +func TestInstallCRDs_AlreadyExist(t *testing.T) { + dummyResources := createDummyCRDList(false) + failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: dummyResources} + mockError := &apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonAlreadyExists, + }} + failingKubeClient.CreateError = mockError + + config := actionConfigFixtureWithDummyResources(t, dummyResources) config.KubeClient = &failingKubeClient instAction := NewInstall(config) @@ -1114,13 +1168,13 @@ func TestInstalLCRDs_KubeClient_BuildError(t *testing.T) { mockChart := buildChart(withFile(mockFile)) crdsToInstall := mockChart.CRDObjects() - require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD") + assert.Nil(t, instAction.installCRDs(crdsToInstall)) } -func TestInstalLCRDs_KubeClient_CreateError(t *testing.T) { +func TestInstallCRDs_KubeClient_BuildError(t *testing.T) { config := actionConfigFixture(t) failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} - failingKubeClient.CreateError = errors.New("create error") + failingKubeClient.BuildError = errors.New("build error") config.KubeClient = &failingKubeClient instAction := NewInstall(config) @@ -1134,14 +1188,10 @@ func TestInstalLCRDs_KubeClient_CreateError(t *testing.T) { require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD") } -func TestInstalLCRDs_AlreadyExist(t *testing.T) { 
+func TestInstallCRDs_KubeClient_CreateError(t *testing.T) { config := actionConfigFixture(t) failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} - mockError := &apierrors.StatusError{ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Reason: metav1.StatusReasonAlreadyExists, - }} - failingKubeClient.CreateError = mockError + failingKubeClient.CreateError = errors.New("create error") config.KubeClient = &failingKubeClient instAction := NewInstall(config) @@ -1152,10 +1202,10 @@ func TestInstalLCRDs_AlreadyExist(t *testing.T) { mockChart := buildChart(withFile(mockFile)) crdsToInstall := mockChart.CRDObjects() - assert.Nil(t, instAction.installCRDs(crdsToInstall)) + require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD") } -func TestInstalLCRDs_WaiterError(t *testing.T) { +func TestInstallCRDs_WaiterError(t *testing.T) { config := actionConfigFixture(t) failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil} failingKubeClient.WaitError = errors.New("wait error") @@ -1186,3 +1236,64 @@ func TestCheckDependencies_MissingDependency(t *testing.T) { assert.ErrorContains(t, CheckDependencies(mockChart, []ci.Dependency{&dependency}), "missing in charts") } + +func TestInstallCRDs_CheckNilErrors(t *testing.T) { + tests := []struct { + name string + input []chart.CRD + }{ + { + name: "only one crd with file nil", + input: []chart.CRD{ + {Name: "one", File: nil}, + }, + }, + { + name: "only one crd with its file data nil", + input: []chart.CRD{ + {Name: "one", File: &common.File{Name: "crds/foo.yaml", Data: nil}}, + }, + }, + { + name: "at least a crd with its file data nil", + input: []chart.CRD{ + {Name: "one", File: &common.File{Name: "crds/foo.yaml", Data: []byte("data")}}, + {Name: "two", File: &common.File{Name: "crds/foo2.yaml", Data: nil}}, + {Name: "three", File: 
&common.File{Name: "crds/foo3.yaml", Data: []byte("data")}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + instAction := installAction(t) + + err := instAction.installCRDs(tt.input) + if err == nil { + t.Error("got nil expected err") + } + }) + } +} + +func TestInstallRelease_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + instAction := installAction(t) + instAction.ReleaseName = "wait-options-test" + instAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + instAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + vals := map[string]any{} + _, err := instAction.Run(buildChart(), vals) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/lint.go b/pkg/action/lint.go index 208fd4637..6156fe5c8 100644 --- a/pkg/action/lint.go +++ b/pkg/action/lint.go @@ -53,7 +53,7 @@ func NewLint() *Lint { } // Run executes 'helm Lint' against the given chart. 
-func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult { +func (l *Lint) Run(paths []string, vals map[string]any) *LintResult { lowestTolerance := support.ErrorSev if l.Strict { lowestTolerance = support.WarningSev @@ -87,7 +87,7 @@ func HasWarningsOrErrors(result *LintResult) bool { return len(result.Errors) > 0 } -func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) (support.Linter, error) { +func lintChart(path string, vals map[string]any, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) (support.Linter, error) { var chartPath string linter := support.Linter{} diff --git a/pkg/action/lint_test.go b/pkg/action/lint_test.go index 4684f91f1..5b5c2c50a 100644 --- a/pkg/action/lint_test.go +++ b/pkg/action/lint_test.go @@ -26,7 +26,7 @@ import ( ) var ( - values = make(map[string]interface{}) + values = make(map[string]any) namespace = "testNamespace" chart1MultipleChartLint = "testdata/charts/multiplecharts-lint-chart-1" chart2MultipleChartLint = "testdata/charts/multiplecharts-lint-chart-2" @@ -88,12 +88,12 @@ func TestLintChart(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := lintChart(tt.chartPath, map[string]interface{}{}, namespace, nil, tt.skipSchemaValidation) + _, err := lintChart(tt.chartPath, map[string]any{}, namespace, nil, tt.skipSchemaValidation) switch { case err != nil && !tt.err: t.Errorf("%s", err) case err == nil && tt.err: - t.Errorf("Expected a chart parsing error") + t.Error("Expected a chart parsing error") } }) } diff --git a/pkg/action/package.go b/pkg/action/package.go index 0ab49538c..86426b412 100644 --- a/pkg/action/package.go +++ b/pkg/action/package.go @@ -70,7 +70,7 @@ func NewPackage() *Package { } // Run executes 'helm package' against the given chart and returns the path to the packaged chart. 
-func (p *Package) Run(path string, _ map[string]interface{}) (string, error) { +func (p *Package) Run(path string, _ map[string]any) (string, error) { chrt, err := loader.LoadDir(path) if err != nil { return "", err diff --git a/pkg/action/package_test.go b/pkg/action/package_test.go index 84dcb71c1..2e1d4ff07 100644 --- a/pkg/action/package_test.go +++ b/pkg/action/package_test.go @@ -17,6 +17,7 @@ limitations under the License. package action import ( + "errors" "os" "path" "testing" @@ -146,7 +147,7 @@ func TestValidateVersion(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if err := validateVersion(tt.args.ver); err != nil { - if err != tt.wantErr { + if !errors.Is(err, tt.wantErr) { t.Errorf("Expected {%v}, got {%v}", tt.wantErr, err) } diff --git a/pkg/action/push_test.go b/pkg/action/push_test.go index 35c6f3efc..125799252 100644 --- a/pkg/action/push_test.go +++ b/pkg/action/push_test.go @@ -47,14 +47,14 @@ func TestNewPushWithInsecureSkipTLSVerify(t *testing.T) { client := NewPushWithOpts(WithInsecureSkipTLSVerify(true)) assert.NotNil(t, client) - assert.Equal(t, true, client.insecureSkipTLSVerify) + assert.True(t, client.insecureSkipTLSVerify) } func TestNewPushWithPlainHTTP(t *testing.T) { client := NewPushWithOpts(WithPlainHTTP(true)) assert.NotNil(t, client) - assert.Equal(t, true, client.plainHTTP) + assert.True(t, client.plainHTTP) } func TestNewPushWithPushOptWriter(t *testing.T) { diff --git a/pkg/action/registry_login_test.go b/pkg/action/registry_login_test.go index de2450d9d..590673b3a 100644 --- a/pkg/action/registry_login_test.go +++ b/pkg/action/registry_login_test.go @@ -48,7 +48,7 @@ func TestWithInsecure(t *testing.T) { opt := WithInsecure(true) assert.Nil(t, opt(client)) - assert.Equal(t, true, client.insecure) + assert.True(t, client.insecure) } func TestWithKeyFile(t *testing.T) { @@ -80,5 +80,5 @@ func TestWithPlainHTTPLogin(t *testing.T) { opt := WithPlainHTTPLogin(true) assert.Nil(t, opt(client)) - 
assert.Equal(t, true, client.plainHTTP) + assert.True(t, client.plainHTTP) } diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index b649579f4..043a41236 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -41,8 +41,9 @@ const ( // // It provides the implementation of 'helm test'. type ReleaseTesting struct { - cfg *Configuration - Timeout time.Duration + cfg *Configuration + Timeout time.Duration + WaitOptions []kube.WaitOption // Used for fetching logs from test pods Namespace string Filters map[string][]string @@ -57,24 +58,24 @@ func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { } // Run executes 'helm test' against the given release. -func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { +func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownFunc, error) { if err := r.cfg.KubeClient.IsReachable(); err != nil { - return nil, err + return nil, shutdownNoOp, err } if err := chartutil.ValidateReleaseName(name); err != nil { - return nil, fmt.Errorf("releaseTest: Release name is invalid: %s", name) + return nil, shutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name) } // finds the non-deleted release with the given name reli, err := r.cfg.Releases.Last(name) if err != nil { - return reli, err + return reli, shutdownNoOp, err } rel, err := releaserToV1Release(reli) if err != nil { - return rel, err + return reli, shutdownNoOp, err } skippedHooks := []*release.Hook{} @@ -102,14 +103,16 @@ func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { } serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply) - if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply); err != nil { + shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.WaitOptions, r.Timeout, serverSideApply) + + if err != nil { rel.Hooks = append(skippedHooks, 
rel.Hooks...) - r.cfg.Releases.Update(rel) - return rel, err + r.cfg.Releases.Update(reli) + return reli, shutdown, err } rel.Hooks = append(skippedHooks, rel.Hooks...) - return rel, r.cfg.Releases.Update(rel) + return reli, shutdown, r.cfg.Releases.Update(reli) } // GetPodLogs will write the logs for all test pods in the given release into diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go index 1ada78a4b..ab35e104a 100644 --- a/pkg/action/release_testing_test.go +++ b/pkg/action/release_testing_test.go @@ -18,6 +18,7 @@ package action import ( "bytes" + "context" "errors" "io" "os" @@ -27,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -46,7 +48,7 @@ func TestReleaseTestingRun_UnreachableKubeClient(t *testing.T) { config.KubeClient = &failingKubeClient client := NewReleaseTesting(config) - result, err := client.Run("") + result, _, err := client.Run("") assert.Nil(t, result) assert.Error(t, err) } @@ -89,3 +91,29 @@ func TestReleaseTestingGetPodLogs_PodRetrievalError(t *testing.T) { require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod logs") } + +func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a release with a test hook + rel := releaseStub() + rel.Name = "wait-options-test-release" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + client := NewReleaseTesting(config) + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + _, _, err := client.Run(rel.Name) + 
is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 4cdb2d33b..459569781 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -40,6 +40,7 @@ type Rollback struct { Version int Timeout time.Duration WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool DisableHooks bool // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster @@ -63,8 +64,9 @@ type Rollback struct { // NewRollback creates a new Rollback object with the given configuration. func NewRollback(cfg *Configuration) *Rollback { return &Rollback{ - cfg: cfg, - DryRunStrategy: DryRunNone, + cfg: cfg, + ServerSideApply: "auto", // Must always match the CLI default. + DryRunStrategy: DryRunNone, } } @@ -210,7 +212,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // pre-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } else { @@ -251,7 +253,12 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas return targetRelease, err } - waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := r.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(r.WaitStrategy, r.WaitOptions...) 
+ } else { + waiter, err = r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + } if err != nil { return nil, fmt.Errorf("unable to get waiter: %w", err) } @@ -273,7 +280,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } diff --git a/pkg/action/rollback_test.go b/pkg/action/rollback_test.go index 5158bee26..deb6c7c80 100644 --- a/pkg/action/rollback_test.go +++ b/pkg/action/rollback_test.go @@ -17,12 +17,15 @@ limitations under the License. package action import ( + "context" "errors" "io" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" ) @@ -43,3 +46,40 @@ func TestRollbackRun_UnreachableKubeClient(t *testing.T) { client := NewRollback(config) assert.Error(t, client.Run("")) } + +func TestRollback_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a deployed release and a second version to roll back to + rel := releaseStub() + rel.Name = "wait-options-rollback" + rel.Info.Status = "deployed" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + rel2 := releaseStub() + rel2.Name = "wait-options-rollback" + rel2.Version = 2 + rel2.Info.Status = "deployed" + rel2.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel2)) + + client := NewRollback(config) + client.Version = 1 + client.WaitStrategy = kube.StatusWatcherStrategy + client.ServerSideApply = "auto" + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = 
[]kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + err := client.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/show_test.go b/pkg/action/show_test.go index 6e270ac6d..854dee07a 100644 --- a/pkg/action/show_test.go +++ b/pkg/action/show_test.go @@ -43,7 +43,7 @@ func TestShow(t *testing.T) { Raw: []*common.File{ {Name: "values.yaml", ModTime: modTime, Data: []byte("VALUES\n")}, }, - Values: map[string]interface{}{}, + Values: map[string]any{}, } output, err := client.Run("") diff --git a/pkg/action/testdata/rbac.txt b/pkg/action/testdata/rbac.txt index 0cb15b868..91938d5cc 100644 --- a/pkg/action/testdata/rbac.txt +++ b/pkg/action/testdata/rbac.txt @@ -23,3 +23,4 @@ subjects: - kind: ServiceAccount name: schedule-agents namespace: spaced + diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index bba66dc07..73927ed9d 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -45,6 +45,7 @@ type Uninstall struct { IgnoreNotFound bool KeepHistory bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption DeletionPropagation string Timeout time.Duration Description string @@ -63,7 +64,13 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) return nil, err } - waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...) 
+ } else { + waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + } if err != nil { return nil, err } @@ -181,7 +188,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { return res, err } } else { @@ -211,7 +218,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { errs = append(errs, err) } } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 52a6c13b1..2bf3d3878 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -116,7 +116,7 @@ func TestUninstallRelease_Wait(t *testing.T) { }` require.NoError(t, unAction.cfg.Releases.Create(rel)) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitForDeleteError = fmt.Errorf("U timed out") + failer.WaitForDeleteError = errors.New("U timed out") unAction.cfg.KubeClient = failer resi, err := unAction.Run(rel.Name) is.Error(err) @@ -153,7 +153,7 @@ func TestUninstallRelease_Cascade(t *testing.T) { // Create dummy resources with Mapping but no Client - this skips ownership verification // (nil Client is treated as owned) and goes directly to delete dummyResources := kube.ResourceList{ - newDeploymentResource("secret", ""), + newDeploymentResource("secret", "", ""), } failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 13d28fd4d..00939ffa6 
100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -72,6 +72,8 @@ type Upgrade struct { Timeout time.Duration // WaitStrategy determines what type of waiting should be done WaitStrategy kube.WaitStrategy + // WaitOptions are additional options for waiting on resources + WaitOptions []kube.WaitOption // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. @@ -119,6 +121,10 @@ type Upgrade struct { // If this is non-nil, then after templates are rendered, they will be sent to the // post renderer before sending to the Kubernetes API server. PostRenderer postrenderer.PostRenderer + // PostRenderStrategy controls how hooks and regular templates are passed + // to the configured post-renderer. See PostRenderStrategy for the + // available modes. Defaults to PostRenderStrategyCombined. + PostRenderStrategy PostRenderStrategy // DisableOpenAPIValidation controls whether OpenAPI validation is enforced. DisableOpenAPIValidation bool // Get missing dependencies @@ -139,9 +145,10 @@ type resultMessage struct { // NewUpgrade creates a new Upgrade object with the given configuration. func NewUpgrade(cfg *Configuration) *Upgrade { up := &Upgrade{ - cfg: cfg, - ServerSideApply: "auto", - DryRunStrategy: DryRunNone, + cfg: cfg, + ServerSideApply: "auto", // Must always match the CLI default. + DryRunStrategy: DryRunNone, + PostRenderStrategy: PostRenderStrategyCombined, } up.registryClient = cfg.RegistryClient @@ -154,13 +161,13 @@ func (u *Upgrade) SetRegistryClient(client *registry.Client) { } // Run executes the upgrade on the given release. 
-func (u *Upgrade) Run(name string, chart chart.Charter, vals map[string]interface{}) (ri.Releaser, error) { +func (u *Upgrade) Run(name string, chart chart.Charter, vals map[string]any) (ri.Releaser, error) { ctx := context.Background() return u.RunWithContext(ctx, name, chart, vals) } // RunWithContext executes the upgrade on the given release with context. -func (u *Upgrade) RunWithContext(ctx context.Context, name string, ch chart.Charter, vals map[string]interface{}) (ri.Releaser, error) { +func (u *Upgrade) RunWithContext(ctx context.Context, name string, ch chart.Charter, vals map[string]any) (ri.Releaser, error) { if err := u.cfg.KubeClient.IsReachable(); err != nil { return nil, err } @@ -211,7 +218,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, ch chart.Char } // prepareUpgrade builds an upgraded release for an upgrade operation. -func (u *Upgrade) prepareUpgrade(name string, chart *chartv2.Chart, vals map[string]interface{}) (*release.Release, *release.Release, bool, error) { +func (u *Upgrade) prepareUpgrade(name string, chart *chartv2.Chart, vals map[string]any) (*release.Release, *release.Release, bool, error) { if chart == nil { return nil, nil, false, errMissingChart } @@ -251,7 +258,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chartv2.Chart, vals map[str var cerr error currentRelease, cerr = releaserToV1Release(currentReleasei) if cerr != nil { - return nil, nil, false, err + return nil, nil, false, cerr } if err != nil { if errors.Is(err, driver.ErrNoDeployedReleases) && @@ -294,7 +301,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chartv2.Chart, vals map[str return nil, nil, false, err } - hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithServer(u.DryRunStrategy), u.EnableDNS, u.HideSecret) + hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, 
u.PostRenderer, interactWithServer(u.DryRunStrategy), u.EnableDNS, u.HideSecret, u.PostRenderStrategy) if err != nil { return nil, nil, false, err } @@ -406,7 +413,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR } rChan := make(chan resultMessage) ctxChan := make(chan resultMessage) - doneChan := make(chan interface{}) + doneChan := make(chan any) defer close(doneChan) go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease, serverSideApply) go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease) @@ -432,7 +439,7 @@ func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Re } // Setup listener for SIGINT and SIGTERM -func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c chan<- resultMessage, upgradedRelease *release.Release) { +func (u *Upgrade) handleContext(ctx context.Context, done chan any, c chan<- resultMessage, upgradedRelease *release.Release) { select { case <-ctx.Done(): err := ctx.Err() @@ -452,8 +459,8 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // pre-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { - u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { + u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %w", err)) return } } else { @@ -473,7 +480,12 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele return } - waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = 
c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...) + } else { + waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + } if err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) @@ -495,8 +507,8 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // post-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { - u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %w", err)) return } } @@ -570,6 +582,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e rollin := NewRollback(u.cfg) rollin.Version = filteredHistory[0].Version rollin.WaitStrategy = u.WaitStrategy + rollin.WaitOptions = u.WaitOptions rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.ForceReplace = u.ForceReplace @@ -593,7 +606,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e // // This is skipped if the u.ResetValues flag is set, in which case the // request values are not altered. -func (u *Upgrade) reuseValues(chart *chartv2.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { +func (u *Upgrade) reuseValues(chart *chartv2.Chart, current *release.Release, newVals map[string]any) (map[string]any, error) { if u.ResetValues { // If ResetValues is set, we completely ignore current.Config. 
u.cfg.Logger().Debug("resetting values to the chart's original version") diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index 17c4927cc..393692976 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -60,7 +60,7 @@ func TestUpgradeRelease_Success(t *testing.T) { req.NoError(upAction.cfg.Releases.Create(rel)) upAction.WaitStrategy = kube.StatusWatcherStrategy - vals := map[string]interface{}{} + vals := map[string]any{} ctx, done := context.WithCancel(t.Context()) resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals) @@ -91,10 +91,10 @@ func TestUpgradeRelease_Wait(t *testing.T) { require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") + failer.WaitError = errors.New("I timed out") upAction.cfg.KubeClient = failer upAction.WaitStrategy = kube.StatusWatcherStrategy - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) @@ -115,11 +115,11 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) { require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") + failer.WaitError = errors.New("I timed out") upAction.cfg.KubeClient = failer upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.WaitForJobs = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) @@ -140,12 +140,12 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.WaitError = fmt.Errorf("I timed out") - failer.DeleteError = fmt.Errorf("I tried to delete nil") + failer.WaitError = errors.New("I timed out") + failer.DeleteError = errors.New("I tried 
to delete nil") upAction.cfg.KubeClient = failer upAction.WaitStrategy = kube.StatusWatcherStrategy upAction.CleanupOnFail = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) @@ -170,10 +170,10 @@ func TestUpgradeRelease_RollbackOnFailure(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) // We can't make Update error because then the rollback won't work - failer.WatchUntilReadyError = fmt.Errorf("arming key removed") + failer.WatchUntilReadyError = errors.New("arming key removed") upAction.cfg.KubeClient = failer upAction.RollbackOnFailure = true - vals := map[string]interface{}{} + vals := map[string]any{} resi, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) @@ -199,10 +199,10 @@ func TestUpgradeRelease_RollbackOnFailure(t *testing.T) { require.NoError(t, upAction.cfg.Releases.Create(rel)) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) - failer.UpdateError = fmt.Errorf("update fail") + failer.UpdateError = errors.New("update fail") upAction.cfg.KubeClient = failer upAction.RollbackOnFailure = true - vals := map[string]interface{}{} + vals := map[string]any{} _, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) @@ -217,17 +217,17 @@ func TestUpgradeRelease_ReuseValues(t *testing.T) { t.Run("reuse values should work with values", func(t *testing.T) { upAction := upgradeAction(t) - existingValues := map[string]interface{}{ + existingValues := map[string]any{ "name": "value", "maxHeapSize": "128m", "replicas": 2, } - newValues := map[string]interface{}{ + newValues := map[string]any{ "name": "newValue", "maxHeapSize": "512m", "cpu": "12m", } - expectedValues := map[string]interface{}{ + expectedValues := map[string]any{ "name": "newValue", "maxHeapSize": "512m", "cpu": "12m", @@ -266,8 +266,8 @@ func TestUpgradeRelease_ReuseValues(t *testing.T) { t.Run("reuse values should not install disabled charts", 
func(t *testing.T) { upAction := upgradeAction(t) - chartDefaultValues := map[string]interface{}{ - "subchart": map[string]interface{}{ + chartDefaultValues := map[string]any{ + "subchart": map[string]any{ "enabled": true, }, } @@ -283,8 +283,8 @@ func TestUpgradeRelease_ReuseValues(t *testing.T) { withMetadataDependency(dependency), ) now := time.Now() - existingValues := map[string]interface{}{ - "subchart": map[string]interface{}{ + existingValues := map[string]any{ + "subchart": map[string]any{ "enabled": false, }, } @@ -311,7 +311,7 @@ func TestUpgradeRelease_ReuseValues(t *testing.T) { withMetadataDependency(dependency), ) // reusing values and upgrading - resi, err := upAction.Run(rel.Name, sampleChartWithSubChart, map[string]interface{}{}) + resi, err := upAction.Run(rel.Name, sampleChartWithSubChart, map[string]any{}) is.NoError(err) res, err := releaserToV1Release(resi) is.NoError(err) @@ -330,8 +330,8 @@ func TestUpgradeRelease_ReuseValues(t *testing.T) { is.Equal(common.StatusDeployed, updatedRes.Info.Status) is.Equal(0, len(updatedRes.Chart.Dependencies()), "expected 0 dependencies") - expectedValues := map[string]interface{}{ - "subchart": map[string]interface{}{ + expectedValues := map[string]any{ + "subchart": map[string]any{ "enabled": false, }, } @@ -345,20 +345,20 @@ func TestUpgradeRelease_ResetThenReuseValues(t *testing.T) { t.Run("reset then reuse values should work with values", func(t *testing.T) { upAction := upgradeAction(t) - existingValues := map[string]interface{}{ + existingValues := map[string]any{ "name": "value", "maxHeapSize": "128m", "replicas": 2, } - newValues := map[string]interface{}{ + newValues := map[string]any{ "name": "newValue", "maxHeapSize": "512m", "cpu": "12m", } - newChartValues := map[string]interface{}{ + newChartValues := map[string]any{ "memory": "256m", } - expectedValues := map[string]interface{}{ + expectedValues := map[string]any{ "name": "newValue", "maxHeapSize": "512m", "cpu": "12m", @@ -411,7 +411,7 @@ 
func TestUpgradeRelease_Pending(t *testing.T) { rel2.Version = 2 require.NoError(t, upAction.cfg.Releases.Create(rel2)) - vals := map[string]interface{}{} + vals := map[string]any{} _, err := upAction.Run(rel.Name, buildChart(), vals) req.Contains(err.Error(), "progress", err) @@ -431,7 +431,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { failer.WaitDuration = 10 * time.Second upAction.cfg.KubeClient = failer upAction.WaitStrategy = kube.StatusWatcherStrategy - vals := map[string]interface{}{} + vals := map[string]any{} ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) @@ -460,7 +460,7 @@ func TestUpgradeRelease_Interrupted_RollbackOnFailure(t *testing.T) { failer.WaitDuration = 5 * time.Second upAction.cfg.KubeClient = failer upAction.RollbackOnFailure = true - vals := map[string]interface{}{} + vals := map[string]any{} ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) @@ -590,7 +590,7 @@ func TestUpgradeRelease_DryRun(t *testing.T) { req.NoError(upAction.cfg.Releases.Create(rel)) upAction.DryRunStrategy = DryRunClient - vals := map[string]interface{}{} + vals := map[string]any{} ctx, done := context.WithCancel(t.Context()) resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals) @@ -610,7 +610,7 @@ func TestUpgradeRelease_DryRun(t *testing.T) { // Test the case for hiding the secret to ensure it is not displayed upAction.HideSecret = true - vals = map[string]interface{}{} + vals = map[string]any{} ctx, done = context.WithCancel(t.Context()) resi, err = upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals) @@ -630,7 +630,7 @@ func TestUpgradeRelease_DryRun(t *testing.T) { // Ensure in a dry run mode when using HideSecret upAction.DryRunStrategy = DryRunNone - vals = map[string]interface{}{} + vals = map[string]any{} ctx, done = context.WithCancel(t.Context()) _, err = upAction.RunWithContext(ctx, rel.Name, 
buildChart(withSampleSecret()), vals) @@ -752,7 +752,7 @@ func TestUpgradeRun_UnreachableKubeClient(t *testing.T) { config.KubeClient = &failingKubeClient client := NewUpgrade(config) - vals := map[string]interface{}{} + vals := map[string]any{} result, err := client.Run("", buildChart(), vals) assert.Nil(t, result) @@ -775,3 +775,30 @@ func TestObjectKey(t *testing.T) { assert.Equal(t, "apps/v1/Deployment/namespace/name", objectKey(&info)) } + +func TestUpgradeRelease_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + req := require.New(t) + + upAction := upgradeAction(t) + rel := releaseStub() + rel.Name = "wait-options-test" + rel.Info.Status = common.StatusDeployed + req.NoError(upAction.cfg.Releases.Create(rel)) + + upAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + upAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + vals := map[string]any{} + _, err := upAction.Run(rel.Name, buildChart(), vals) + req.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/validate.go b/pkg/action/validate.go index 3a9d63253..102259bf1 100644 --- a/pkg/action/validate.go +++ b/pkg/action/validate.go @@ -17,6 +17,7 @@ limitations under the License. 
package action import ( + "errors" "fmt" "maps" @@ -46,6 +47,11 @@ func requireAdoption(resources kube.ResourceList) (kube.ResourceList, error) { return err } + isGenerateName, err := validateNameAndGenerateName(info) + if isGenerateName || err != nil { + return err + } + helper := resource.NewHelper(info.Client, info.Mapping) _, err = helper.Get(info.Namespace, info.Name) if err != nil { @@ -71,6 +77,11 @@ func existingResourceConflict(resources kube.ResourceList, releaseName, releaseN return err } + isGenerateName, err := validateNameAndGenerateName(info) + if isGenerateName || err != nil { + return err + } + helper := resource.NewHelper(info.Client, info.Mapping) existing, err := helper.Get(info.Namespace, info.Name) if err != nil { @@ -250,3 +261,23 @@ func mergeStrStrMaps(current, desired map[string]string) map[string]string { maps.Copy(result, desired) return result } + +// validateNameAndGenerateName validates that an object only has either `Name` or `GenerateName` set (and not both) +// If `GenerateName` is set, true is returned +// If an invalid combination of `Name` and `GenerateName` is set, an error is returned +func validateNameAndGenerateName(info *resource.Info) (bool, error) { + accessor, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + + if info.Name == "" && accessor.GetGenerateName() != "" { + return true, nil + } + + if info.Name != "" && accessor.GetGenerateName() != "" { + return true, errors.New("metadata.name and metadata.generateName cannot both be set") + } + + return false, nil +} diff --git a/pkg/action/validate_test.go b/pkg/action/validate_test.go index d3e0edcde..b425299f5 100644 --- a/pkg/action/validate_test.go +++ b/pkg/action/validate_test.go @@ -36,7 +36,7 @@ import ( "k8s.io/client-go/rest/fake" ) -func newDeploymentResource(name, namespace string) *resource.Info { +func newDeploymentResource(name, namespace, generateName string) *resource.Info { return &resource.Info{ Name: name, Mapping:
&meta.RESTMapping{ @@ -45,8 +45,9 @@ func newDeploymentResource(name, namespace string) *resource.Info { }, Object: &appsv1.Deployment{ ObjectMeta: v1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: name, + Namespace: namespace, + GenerateName: generateName, }, }, } @@ -166,7 +167,7 @@ func TestExistingResourceConflict(t *testing.T) { } func TestCheckOwnership(t *testing.T) { - deployFoo := newDeploymentResource("foo", "ns-a") + deployFoo := newDeploymentResource("foo", "ns-a", "") // Verify that a resource that lacks labels/annotations is not owned err := checkOwnership(deployFoo.Object, "rel-a", "ns-a") @@ -305,8 +306,8 @@ func TestVerifyOwnershipBeforeDelete(t *testing.T) { func TestSetMetadataVisitor(t *testing.T) { var ( err error - deployFoo = newDeploymentResource("foo", "ns-a") - deployBar = newDeploymentResource("bar", "ns-a-system") + deployFoo = newDeploymentResource("foo", "ns-a", "") + deployBar = newDeploymentResource("bar", "ns-a-system", "") resources = kube.ResourceList{deployFoo, deployBar} ) @@ -327,8 +328,54 @@ func TestSetMetadataVisitor(t *testing.T) { assert.NoError(t, err) // Add a new resource that is missing ownership metadata and verify error - resources.Append(newDeploymentResource("baz", "default")) + resources.Append(newDeploymentResource("baz", "default", "")) err = resources.Visit(setMetadataVisitor("rel-b", "ns-a", false)) assert.Error(t, err) assert.Contains(t, err.Error(), `Deployment "baz" in namespace "" cannot be owned`) } + +func TestValidateNameAndGenerateName(t *testing.T) { + tests := []struct { + name string + info *resource.Info + wantSkip bool + wantErr bool + errContains string + }{ + { + name: "both name and generateName present", + info: newDeploymentResource("job-a", "foo", "job-a-"), + wantSkip: true, + wantErr: true, + errContains: "metadata.name and metadata.generateName cannot both be set", + }, + { + name: "only generateName present", + info: newDeploymentResource("", "foo", "job-a-"), + wantSkip: 
true, + wantErr: false, + }, + { + name: "only name present", + info: newDeploymentResource("job-a", "foo", ""), + wantSkip: false, + wantErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + skip, err := validateNameAndGenerateName(tc.info) + + if tc.wantErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tc.errContains) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tc.wantSkip, skip) + }) + } +} diff --git a/pkg/chart/common.go b/pkg/chart/common.go index cd87e91e7..cec2c7091 100644 --- a/pkg/chart/common.go +++ b/pkg/chart/common.go @@ -17,7 +17,6 @@ package chart import ( "errors" - "fmt" "log/slog" "reflect" "strings" @@ -56,8 +55,8 @@ func (r *v2Accessor) IsRoot() bool { return r.chrt.IsRoot() } -func (r *v2Accessor) MetadataAsMap() map[string]interface{} { - var ret map[string]interface{} +func (r *v2Accessor) MetadataAsMap() map[string]any { + var ret map[string]any if r.chrt.Metadata == nil { return ret } @@ -101,7 +100,7 @@ func (r *v2Accessor) MetaDependencies() []Dependency { return deps } -func (r *v2Accessor) Values() map[string]interface{} { +func (r *v2Accessor) Values() map[string]any { return r.chrt.Values } @@ -125,8 +124,8 @@ func (r *v3Accessor) IsRoot() bool { return r.chrt.IsRoot() } -func (r *v3Accessor) MetadataAsMap() map[string]interface{} { - var ret map[string]interface{} +func (r *v3Accessor) MetadataAsMap() map[string]any { + var ret map[string]any if r.chrt.Metadata == nil { return ret } @@ -170,7 +169,7 @@ func (r *v3Accessor) MetaDependencies() []Dependency { return deps } -func (r *v3Accessor) Values() map[string]interface{} { +func (r *v3Accessor) Values() map[string]any { return r.chrt.Values } @@ -182,7 +181,7 @@ func (r *v3Accessor) Deprecated() bool { return r.chrt.Metadata.Deprecated } -func structToMap(obj interface{}) (map[string]interface{}, error) { +func structToMap(obj any) (map[string]any, error) { objValue := reflect.ValueOf(obj) // If the value is a 
pointer, dereference it @@ -192,10 +191,10 @@ func structToMap(obj interface{}) (map[string]interface{}, error) { // Check if the input is a struct if objValue.Kind() != reflect.Struct { - return nil, fmt.Errorf("input must be a struct or a pointer to a struct") + return nil, errors.New("input must be a struct or a pointer to a struct") } - result := make(map[string]interface{}) + result := make(map[string]any) objType := objValue.Type() for i := 0; i < objValue.NumField(); i++ { @@ -221,7 +220,7 @@ func structToMap(obj interface{}) (map[string]interface{}, error) { result[field.Name] = nestedMap } case reflect.Slice: - sliceOfMaps := make([]interface{}, value.Len()) + sliceOfMaps := make([]any, value.Len()) for j := 0; j < value.Len(); j++ { sliceElement := value.Index(j) if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == reflect.Pointer { diff --git a/pkg/chart/common/capabilities.go b/pkg/chart/common/capabilities.go index 18d00de90..20f4953cf 100644 --- a/pkg/chart/common/capabilities.go +++ b/pkg/chart/common/capabilities.go @@ -157,7 +157,7 @@ func makeDefaultCapabilities() (*Capabilities, error) { v, err := semver.NewVersion(vstr) if err != nil { - return nil, fmt.Errorf("unable to parse k8s.io/client-go version %q: %v", vstr, err) + return nil, fmt.Errorf("unable to parse k8s.io/client-go version %q: %w", vstr, err) } kubeVersionMajor := v.Major() + 1 @@ -173,8 +173,8 @@ func newCapabilities(kubeVersionMajor, kubeVersionMinor uint64) (*Capabilities, KubeVersion: KubeVersion{ Version: version, normalizedVersion: version, - Major: fmt.Sprintf("%d", kubeVersionMajor), - Minor: fmt.Sprintf("%d", kubeVersionMinor), + Major: strconv.FormatUint(kubeVersionMajor, 10), + Minor: strconv.FormatUint(kubeVersionMinor, 10), }, APIVersions: DefaultVersionSet, HelmVersion: helmversion.Get(), diff --git a/pkg/chart/common/capabilities_test.go b/pkg/chart/common/capabilities_test.go index b96d7d29b..c9f026cbb 100644 --- a/pkg/chart/common/capabilities_test.go 
+++ b/pkg/chart/common/capabilities_test.go @@ -68,7 +68,7 @@ func TestDefaultCapabilities(t *testing.T) { func TestParseKubeVersion(t *testing.T) { kv, err := ParseKubeVersion("v1.16.0") if err != nil { - t.Errorf("Expected v1.16.0 to parse successfully") + t.Error("Expected v1.16.0 to parse successfully") } if kv.Version != "v1.16.0" { t.Errorf("Expected parsed KubeVersion.Version to be v1.16.0, got %q", kv.String()) diff --git a/pkg/chart/common/util/coalesce.go b/pkg/chart/common/util/coalesce.go index 07794a04a..999eeb208 100644 --- a/pkg/chart/common/util/coalesce.go +++ b/pkg/chart/common/util/coalesce.go @@ -42,7 +42,7 @@ func concatPrefix(a, b string) string { // - Scalar values and arrays are replaced, maps are merged // - A chart has access to all of the variables for it, as well as all of // the values destined for its dependencies. -func CoalesceValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) { +func CoalesceValues(chrt chart.Charter, vals map[string]any) (common.Values, error) { valsCopy, err := copyValues(vals) if err != nil { return vals, err @@ -64,7 +64,7 @@ func CoalesceValues(chrt chart.Charter, vals map[string]interface{}) (common.Val // Retaining Nils is useful when processes early in a Helm action or business // logic need to retain them for when Coalescing will happen again later in the // business logic. 
-func MergeValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) { +func MergeValues(chrt chart.Charter, vals map[string]any) (common.Values, error) { valsCopy, err := copyValues(vals) if err != nil { return vals, err @@ -72,22 +72,22 @@ func MergeValues(chrt chart.Charter, vals map[string]interface{}) (common.Values return coalesce(log.Printf, chrt, valsCopy, "", true) } -func copyValues(vals map[string]interface{}) (common.Values, error) { +func copyValues(vals map[string]any) (common.Values, error) { v, err := copystructure.Copy(vals) if err != nil { return vals, err } - valsCopy := v.(map[string]interface{}) + valsCopy := v.(map[string]any) // if we have an empty map, make sure it is initialized if valsCopy == nil { - valsCopy = make(map[string]interface{}) + valsCopy = make(map[string]any) } return valsCopy, nil } -type printFn func(format string, v ...interface{}) +type printFn func(format string, v ...any) // coalesce coalesces the dest values and the chart values, giving priority to the dest values. // @@ -96,13 +96,13 @@ type printFn func(format string, v ...interface{}) // Note, the merge argument specifies whether this is being used by MergeValues // or CoalesceValues. Coalescing removes null values and their keys in some // situations while merging keeps the null values. -func coalesce(printf printFn, ch chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { +func coalesce(printf printFn, ch chart.Charter, dest map[string]any, prefix string, merge bool) (map[string]any, error) { coalesceValues(printf, ch, dest, prefix, merge) return coalesceDeps(printf, ch, dest, prefix, merge) } // coalesceDeps coalesces the dependencies of the given chart. 
-func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { +func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]any, prefix string, merge bool) (map[string]any, error) { ch, err := chart.NewAccessor(chrt) if err != nil { return dest, err @@ -114,12 +114,12 @@ func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{ } if c, ok := dest[sub.Name()]; !ok { // If dest doesn't already have the key, create it. - dest[sub.Name()] = make(map[string]interface{}) + dest[sub.Name()] = make(map[string]any) } else if !istable(c) { return dest, fmt.Errorf("type mismatch on %s: %t", sub.Name(), c) } if dv, ok := dest[sub.Name()]; ok { - dvmap := dv.(map[string]interface{}) + dvmap := dv.(map[string]any) subPrefix := concatPrefix(prefix, ch.Name()) // Get globals out of dest and merge them into dvmap. coalesceGlobals(printf, dvmap, dest, subPrefix, merge) @@ -137,19 +137,19 @@ func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{ // coalesceGlobals copies the globals out of src and merges them into dest. // // For convenience, returns dest. 
-func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) { - var dg, sg map[string]interface{} +func coalesceGlobals(printf printFn, dest, src map[string]any, prefix string, _ bool) { + var dg, sg map[string]any if destglob, ok := dest[common.GlobalKey]; !ok { - dg = make(map[string]interface{}) - } else if dg, ok = destglob.(map[string]interface{}); !ok { + dg = make(map[string]any) + } else if dg, ok = destglob.(map[string]any); !ok { printf("warning: skipping globals because destination %s is not a table.", common.GlobalKey) return } if srcglob, ok := src[common.GlobalKey]; !ok { - sg = make(map[string]interface{}) - } else if sg, ok = srcglob.(map[string]interface{}); !ok { + sg = make(map[string]any) + } else if sg, ok = srcglob.(map[string]any); !ok { printf("warning: skipping globals because source %s is not a table.", common.GlobalKey) return } @@ -160,12 +160,12 @@ func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix st // tables in globals. for key, val := range sg { if istable(val) { - vv := copyMap(val.(map[string]interface{})) + vv := copyMap(val.(map[string]any)) if destv, ok := dg[key]; !ok { // Here there is no merge. We're just adding. dg[key] = vv } else { - if destvmap, ok := destv.(map[string]interface{}); !ok { + if destvmap, ok := destv.(map[string]any); !ok { printf("Conflict: cannot merge map onto non-map for %q. 
Skipping.", key) } else { // Basically, we reverse order of coalesce here to merge @@ -189,8 +189,8 @@ func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix st dest[common.GlobalKey] = dg } -func copyMap(src map[string]interface{}) map[string]interface{} { - m := make(map[string]interface{}, len(src)) +func copyMap(src map[string]any) map[string]any { + m := make(map[string]any, len(src)) maps.Copy(m, src) return m } @@ -198,7 +198,7 @@ func copyMap(src map[string]interface{}) map[string]interface{} { // coalesceValues builds up a values map for a particular chart. // // Values in v will override the values in the chart. -func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, prefix string, merge bool) { +func coalesceValues(printf printFn, c chart.Charter, v map[string]any, prefix string, merge bool) { ch, err := chart.NewAccessor(c) if err != nil { return @@ -210,7 +210,7 @@ func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, p // the original c.Values is altered. Creating a deep copy stops the problem. // This section is fault-tolerant as there is no ability to return an error. valuesCopy, err := copystructure.Copy(ch.Values()) - var vc map[string]interface{} + var vc map[string]any var ok bool if err != nil { // If there is an error something is wrong with copying c.Values it @@ -220,7 +220,7 @@ func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, p printf("warning: unable to copy values, err: %s", err) vc = ch.Values() } else { - vc, ok = valuesCopy.(map[string]interface{}) + vc, ok = valuesCopy.(map[string]any) if !ok { // c.Values has a map[string]interface{} structure. 
If the copy of // it cannot be treated as map[string]interface{} there is something @@ -238,9 +238,9 @@ func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, p // This allows Helm's various sources of values (value files or --set) to // remove incompatible keys from any previous chart, file, or set values. delete(v, key) - } else if dest, ok := value.(map[string]interface{}); ok { + } else if dest, ok := value.(map[string]any); ok { // if v[key] is a table, merge nv's val table into v[key]. - src, ok := val.(map[string]interface{}) + src, ok := val.(map[string]any) if !ok { // If the original value is nil, there is nothing to coalesce, so we don't print // the warning @@ -251,6 +251,12 @@ func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, p // If the key is a child chart, coalesce tables with Merge set to true merge := childChartMergeTrue(c, key, merge) + // When coalescing, clean nils from chart defaults before merging + // so they don't leak into the result. + if !merge { + cleanNilValues(src) + } + // Because v has higher precedence than nv, dest values override src // values. coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) @@ -258,6 +264,16 @@ func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, p } } else { // If the key is not in v, copy it from nv. + // When coalescing, skip chart default nils and clean nils from + // nested maps so they don't shadow globals or produce %!s(). + if !merge { + if val == nil { + continue + } + if sub, ok := val.(map[string]any); ok { + cleanNilValues(sub) + } + } v[key] = val } } @@ -283,18 +299,18 @@ func childChartMergeTrue(chrt chart.Charter, key string, merge bool) bool { // CoalesceTables merges a source map into a destination map. // // dest is considered authoritative. 
-func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { +func CoalesceTables(dst, src map[string]any) map[string]any { return coalesceTablesFullKey(log.Printf, dst, src, "", false) } -func MergeTables(dst, src map[string]interface{}) map[string]interface{} { +func MergeTables(dst, src map[string]any) map[string]any { return coalesceTablesFullKey(log.Printf, dst, src, "", true) } // coalesceTablesFullKey merges a source map into a destination map. // // dest is considered authoritative. -func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string, merge bool) map[string]interface{} { +func coalesceTablesFullKey(printf printFn, dst, src map[string]any, prefix string, merge bool) map[string]any { // When --reuse-values is set but there are no modifications yet, return new values if src == nil { return dst @@ -302,6 +318,15 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref if dst == nil { return src } + // Track original non-nil src keys before modifying src + // This lets us distinguish between user nullifying a chart default vs + // user setting nil for a key not in chart defaults. + srcOriginalNonNil := make(map[string]bool) + for key, val := range src { + if val != nil { + srcOriginalNonNil[key] = true + } + } for key, val := range dst { if val == nil { src[key] = nil @@ -311,13 +336,16 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref // values. for key, val := range src { fullkey := concatPrefix(prefix, key) - if dv, ok := dst[key]; ok && !merge && dv == nil { + if dv, ok := dst[key]; ok && !merge && dv == nil && srcOriginalNonNil[key] { + // When coalescing (not merging), if dst has nil and src has a non-nil + // value, the user is nullifying a chart default - remove the key. 
+ // But if src also has nil (or key not in src), preserve the nil delete(dst, key) } else if !ok { dst[key] = val } else if istable(val) { if istable(dv) { - coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge) + coalesceTablesFullKey(printf, dv.(map[string]any), val.(map[string]any), fullkey, merge) } else { printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) } @@ -328,8 +356,20 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref return dst } +// cleanNilValues recursively removes nil entries in-place from a map so that chart +// default nils don't leak into the coalesced result. +func cleanNilValues(m map[string]any) { + for key, val := range m { + if val == nil { + delete(m, key) + } else if sub, ok := val.(map[string]any); ok { + cleanNilValues(sub) + } + } +} + // istable is a special-purpose function to see if the present thing matches the definition of a YAML table. 
-func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) +func istable(v any) bool { + _, ok := v.(map[string]any) return ok } diff --git a/pkg/chart/common/util/coalesce_test.go b/pkg/chart/common/util/coalesce_test.go index 871bfa8da..252ef11ec 100644 --- a/pkg/chart/common/util/coalesce_test.go +++ b/pkg/chart/common/util/coalesce_test.go @@ -75,65 +75,65 @@ func TestCoalesceValues(t *testing.T) { c := withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "moby"}, - Values: map[string]interface{}{ + Values: map[string]any{ "back": "exists", "bottom": "exists", "front": "exists", "left": "exists", "name": "moby", - "nested": map[string]interface{}{"boat": true}, + "nested": map[string]any{"boat": true}, "override": "bad", "right": "exists", "scope": "moby", "top": "nope", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l0": "moby"}, + "global": map[string]any{ + "nested2": map[string]any{"l0": "moby"}, }, - "pequod": map[string]interface{}{ + "pequod": map[string]any{ "boat": "maybe", - "ahab": map[string]interface{}{ + "ahab": map[string]any{ "boat": "maybe", - "nested": map[string]interface{}{"boat": "maybe"}, + "nested": map[string]any{"boat": "maybe"}, }, }, }, }, withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "pequod"}, - Values: map[string]interface{}{ + Values: map[string]any{ "name": "pequod", "scope": "pequod", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "pequod"}, + "global": map[string]any{ + "nested2": map[string]any{"l1": "pequod"}, }, "boat": false, - "ahab": map[string]interface{}{ + "ahab": map[string]any{ "boat": false, - "nested": map[string]interface{}{"boat": false}, + "nested": map[string]any{"boat": false}, }, }, }, &chart.Chart{ Metadata: &chart.Metadata{Name: "ahab"}, - Values: map[string]interface{}{ - "global": map[string]interface{}{ - "nested": map[string]interface{}{"foo": "bar", "foo2": "bar2"}, - "nested2": map[string]interface{}{"l2": "ahab"}, + 
Values: map[string]any{ + "global": map[string]any{ + "nested": map[string]any{"foo": "bar", "foo2": "bar2"}, + "nested2": map[string]any{"l2": "ahab"}, }, "scope": "ahab", "name": "ahab", "boat": true, - "nested": map[string]interface{}{"foo": false, "boat": true}, - "object": map[string]interface{}{"foo": "bar"}, + "nested": map[string]any{"foo": false, "boat": true}, + "object": map[string]any{"foo": "bar"}, }, }, ), &chart.Chart{ Metadata: &chart.Metadata{Name: "spouter"}, - Values: map[string]interface{}{ + Values: map[string]any{ "scope": "spouter", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "spouter"}, + "global": map[string]any{ + "nested2": map[string]any{"l1": "spouter"}, }, }, }, @@ -215,21 +215,21 @@ func TestCoalesceValues(t *testing.T) { } } - if _, ok := v["nested"].(map[string]interface{})["boat"]; ok { + if _, ok := v["nested"].(map[string]any)["boat"]; ok { t.Error("Expected nested boat key to be removed, still present") } - subchart := v["pequod"].(map[string]interface{}) + subchart := v["pequod"].(map[string]any) if _, ok := subchart["boat"]; ok { t.Error("Expected subchart boat key to be removed, still present") } - subsubchart := subchart["ahab"].(map[string]interface{}) + subsubchart := subchart["ahab"].(map[string]any) if _, ok := subsubchart["boat"]; ok { t.Error("Expected sub-subchart ahab boat key to be removed, still present") } - if _, ok := subsubchart["nested"].(map[string]interface{})["boat"]; ok { + if _, ok := subsubchart["nested"].(map[string]any)["boat"]; ok { t.Error("Expected sub-subchart nested boat key to be removed, still present") } @@ -241,7 +241,7 @@ func TestCoalesceValues(t *testing.T) { is.Equal(valsCopy, vals) } -func ttpl(tpl string, v map[string]interface{}) (string, error) { +func ttpl(tpl string, v map[string]any) (string, error) { var b bytes.Buffer tt := template.Must(template.New("t").Parse(tpl)) err := tt.Execute(&b, v) @@ -253,52 +253,52 @@ func TestMergeValues(t *testing.T) 
{ c := withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "moby"}, - Values: map[string]interface{}{ + Values: map[string]any{ "back": "exists", "bottom": "exists", "front": "exists", "left": "exists", "name": "moby", - "nested": map[string]interface{}{"boat": true}, + "nested": map[string]any{"boat": true}, "override": "bad", "right": "exists", "scope": "moby", "top": "nope", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l0": "moby"}, + "global": map[string]any{ + "nested2": map[string]any{"l0": "moby"}, }, }, }, withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "pequod"}, - Values: map[string]interface{}{ + Values: map[string]any{ "name": "pequod", "scope": "pequod", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "pequod"}, + "global": map[string]any{ + "nested2": map[string]any{"l1": "pequod"}, }, }, }, &chart.Chart{ Metadata: &chart.Metadata{Name: "ahab"}, - Values: map[string]interface{}{ - "global": map[string]interface{}{ - "nested": map[string]interface{}{"foo": "bar"}, - "nested2": map[string]interface{}{"l2": "ahab"}, + Values: map[string]any{ + "global": map[string]any{ + "nested": map[string]any{"foo": "bar"}, + "nested2": map[string]any{"l2": "ahab"}, }, "scope": "ahab", "name": "ahab", "boat": true, - "nested": map[string]interface{}{"foo": false, "bar": true}, + "nested": map[string]any{"foo": false, "bar": true}, }, }, ), &chart.Chart{ Metadata: &chart.Metadata{Name: "spouter"}, - Values: map[string]interface{}{ + Values: map[string]any{ "scope": "spouter", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "spouter"}, + "global": map[string]any{ + "nested2": map[string]any{"l1": "spouter"}, }, }, }, @@ -383,16 +383,16 @@ func TestMergeValues(t *testing.T) { } } - if _, ok := v["nested"].(map[string]interface{})["boat"]; !ok { + if _, ok := v["nested"].(map[string]any)["boat"]; !ok { t.Error("Expected nested boat key to be present but it was removed") } - 
subchart := v["pequod"].(map[string]interface{})["ahab"].(map[string]interface{}) + subchart := v["pequod"].(map[string]any)["ahab"].(map[string]any) if _, ok := subchart["boat"]; !ok { t.Error("Expected subchart boat key to be present but it was removed") } - if _, ok := subchart["nested"].(map[string]interface{})["bar"]; !ok { + if _, ok := subchart["nested"].(map[string]any)["bar"]; !ok { t.Error("Expected subchart nested bar key to be present but it was removed") } @@ -401,28 +401,28 @@ func TestMergeValues(t *testing.T) { } func TestCoalesceTables(t *testing.T) { - dst := map[string]interface{}{ + dst := map[string]any{ "name": "Ishmael", - "address": map[string]interface{}{ + "address": map[string]any{ "street": "123 Spouter Inn Ct.", "city": "Nantucket", "country": nil, }, - "details": map[string]interface{}{ + "details": map[string]any{ "friends": []string{"Tashtego"}, }, "boat": "pequod", "hole": nil, } - src := map[string]interface{}{ + src := map[string]any{ "occupation": "whaler", - "address": map[string]interface{}{ + "address": map[string]any{ "state": "MA", "street": "234 Spouter Inn Ct.", "country": "US", }, "details": "empty", - "boat": map[string]interface{}{ + "boat": map[string]any{ "mast": true, }, "hole": "black", @@ -439,7 +439,7 @@ func TestCoalesceTables(t *testing.T) { t.Errorf("Unexpected occupation: %s", dst["occupation"]) } - addr, ok := dst["address"].(map[string]interface{}) + addr, ok := dst["address"].(map[string]any) if !ok { t.Fatal("Address went away.") } @@ -460,7 +460,7 @@ func TestCoalesceTables(t *testing.T) { t.Error("The country is not left out.") } - if det, ok := dst["details"].(map[string]interface{}); !ok { + if det, ok := dst["details"].(map[string]any); !ok { t.Fatalf("Details is the wrong type: %v", dst["details"]) } else if _, ok := det["friends"]; !ok { t.Error("Could not find your friends. Maybe you don't have any. 
:-(") @@ -474,14 +474,14 @@ func TestCoalesceTables(t *testing.T) { t.Error("The hole still exists.") } - dst2 := map[string]interface{}{ + dst2 := map[string]any{ "name": "Ishmael", - "address": map[string]interface{}{ + "address": map[string]any{ "street": "123 Spouter Inn Ct.", "city": "Nantucket", "country": "US", }, - "details": map[string]interface{}{ + "details": map[string]any{ "friends": []string{"Tashtego"}, }, "boat": "pequod", @@ -496,7 +496,7 @@ func TestCoalesceTables(t *testing.T) { t.Errorf("Unexpected name: %s", dst2["name"]) } - addr2, ok := dst2["address"].(map[string]interface{}) + addr2, ok := dst2["address"].(map[string]any) if !ok { t.Fatal("Address went away.") } @@ -513,7 +513,7 @@ func TestCoalesceTables(t *testing.T) { t.Errorf("Unexpected Country: %v", addr2["country"]) } - if det2, ok := dst2["details"].(map[string]interface{}); !ok { + if det2, ok := dst2["details"].(map[string]any); !ok { t.Fatalf("Details is the wrong type: %v", dst2["details"]) } else if _, ok := det2["friends"]; !ok { t.Error("Could not find your friends. Maybe you don't have any. 
:-(") @@ -529,28 +529,28 @@ func TestCoalesceTables(t *testing.T) { } func TestMergeTables(t *testing.T) { - dst := map[string]interface{}{ + dst := map[string]any{ "name": "Ishmael", - "address": map[string]interface{}{ + "address": map[string]any{ "street": "123 Spouter Inn Ct.", "city": "Nantucket", "country": nil, }, - "details": map[string]interface{}{ + "details": map[string]any{ "friends": []string{"Tashtego"}, }, "boat": "pequod", "hole": nil, } - src := map[string]interface{}{ + src := map[string]any{ "occupation": "whaler", - "address": map[string]interface{}{ + "address": map[string]any{ "state": "MA", "street": "234 Spouter Inn Ct.", "country": "US", }, "details": "empty", - "boat": map[string]interface{}{ + "boat": map[string]any{ "mast": true, }, "hole": "black", @@ -567,7 +567,7 @@ func TestMergeTables(t *testing.T) { t.Errorf("Unexpected occupation: %s", dst["occupation"]) } - addr, ok := dst["address"].(map[string]interface{}) + addr, ok := dst["address"].(map[string]any) if !ok { t.Fatal("Address went away.") } @@ -590,7 +590,7 @@ func TestMergeTables(t *testing.T) { t.Error("The country is left out.") } - if det, ok := dst["details"].(map[string]interface{}); !ok { + if det, ok := dst["details"].(map[string]any); !ok { t.Fatalf("Details is the wrong type: %v", dst["details"]) } else if _, ok := det["friends"]; !ok { t.Error("Could not find your friends. Maybe you don't have any. 
:-(") @@ -606,14 +606,14 @@ func TestMergeTables(t *testing.T) { t.Error("The hole no longer exists.") } - dst2 := map[string]interface{}{ + dst2 := map[string]any{ "name": "Ishmael", - "address": map[string]interface{}{ + "address": map[string]any{ "street": "123 Spouter Inn Ct.", "city": "Nantucket", "country": "US", }, - "details": map[string]interface{}{ + "details": map[string]any{ "friends": []string{"Tashtego"}, }, "boat": "pequod", @@ -629,7 +629,7 @@ func TestMergeTables(t *testing.T) { t.Errorf("Unexpected name: %s", dst2["name"]) } - addr2, ok := dst2["address"].(map[string]interface{}) + addr2, ok := dst2["address"].(map[string]any) if !ok { t.Fatal("Address went away.") } @@ -646,7 +646,7 @@ func TestMergeTables(t *testing.T) { t.Errorf("Unexpected Country: %v", addr2["country"]) } - if det2, ok := dst2["details"].(map[string]interface{}); !ok { + if det2, ok := dst2["details"].(map[string]any); !ok { t.Fatalf("Details is the wrong type: %v", dst2["details"]) } else if _, ok := det2["friends"]; !ok { t.Error("Could not find your friends. Maybe you don't have any. 
:-(") @@ -669,24 +669,24 @@ func TestCoalesceValuesWarnings(t *testing.T) { c := withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "level1"}, - Values: map[string]interface{}{ + Values: map[string]any{ "name": "moby", }, }, withDeps(&chart.Chart{ Metadata: &chart.Metadata{Name: "level2"}, - Values: map[string]interface{}{ + Values: map[string]any{ "name": "pequod", }, }, &chart.Chart{ Metadata: &chart.Metadata{Name: "level3"}, - Values: map[string]interface{}{ + Values: map[string]any{ "name": "ahab", "boat": true, - "spear": map[string]interface{}{ + "spear": map[string]any{ "tip": true, - "sail": map[string]interface{}{ + "sail": map[string]any{ "cotton": true, }, }, @@ -695,12 +695,12 @@ func TestCoalesceValuesWarnings(t *testing.T) { ), ) - vals := map[string]interface{}{ - "level2": map[string]interface{}{ - "level3": map[string]interface{}{ - "boat": map[string]interface{}{"mast": true}, - "spear": map[string]interface{}{ - "tip": map[string]interface{}{ + vals := map[string]any{ + "level2": map[string]any{ + "level3": map[string]any{ + "boat": map[string]any{"mast": true}, + "spear": map[string]any{ + "tip": map[string]any{ "sharp": true, }, "sail": true, @@ -710,7 +710,7 @@ func TestCoalesceValuesWarnings(t *testing.T) { } warnings := make([]string, 0) - printf := func(format string, v ...interface{}) { + printf := func(format string, v ...any) { t.Logf(format, v...) 
warnings = append(warnings, fmt.Sprintf(format, v...)) } @@ -731,3 +731,200 @@ func TestConcatPrefix(t *testing.T) { assert.Equal(t, "b", concatPrefix("", "b")) assert.Equal(t, "a.b", concatPrefix("a", "b")) } + +// TestCoalesceValuesEmptyMapWithNils tests the full CoalesceValues scenario +// from issue #31643 where chart has data: {} and user provides data: {foo: bar, baz: ~} +func TestCoalesceValuesEmptyMapWithNils(t *testing.T) { + is := assert.New(t) + + c := &chart.Chart{ + Metadata: &chart.Metadata{Name: "test"}, + Values: map[string]any{ + "data": map[string]any{}, // empty map in chart defaults + }, + } + + vals := map[string]any{ + "data": map[string]any{ + "foo": "bar", + "baz": nil, // explicit nil from user + }, + } + + v, err := CoalesceValues(c, vals) + is.NoError(err) + + data, ok := v["data"].(map[string]any) + is.True(ok, "data is not a map") + + // "foo" should be preserved + is.Equal("bar", data["foo"]) + + // "baz" should be preserved with nil value since it wasn't in chart defaults + _, ok = data["baz"] + is.True(ok, "Expected data.baz key to be present but it was removed") + is.Nil(data["baz"], "Expected data.baz key to be nil but it is not") +} + +// TestCoalesceValuesSubchartDefaultNilsCleaned tests that nil values in subchart defaults +// are cleaned up during coalescing when the parent doesn't set those keys. +// Regression test for issue #31919. +func TestCoalesceValuesSubchartDefaultNilsCleaned(t *testing.T) { + is := assert.New(t) + + // Subchart has a default with nil values (e.g. 
keyMapping: {password: null}) + subchart := &chart.Chart{ + Metadata: &chart.Metadata{Name: "child"}, + Values: map[string]any{ + "keyMapping": map[string]any{ + "password": nil, + }, + }, + } + + parent := withDeps(&chart.Chart{ + Metadata: &chart.Metadata{Name: "parent"}, + Values: map[string]any{}, + }, subchart) + + // Parent user values don't mention keyMapping at all + vals := map[string]any{} + + v, err := CoalesceValues(parent, vals) + is.NoError(err) + + childVals, ok := v["child"].(map[string]any) + is.True(ok, "child values should be a map") + + keyMapping, ok := childVals["keyMapping"].(map[string]any) + is.True(ok, "keyMapping should be a map") + + // The nil "password" key from chart defaults should be cleaned up + _, ok = keyMapping["password"] + is.False(ok, "Expected keyMapping.password (nil from chart defaults) to be removed, but it is still present") +} + +// TestCoalesceValuesUserNullErasesSubchartDefault tests that a user-supplied null +// value erases a subchart's default value during coalescing. +// Regression test for issue #31919. 
+func TestCoalesceValuesUserNullErasesSubchartDefault(t *testing.T) { + is := assert.New(t) + + subchart := &chart.Chart{ + Metadata: &chart.Metadata{Name: "child"}, + Values: map[string]any{ + "someKey": "default", + }, + } + + parent := withDeps(&chart.Chart{ + Metadata: &chart.Metadata{Name: "parent"}, + Values: map[string]any{}, + }, subchart) + + // User explicitly nullifies the subchart key via parent values + vals := map[string]any{ + "child": map[string]any{ + "someKey": nil, + }, + } + + v, err := CoalesceValues(parent, vals) + is.NoError(err) + + childVals, ok := v["child"].(map[string]any) + is.True(ok, "child values should be a map") + + // someKey should be erased — user null overrides subchart default + _, ok = childVals["someKey"] + is.False(ok, "Expected someKey to be removed by user null override, but it is still present") +} + +// TestCoalesceValuesSubchartNilDoesNotShadowGlobal tests that a nil value in +// subchart defaults doesn't shadow a global value accessible via pluck-like access. +// Regression test for issue #31971. 
+func TestCoalesceValuesSubchartNilDoesNotShadowGlobal(t *testing.T) { + is := assert.New(t) + + subchart := &chart.Chart{ + Metadata: &chart.Metadata{Name: "child"}, + Values: map[string]any{ + "ingress": map[string]any{ + "feature": nil, // nil in subchart defaults + }, + }, + } + + parent := withDeps(&chart.Chart{ + Metadata: &chart.Metadata{Name: "parent"}, + Values: map[string]any{}, + }, subchart) + + // Parent sets the global value + vals := map[string]any{ + "global": map[string]any{ + "ingress": map[string]any{ + "feature": true, + }, + }, + } + + v, err := CoalesceValues(parent, vals) + is.NoError(err) + + childVals, ok := v["child"].(map[string]any) + is.True(ok, "child values should be a map") + + ingress, ok := childVals["ingress"].(map[string]any) + is.True(ok, "ingress should be a map") + + // The nil "feature" from subchart defaults should be cleaned up, + // so that pluck can fall through to the global value + _, ok = ingress["feature"] + is.False(ok, "Expected ingress.feature (nil from chart defaults) to be removed so global can be used via pluck, but it is still present") +} + +// TestCoalesceValuesSubchartNilCleanedWhenUserPartiallyOverrides tests that nil +// values in subchart defaults are cleaned even when the user partially overrides +// the same map. Regression test for the coalesceTablesFullKey merge path. 
+func TestCoalesceValuesSubchartNilCleanedWhenUserPartiallyOverrides(t *testing.T) { + is := assert.New(t) + + subchart := &chart.Chart{ + Metadata: &chart.Metadata{Name: "child"}, + Values: map[string]any{ + "keyMapping": map[string]any{ + "password": nil, + "format": "bcrypt", + }, + }, + } + + parent := withDeps(&chart.Chart{ + Metadata: &chart.Metadata{Name: "parent"}, + Values: map[string]any{}, + }, subchart) + + // User overrides format but doesn't mention password + vals := map[string]any{ + "child": map[string]any{ + "keyMapping": map[string]any{ + "format": "sha256", + }, + }, + } + + v, err := CoalesceValues(parent, vals) + is.NoError(err) + + childVals, ok := v["child"].(map[string]any) + is.True(ok, "child values should be a map") + + keyMapping, ok := childVals["keyMapping"].(map[string]any) + is.True(ok, "keyMapping should be a map") + + is.Equal("sha256", keyMapping["format"], "User override should be preserved") + + _, ok = keyMapping["password"] + is.False(ok, "Expected keyMapping.password (nil from chart defaults) to be removed even when user partially overrides the map") +} diff --git a/pkg/chart/common/util/jsonschema.go b/pkg/chart/common/util/jsonschema.go index 6d7f32604..63ca0c274 100644 --- a/pkg/chart/common/util/jsonschema.go +++ b/pkg/chart/common/util/jsonschema.go @@ -73,7 +73,7 @@ func newHTTPURLLoader() *HTTPURLLoader { } // ValidateAgainstSchema checks that values does not violate the structure laid out in schema -func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) error { +func ValidateAgainstSchema(ch chart.Charter, values map[string]any) error { chrt, err := chart.NewAccessor(ch) if err != nil { return err @@ -83,7 +83,7 @@ func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) erro slog.Debug("chart name", "chart-name", chrt.Name()) err := ValidateAgainstSingleSchema(values, chrt.Schema()) if err != nil { - sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) + fmt.Fprintf(&sb, "%s:\n", 
chrt.Name()) sb.WriteString(err.Error()) } } @@ -103,10 +103,8 @@ func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) erro subchartValues, ok := raw.(map[string]any) if !ok { - sb.WriteString(fmt.Sprintf( - "%s:\ninvalid type for values: expected object (map), got %T\n", - sub.Name(), raw, - )) + fmt.Fprintf(&sb, "%s:\ninvalid type for values: expected object (map), got %T\n", + sub.Name(), raw) continue } diff --git a/pkg/chart/common/util/jsonschema_test.go b/pkg/chart/common/util/jsonschema_test.go index 834b1faf6..838d152a1 100644 --- a/pkg/chart/common/util/jsonschema_test.go +++ b/pkg/chart/common/util/jsonschema_test.go @@ -54,7 +54,7 @@ func TestValidateAgainstInvalidSingleSchema(t *testing.T) { var errString string if err := ValidateAgainstSingleSchema(values, schema); err == nil { - t.Fatalf("Expected an error, but got nil") + t.Fatal("Expected an error, but got nil") } else { errString = err.Error() } @@ -78,7 +78,7 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) { var errString string if err := ValidateAgainstSingleSchema(values, schema); err == nil { - t.Fatalf("Expected an error, but got nil") + t.Fatal("Expected an error, but got nil") } else { errString = err.Error() } @@ -138,9 +138,9 @@ func TestValidateAgainstSchema(t *testing.T) { } chrt.AddDependency(subchart) - vals := map[string]interface{}{ + vals := map[string]any{ "name": "John", - "subchart": map[string]interface{}{ + "subchart": map[string]any{ "age": 25, }, } @@ -165,14 +165,14 @@ func TestValidateAgainstSchemaNegative(t *testing.T) { } chrt.AddDependency(subchart) - vals := map[string]interface{}{ + vals := map[string]any{ "name": "John", - "subchart": map[string]interface{}{}, + "subchart": map[string]any{}, } var errString string if err := ValidateAgainstSchema(chrt, vals); err == nil { - t.Fatalf("Expected an error, but got nil") + t.Fatal("Expected an error, but got nil") } else { errString = err.Error() } @@ -200,9 +200,9 @@ func 
TestValidateAgainstSchema2020(t *testing.T) { } chrt.AddDependency(subchart) - vals := map[string]interface{}{ + vals := map[string]any{ "name": "John", - "subchart": map[string]interface{}{ + "subchart": map[string]any{ "data": []any{"hello", 12}, }, } @@ -227,16 +227,16 @@ func TestValidateAgainstSchema2020Negative(t *testing.T) { } chrt.AddDependency(subchart) - vals := map[string]interface{}{ + vals := map[string]any{ "name": "John", - "subchart": map[string]interface{}{ + "subchart": map[string]any{ "data": []any{12}, }, } var errString string if err := ValidateAgainstSchema(chrt, vals); err == nil { - t.Fatalf("Expected an error, but got nil") + t.Fatal("Expected an error, but got nil") } else { errString = err.Error() } @@ -294,7 +294,7 @@ func TestValidateAgainstSingleSchema_UnresolvedURN_Ignored(t *testing.T) { "$schema": "https://json-schema.org/draft-07/schema#", "$ref": "urn:example:helm:schemas:v1:helm-schema-validation-conditions:v1/helmSchemaValidation-true" }`) - vals := map[string]interface{}{"any": "value"} + vals := map[string]any{"any": "value"} if err := ValidateAgainstSingleSchema(vals, schema); err != nil { t.Fatalf("expected no error when URN unresolved is ignored, got: %v", err) } @@ -386,6 +386,6 @@ func TestValidateAgainstSchema_InvalidSubchartValuesType_NoPanic(t *testing.T) { // We expect a non-nil error (invalid type), but crucially no panic. 
if err := ValidateAgainstSchema(chrt, vals); err == nil { - t.Fatalf("expected an error when subchart values have invalid type, got nil") + t.Fatal("expected an error when subchart values have invalid type, got nil") } } diff --git a/pkg/chart/common/util/values.go b/pkg/chart/common/util/values.go index 85cb29012..95ac7ba4d 100644 --- a/pkg/chart/common/util/values.go +++ b/pkg/chart/common/util/values.go @@ -26,14 +26,14 @@ import ( // ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files // // This takes both ReleaseOptions and Capabilities to merge into the render values. -func ToRenderValues(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities) (common.Values, error) { +func ToRenderValues(chrt chart.Charter, chrtVals map[string]any, options common.ReleaseOptions, caps *common.Capabilities) (common.Values, error) { return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false) } // ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files // // This takes both ReleaseOptions and Capabilities to merge into the render values. 
-func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities, skipSchemaValidation bool) (common.Values, error) { +func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string]any, options common.ReleaseOptions, caps *common.Capabilities, skipSchemaValidation bool) (common.Values, error) { if caps == nil { caps = common.DefaultCapabilities } @@ -41,10 +41,10 @@ func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string] if err != nil { return nil, err } - top := map[string]interface{}{ + top := map[string]any{ "Chart": accessor.MetadataAsMap(), "Capabilities": caps, - "Release": map[string]interface{}{ + "Release": map[string]any{ "Name": options.Name, "Namespace": options.Namespace, "IsUpgrade": options.IsUpgrade, diff --git a/pkg/chart/common/util/values_test.go b/pkg/chart/common/util/values_test.go index 706d3cfda..23b8a3de2 100644 --- a/pkg/chart/common/util/values_test.go +++ b/pkg/chart/common/util/values_test.go @@ -26,17 +26,17 @@ import ( func TestToRenderValues(t *testing.T) { - chartValues := map[string]interface{}{ + chartValues := map[string]any{ "name": "al Rashid", - "where": map[string]interface{}{ + "where": map[string]any{ "city": "Basrah", "title": "caliph", }, } - overrideValues := map[string]interface{}{ + overrideValues := map[string]any{ "name": "Haroun", - "where": map[string]interface{}{ + "where": map[string]any{ "city": "Baghdad", "date": "809 CE", }, @@ -67,11 +67,11 @@ func TestToRenderValues(t *testing.T) { } // Ensure that the top-level values are all set. 
- metamap := res["Chart"].(map[string]interface{}) + metamap := res["Chart"].(map[string]any) if name := metamap["Name"]; name.(string) != "test" { t.Errorf("Expected chart name 'test', got %q", name) } - relmap := res["Release"].(map[string]interface{}) + relmap := res["Release"].(map[string]any) if name := relmap["Name"]; name.(string) != "Seven Voyages" { t.Errorf("Expected release name 'Seven Voyages', got %q", name) } @@ -85,7 +85,7 @@ func TestToRenderValues(t *testing.T) { t.Error("Expected upgrade to be false.") } if !relmap["IsInstall"].(bool) { - t.Errorf("Expected install to be true.") + t.Error("Expected install to be true.") } if !res["Capabilities"].(*common.Capabilities).APIVersions.Has("v1") { t.Error("Expected Capabilities to have v1 as an API") @@ -98,7 +98,7 @@ func TestToRenderValues(t *testing.T) { if vals["name"] != "Haroun" { t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals) } - where := vals["where"].(map[string]interface{}) + where := vals["where"].(map[string]any) expects := map[string]string{ "city": "Baghdad", "date": "809 CE", diff --git a/pkg/chart/common/values.go b/pkg/chart/common/values.go index 94958a779..17a067790 100644 --- a/pkg/chart/common/values.go +++ b/pkg/chart/common/values.go @@ -29,7 +29,7 @@ import ( const GlobalKey = "global" // Values represents a collection of chart values. -type Values map[string]interface{} +type Values map[string]any // YAML encodes the Values into a YAML string. func (v Values) YAML() (string, error) { @@ -64,9 +64,9 @@ func (v Values) Table(name string) (Values, error) { // AsMap is a utility function for converting Values to a map[string]interface{}. // // It protects against nil map panics. 
-func (v Values) AsMap() map[string]interface{} { +func (v Values) AsMap() map[string]any { if len(v) == 0 { - return map[string]interface{}{} + return map[string]any{} } return v } @@ -86,7 +86,7 @@ func tableLookup(v Values, simple string) (Values, error) { if !ok { return v, ErrNoTable{simple} } - if vv, ok := v2.(map[string]interface{}); ok { + if vv, ok := v2.(map[string]any); ok { return vv, nil } @@ -113,7 +113,7 @@ func ReadValues(data []byte) (vals Values, err error) { func ReadValuesFile(filename string) (Values, error) { data, err := os.ReadFile(filename) if err != nil { - return map[string]interface{}{}, err + return map[string]any{}, err } return ReadValues(data) } @@ -129,8 +129,8 @@ type ReleaseOptions struct { } // istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) +func istable(v any) bool { + _, ok := v.(map[string]any) return ok } @@ -141,14 +141,14 @@ func istable(v interface{}) bool { // chapter: // one: // title: "Loomings" -func (v Values) PathValue(path string) (interface{}, error) { +func (v Values) PathValue(path string) (any, error) { if path == "" { return nil, errors.New("YAML path cannot be empty") } return v.pathValue(parsePath(path)) } -func (v Values) pathValue(path []string) (interface{}, error) { +func (v Values) pathValue(path []string) (any, error) { if len(path) == 1 { // if exists must be root key not table if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { diff --git a/pkg/chart/common/values_test.go b/pkg/chart/common/values_test.go index 3cceeb2b5..9743869ec 100644 --- a/pkg/chart/common/values_test.go +++ b/pkg/chart/common/values_test.go @@ -113,7 +113,7 @@ chapter: } if _, err := d.Table("title"); err == nil { - t.Fatalf("Title is not a table.") + t.Fatal("Title is not a table.") } if _, err := d.Table("chapter"); err != nil { @@ -131,11 +131,11 @@ chapter: } if _, err := 
d.Table("chapter.OneHundredThirtySix"); err == nil { - t.Errorf("I think you mean 'Epilogue'") + t.Error("I think you mean 'Epilogue'") } } -func matchValues(t *testing.T, data map[string]interface{}) { +func matchValues(t *testing.T, data map[string]any) { t.Helper() if data["poet"] != "Coleridge" { t.Errorf("Unexpected poet: %s", data["poet"]) @@ -150,17 +150,17 @@ func matchValues(t *testing.T, data map[string]interface{}) { if o, err := ttpl("{{.mariner.shot}}", data); err != nil { t.Errorf(".mariner.shot: %s", err) } else if o != "ALBATROSS" { - t.Errorf("Expected that mariner shot ALBATROSS") + t.Error("Expected that mariner shot ALBATROSS") } if o, err := ttpl("{{.water.water.where}}", data); err != nil { t.Errorf(".water.water.where: %s", err) } else if o != "everywhere" { - t.Errorf("Expected water water everywhere") + t.Error("Expected water water everywhere") } } -func ttpl(tpl string, v map[string]interface{}) (string, error) { +func ttpl(tpl string, v map[string]any) (string, error) { var b bytes.Buffer tt := template.Must(template.New("t").Parse(tpl)) err := tt.Execute(&b, v) @@ -199,7 +199,7 @@ chapter: } if v, err := d.PathValue("title"); err == nil { if v != "Moby Dick" { - t.Errorf("Failed to return values for root key title") + t.Error("Failed to return values for root key title") } } } diff --git a/pkg/chart/interfaces.go b/pkg/chart/interfaces.go index 4001bc548..6d94ad3ea 100644 --- a/pkg/chart/interfaces.go +++ b/pkg/chart/interfaces.go @@ -19,21 +19,21 @@ import ( common "helm.sh/helm/v4/pkg/chart/common" ) -type Charter interface{} +type Charter any -type Dependency interface{} +type Dependency any type Accessor interface { Name() string IsRoot() bool - MetadataAsMap() map[string]interface{} + MetadataAsMap() map[string]any Files() []*common.File Templates() []*common.File ChartFullPath() string IsLibraryChart() bool Dependencies() []Charter MetaDependencies() []Dependency - Values() map[string]interface{} + Values() map[string]any Schema() 
[]byte Deprecated() bool } diff --git a/pkg/chart/loader/archive/archive.go b/pkg/chart/loader/archive/archive.go index e98f5c333..a35c0152d 100644 --- a/pkg/chart/loader/archive/archive.go +++ b/pkg/chart/loader/archive/archive.go @@ -172,7 +172,7 @@ func EnsureArchive(name string, raw *os.File) error { buffer := make([]byte, 512) _, err := raw.Read(buffer) if err != nil && err != io.EOF { - return fmt.Errorf("file '%s' cannot be read: %s", name, err) + return fmt.Errorf("file '%s' cannot be read: %w", name, err) } // Helm may identify achieve of the application/x-gzip as application/vnd.ms-fontobject. diff --git a/pkg/chart/v2/chart.go b/pkg/chart/v2/chart.go index d77a53ddc..4cfc2b890 100644 --- a/pkg/chart/v2/chart.go +++ b/pkg/chart/v2/chart.go @@ -48,11 +48,11 @@ type Chart struct { // Templates for this chart. Templates []*common.File `json:"templates"` // Values are default config for this chart. - Values map[string]interface{} `json:"values"` + Values map[string]any `json:"values"` // Schema is an optional JSON schema for imposing structure on Values Schema []byte `json:"schema"` // SchemaModTime the schema was last modified - SchemaModTime time.Time `json:"schemamodtime,omitempty"` + SchemaModTime time.Time `json:"schemamodtime"` // Files are miscellaneous files in a chart archive, // e.g. README, LICENSE, etc. 
Files []*common.File `json:"files"` diff --git a/pkg/chart/v2/chart_test.go b/pkg/chart/v2/chart_test.go index d0837eb16..d44e7251b 100644 --- a/pkg/chart/v2/chart_test.go +++ b/pkg/chart/v2/chart_test.go @@ -124,8 +124,8 @@ func TestIsRoot(t *testing.T) { is := assert.New(t) - is.Equal(false, chrt1.IsRoot()) - is.Equal(true, chrt2.IsRoot()) + is.False(chrt1.IsRoot()) + is.True(chrt2.IsRoot()) } func TestChartPath(t *testing.T) { diff --git a/pkg/chart/v2/dependency.go b/pkg/chart/v2/dependency.go index 8a590a036..5a92ef305 100644 --- a/pkg/chart/v2/dependency.go +++ b/pkg/chart/v2/dependency.go @@ -44,7 +44,7 @@ type Dependency struct { Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a // string or pair of child/parent sublist items. - ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"` + ImportValues []any `json:"import-values,omitempty" yaml:"import-values,omitempty"` // Alias usable alias to be used for the chart Alias string `json:"alias,omitempty" yaml:"alias,omitempty"` } diff --git a/pkg/chart/v2/errors.go b/pkg/chart/v2/errors.go index eeef75315..5f7f5e738 100644 --- a/pkg/chart/v2/errors.go +++ b/pkg/chart/v2/errors.go @@ -25,6 +25,6 @@ func (v ValidationError) Error() string { } // ValidationErrorf takes a message and formatting options and creates a ValidationError -func ValidationErrorf(msg string, args ...interface{}) ValidationError { +func ValidationErrorf(msg string, args ...any) ValidationError { return ValidationError(fmt.Sprintf(msg, args...)) } diff --git a/pkg/chart/v2/lint/lint.go b/pkg/chart/v2/lint/lint.go index 1c871d936..7f6f26320 100644 --- a/pkg/chart/v2/lint/lint.go +++ b/pkg/chart/v2/lint/lint.go @@ -43,7 +43,7 @@ func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption { } } -func RunAll(baseDir string, values map[string]interface{}, namespace string, options 
...LinterOption) support.Linter { +func RunAll(baseDir string, values map[string]any, namespace string, options ...LinterOption) support.Linter { chartDir, _ := filepath.Abs(baseDir) diff --git a/pkg/chart/v2/lint/lint_test.go b/pkg/chart/v2/lint/lint_test.go index 80dcef932..4256281e0 100644 --- a/pkg/chart/v2/lint/lint_test.go +++ b/pkg/chart/v2/lint/lint_test.go @@ -100,7 +100,7 @@ func TestInvalidYaml(t *testing.T) { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") { - t.Errorf("All didn't have the error for deliberateSyntaxError") + t.Error("All didn't have the error for deliberateSyntaxError") } } @@ -111,7 +111,7 @@ func TestInvalidChartYaml(t *testing.T) { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") { - t.Errorf("All didn't have the error for duplicate YAML keys") + t.Error("All didn't have the error for duplicate YAML keys") } } @@ -235,13 +235,13 @@ func TestMalformedTemplate(t *testing.T) { }() select { case <-c: - t.Fatalf("lint malformed template timeout") + t.Fatal("lint malformed template timeout") case <-ch: if len(m) != 1 { t.Fatalf("All didn't fail with expected errors, got %#v", m) } if !strings.Contains(m[0].Err.Error(), "invalid character '{'") { - t.Errorf("All didn't have the error for invalid character '{'") + t.Error("All didn't have the error for invalid character '{'") } } } diff --git a/pkg/chart/v2/lint/rules/chartfile.go b/pkg/chart/v2/lint/rules/chartfile.go index 806363477..f8f609280 100644 --- a/pkg/chart/v2/lint/rules/chartfile.go +++ b/pkg/chart/v2/lint/rules/chartfile.go @@ -70,15 +70,15 @@ func Chartfile(linter *support.Linter) { linter.RunLinterRule(support.WarningSev, chartFileName, validateChartVersionStrictSemVerV2(chartFile)) } -func validateChartVersionType(data map[string]interface{}) error { +func validateChartVersionType(data 
map[string]any) error { return isStringValue(data, "version") } -func validateChartAppVersionType(data map[string]interface{}) error { +func validateChartAppVersionType(data map[string]any) error { return isStringValue(data, "appVersion") } -func isStringValue(data map[string]interface{}, key string) error { +func isStringValue(data map[string]any, key string) error { value, ok := data[key] if !ok { return nil @@ -153,7 +153,7 @@ func validateChartVersion(cf *chart.Metadata) error { valid, msg := c.Validate(version) if !valid && len(msg) > 0 { - return fmt.Errorf("version %v", msg[0]) + return fmt.Errorf("version %w", msg[0]) } return nil @@ -225,12 +225,12 @@ func validateChartType(cf *chart.Metadata) error { // loadChartFileForTypeCheck loads the Chart.yaml // in a generic form of a map[string]interface{}, so that the type // of the values can be checked -func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { +func loadChartFileForTypeCheck(filename string) (map[string]any, error) { b, err := os.ReadFile(filename) if err != nil { return nil, err } - y := make(map[string]interface{}) + y := make(map[string]any) err = yaml.Unmarshal(b, &y) return y, err } diff --git a/pkg/chart/v2/lint/rules/chartfile_test.go b/pkg/chart/v2/lint/rules/chartfile_test.go index 692358426..c9e202770 100644 --- a/pkg/chart/v2/lint/rules/chartfile_test.go +++ b/pkg/chart/v2/lint/rules/chartfile_test.go @@ -50,26 +50,26 @@ func TestValidateChartYamlNotDirectory(t *testing.T) { err := validateChartYamlNotDirectory(nonExistingChartFilePath) if err == nil { - t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error") + t.Error("validateChartYamlNotDirectory to return a linter error, got no error") } } func TestValidateChartYamlFormat(t *testing.T) { err := validateChartYamlFormat(errors.New("Read error")) if err == nil { - t.Errorf("validateChartYamlFormat to return a linter error, got no error") + t.Error("validateChartYamlFormat to return a 
linter error, got no error") } err = validateChartYamlFormat(nil) if err != nil { - t.Errorf("validateChartYamlFormat to return no error, got a linter error") + t.Error("validateChartYamlFormat to return no error, got a linter error") } } func TestValidateChartName(t *testing.T) { err := validateChartName(badChart) if err == nil { - t.Errorf("validateChartName to return a linter error, got no error") + t.Error("validateChartName to return a linter error, got no error") } err = validateChartName(badChartName) @@ -176,7 +176,7 @@ func TestValidateChartMaintainer(t *testing.T) { badChart.Maintainers = []*chart.Maintainer{nil} err := validateChartMaintainer(badChart) if err == nil { - t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected") + t.Error("validateChartMaintainer did not return error for nil maintainer as expected") } if err.Error() != "a maintainer entry is empty" { t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error()) @@ -212,7 +212,7 @@ func TestValidateChartIconPresence(t *testing.T) { err := validateChartIconPresence(testChart) if err == nil { - t.Errorf("validateChartIconPresence to return a linter error, got no error") + t.Error("validateChartIconPresence to return a linter error, got no error") } else if !strings.Contains(err.Error(), "icon is recommended") { t.Errorf("expected %q, got %q", "icon is recommended", err.Error()) } diff --git a/pkg/chart/v2/lint/rules/crds.go b/pkg/chart/v2/lint/rules/crds.go index 4bb4d370b..1cf16b42d 100644 --- a/pkg/chart/v2/lint/rules/crds.go +++ b/pkg/chart/v2/lint/rules/crds.go @@ -19,7 +19,6 @@ package rules import ( "bytes" "errors" - "fmt" "io" "io/fs" "os" @@ -102,14 +101,14 @@ func validateCrdsDir(crdsPath string) error { func validateCrdAPIVersion(obj *k8sYamlStruct) error { if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") { - return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'") + return errors.New("apiVersion 
is not in 'apiextensions.k8s.io'") } return nil } func validateCrdKind(obj *k8sYamlStruct) error { if obj.Kind != "CustomResourceDefinition" { - return fmt.Errorf("object kind is not 'CustomResourceDefinition'") + return errors.New("object kind is not 'CustomResourceDefinition'") } return nil } diff --git a/pkg/chart/v2/lint/rules/dependencies_test.go b/pkg/chart/v2/lint/rules/dependencies_test.go index 08a6646cd..c50fc4b2d 100644 --- a/pkg/chart/v2/lint/rules/dependencies_test.go +++ b/pkg/chart/v2/lint/rules/dependencies_test.go @@ -72,7 +72,7 @@ func TestValidateDependencyInMetadata(t *testing.T) { c := chartWithBadDependencies() if err := validateDependencyInMetadata(&c); err == nil { - t.Errorf("chart should have been flagged for missing deps in chart metadata") + t.Error("chart should have been flagged for missing deps in chart metadata") } } @@ -132,7 +132,7 @@ func TestValidateDependenciesUnique(t *testing.T) { for _, tt := range tests { if err := validateDependenciesUnique(&tt.chart); err == nil { - t.Errorf("chart should have been flagged for dependency shadowing") + t.Error("chart should have been flagged for dependency shadowing") } } } diff --git a/pkg/chart/v2/lint/rules/deprecations_test.go b/pkg/chart/v2/lint/rules/deprecations_test.go index e153f67e6..86643f1c9 100644 --- a/pkg/chart/v2/lint/rules/deprecations_test.go +++ b/pkg/chart/v2/lint/rules/deprecations_test.go @@ -36,6 +36,6 @@ func TestValidateNoDeprecations(t *testing.T) { APIVersion: "v1", Kind: "Pod", }, nil); err != nil { - t.Errorf("Expected a v1 Pod to not be deprecated") + t.Error("Expected a v1 Pod to not be deprecated") } } diff --git a/pkg/chart/v2/lint/rules/template.go b/pkg/chart/v2/lint/rules/template.go index 43665aa3a..94210dec8 100644 --- a/pkg/chart/v2/lint/rules/template.go +++ b/pkg/chart/v2/lint/rules/template.go @@ -28,8 +28,8 @@ import ( "slices" "strings" + "k8s.io/apimachinery/pkg/api/validate/content" "k8s.io/apimachinery/pkg/api/validation" - apipath 
"k8s.io/apimachinery/pkg/api/validation/path" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" @@ -323,7 +323,7 @@ func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc { case "role", "clusterrole", "rolebinding", "clusterrolebinding": // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34 return func(name string, _ bool) []string { - return apipath.IsValidPathSegmentName(name) + return content.IsPathSegmentName(name) } default: return validation.NameIsDNSSubdomain diff --git a/pkg/chart/v2/lint/rules/template_test.go b/pkg/chart/v2/lint/rules/template_test.go index c08ba6cc3..b6a62e8fd 100644 --- a/pkg/chart/v2/lint/rules/template_test.go +++ b/pkg/chart/v2/lint/rules/template_test.go @@ -49,7 +49,7 @@ func TestValidateAllowedExtension(t *testing.T) { } } -var values = map[string]interface{}{"nameOverride": "", "httpPort": 80} +var values = map[string]any{"nameOverride": "", "httpPort": 80} const namespace = "testNamespace" @@ -264,7 +264,7 @@ func TestStrictTemplateParsingMapError(t *testing.T) { APIVersion: "v2", Version: "0.1.0", }, - Values: map[string]interface{}{ + Values: map[string]any{ "mymap": map[string]string{ "key1": "val1", }, diff --git a/pkg/chart/v2/lint/rules/values.go b/pkg/chart/v2/lint/rules/values.go index 994a6a463..2c766068c 100644 --- a/pkg/chart/v2/lint/rules/values.go +++ b/pkg/chart/v2/lint/rules/values.go @@ -17,6 +17,7 @@ limitations under the License. package rules import ( + "errors" "fmt" "os" "path/filepath" @@ -32,7 +33,7 @@ import ( // they are only tested for well-formedness. // // If additional values are supplied, they are coalesced into the values in values.yaml. 
-func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) { +func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]any, skipSchemaValidation bool) { file := "values.yaml" vf := filepath.Join(linter.ChartDir, file) fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf)) @@ -47,12 +48,12 @@ func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]inter func validateValuesFileExistence(valuesPath string) error { _, err := os.Stat(valuesPath) if err != nil { - return fmt.Errorf("file does not exist") + return errors.New("file does not exist") } return nil } -func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error { +func validateValuesFile(valuesPath string, overrides map[string]any, skipSchemaValidation bool) error { values, err := common.ReadValuesFile(valuesPath) if err != nil { return fmt.Errorf("unable to parse YAML: %w", err) @@ -63,7 +64,7 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}, ski // We could change that. 
For now, though, we retain that strategy, and thus can // coalesce tables (like reuse-values does) instead of doing the full chart // CoalesceValues - coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues := util.CoalesceTables(make(map[string]any, len(overrides)), overrides) coalescedValues = util.CoalesceTables(coalescedValues, values) ext := filepath.Ext(valuesPath) diff --git a/pkg/chart/v2/lint/rules/values_test.go b/pkg/chart/v2/lint/rules/values_test.go index 288b77436..54c7e6457 100644 --- a/pkg/chart/v2/lint/rules/values_test.go +++ b/pkg/chart/v2/lint/rules/values_test.go @@ -57,7 +57,7 @@ func TestValidateValuesYamlNotDirectory(t *testing.T) { err := validateValuesFileExistence(nonExistingValuesFilePath) if err == nil { - t.Errorf("validateValuesFileExistence to return a linter error, got no error") + t.Error("validateValuesFileExistence to return a linter error, got no error") } } @@ -67,7 +67,7 @@ func TestValidateValuesFileWellFormed(t *testing.T) { ` tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml)) valfile := filepath.Join(tmpdir, "values.yaml") - if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil { + if err := validateValuesFile(valfile, map[string]any{}, false); err == nil { t.Fatal("expected values file to fail parsing") } } @@ -78,7 +78,7 @@ func TestValidateValuesFileSchema(t *testing.T) { createTestingSchema(t, tmpdir) valfile := filepath.Join(tmpdir, "values.yaml") - if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil { + if err := validateValuesFile(valfile, map[string]any{}, false); err != nil { t.Fatalf("Failed validation with %s", err) } } @@ -91,7 +91,7 @@ func TestValidateValuesFileSchemaFailure(t *testing.T) { valfile := filepath.Join(tmpdir, "values.yaml") - err := validateValuesFile(valfile, map[string]interface{}{}, false) + err := validateValuesFile(valfile, map[string]any{}, false) if err == nil { 
t.Fatal("expected values file to fail parsing") } @@ -107,7 +107,7 @@ func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T valfile := filepath.Join(tmpdir, "values.yaml") - err := validateValuesFile(valfile, map[string]interface{}{}, true) + err := validateValuesFile(valfile, map[string]any{}, true) if err != nil { t.Fatal("expected values file to pass parsing because of skipSchemaValidation") } @@ -115,7 +115,7 @@ func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T func TestValidateValuesFileSchemaOverrides(t *testing.T) { yaml := "username: admin" - overrides := map[string]interface{}{ + overrides := map[string]any{ "password": "swordfish", } tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml)) @@ -131,24 +131,24 @@ func TestValidateValuesFile(t *testing.T) { tests := []struct { name string yaml string - overrides map[string]interface{} + overrides map[string]any errorMessage string }{ { name: "value added", yaml: "username: admin", - overrides: map[string]interface{}{"password": "swordfish"}, + overrides: map[string]any{"password": "swordfish"}, }, { name: "value not overridden", yaml: "username: admin\npassword:", - overrides: map[string]interface{}{"username": "anotherUser"}, + overrides: map[string]any{"username": "anotherUser"}, errorMessage: "- at '/password': got null, want string", }, { name: "value overridden", yaml: "username: admin\npassword:", - overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"}, + overrides: map[string]any{"username": "anotherUser", "password": "swordfish"}, }, } diff --git a/pkg/chart/v2/loader/load.go b/pkg/chart/v2/loader/load.go index d466e247c..28115d062 100644 --- a/pkg/chart/v2/loader/load.go +++ b/pkg/chart/v2/loader/load.go @@ -120,7 +120,7 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { c.Metadata = new(chart.Metadata) } if c.Metadata.APIVersion != chart.APIVersionV1 { - log.Printf("Warning: Dependencies 
are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.") + log.Print("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.") } if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { return c, fmt.Errorf("cannot load requirements.yaml: %w", err) @@ -138,7 +138,7 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { c.Metadata = new(chart.Metadata) } if c.Metadata.APIVersion != chart.APIVersionV1 { - log.Printf("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". We recommend migrating to Chart.lock.") + log.Print("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". We recommend migrating to Chart.lock.") } if c.Metadata.APIVersion == chart.APIVersionV1 { c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data}) @@ -209,11 +209,11 @@ func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) { // // The reader is expected to contain one or more YAML documents, the values of which are merged. // And the values can be either a chart's default values or user-supplied values. -func LoadValues(data io.Reader) (map[string]interface{}, error) { - values := map[string]interface{}{} +func LoadValues(data io.Reader) (map[string]any, error) { + values := map[string]any{} reader := utilyaml.NewYAMLReader(bufio.NewReader(data)) for { - currentMap := map[string]interface{}{} + currentMap := map[string]any{} raw, err := reader.Read() if err != nil { if errors.Is(err, io.EOF) { @@ -231,13 +231,13 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) { // MergeMaps merges two maps. If a key exists in both maps, the value from b will be used. // If the value is a map, the maps will be merged recursively. 
-func MergeMaps(a, b map[string]interface{}) map[string]interface{} { - out := make(map[string]interface{}, len(a)) +func MergeMaps(a, b map[string]any) map[string]any { + out := make(map[string]any, len(a)) maps.Copy(out, a) for k, v := range b { - if v, ok := v.(map[string]interface{}); ok { + if v, ok := v.(map[string]any); ok { if bv, ok := out[k]; ok { - if bv, ok := bv.(map[string]interface{}); ok { + if bv, ok := bv.(map[string]any); ok { out[k] = MergeMaps(bv, v) continue } diff --git a/pkg/chart/v2/loader/load_test.go b/pkg/chart/v2/loader/load_test.go index 397745dd6..aed071b2f 100644 --- a/pkg/chart/v2/loader/load_test.go +++ b/pkg/chart/v2/loader/load_test.go @@ -61,7 +61,7 @@ func TestLoadDirWithDevNull(t *testing.T) { t.Fatalf("Failed to load testdata: %s", err) } if _, err := l.Load(); err == nil { - t.Errorf("packages with an irregular file (/dev/null) should not load") + t.Error("packages with an irregular file (/dev/null) should not load") } } @@ -508,7 +508,7 @@ func TestLoadInvalidArchive(t *testing.T) { func TestLoadValues(t *testing.T) { testCases := map[string]struct { data []byte - expctedValues map[string]interface{} + expctedValues map[string]any }{ "It should load values correctly": { data: []byte(` @@ -517,11 +517,11 @@ foo: bar: version: v2 `), - expctedValues: map[string]interface{}{ - "foo": map[string]interface{}{ + expctedValues: map[string]any{ + "foo": map[string]any{ "image": "foo:v1", }, - "bar": map[string]interface{}{ + "bar": map[string]any{ "version": "v2", }, }, @@ -536,11 +536,11 @@ bar: foo: image: foo:v2 `), - expctedValues: map[string]interface{}{ - "foo": map[string]interface{}{ + expctedValues: map[string]any{ + "foo": map[string]any{ "image": "foo:v2", }, - "bar": map[string]interface{}{ + "bar": map[string]any{ "version": "v2", }, }, @@ -560,24 +560,24 @@ foo: } func TestMergeValuesV2(t *testing.T) { - nestedMap := map[string]interface{}{ + nestedMap := map[string]any{ "foo": "bar", "baz": map[string]string{ "cool": 
"stuff", }, } - anotherNestedMap := map[string]interface{}{ + anotherNestedMap := map[string]any{ "foo": "bar", "baz": map[string]string{ "cool": "things", "awesome": "stuff", }, } - flatMap := map[string]interface{}{ + flatMap := map[string]any{ "foo": "bar", "baz": "stuff", } - anotherFlatMap := map[string]interface{}{ + anotherFlatMap := map[string]any{ "testing": "fun", } @@ -600,7 +600,7 @@ func TestMergeValuesV2(t *testing.T) { } testMap = MergeMaps(anotherFlatMap, anotherNestedMap) - expectedMap := map[string]interface{}{ + expectedMap := map[string]any{ "testing": "fun", "foo": "bar", "baz": map[string]string{ diff --git a/pkg/chart/v2/metadata.go b/pkg/chart/v2/metadata.go index c46007863..5df307321 100644 --- a/pkg/chart/v2/metadata.go +++ b/pkg/chart/v2/metadata.go @@ -112,6 +112,9 @@ func (md *Metadata) Validate() error { return ValidationError("chart.metadata.name is required") } + if md.Name == "." || md.Name == ".." { + return ValidationErrorf("chart.metadata.name %q is not allowed", md.Name) + } if md.Name != filepath.Base(md.Name) { return ValidationErrorf("chart.metadata.name %q is invalid", md.Name) } diff --git a/pkg/chart/v2/metadata_test.go b/pkg/chart/v2/metadata_test.go index 7892f0209..63dd99e52 100644 --- a/pkg/chart/v2/metadata_test.go +++ b/pkg/chart/v2/metadata_test.go @@ -16,6 +16,7 @@ limitations under the License. 
package v2 import ( + "errors" "testing" ) @@ -40,6 +41,16 @@ func TestValidate(t *testing.T) { &Metadata{APIVersion: "v2", Version: "1.0"}, ValidationError("chart.metadata.name is required"), }, + { + "chart with dot name", + &Metadata{Name: ".", APIVersion: "v2", Version: "1.0"}, + ValidationError("chart.metadata.name \".\" is not allowed"), + }, + { + "chart with dotdot name", + &Metadata{Name: "..", APIVersion: "v2", Version: "1.0"}, + ValidationError("chart.metadata.name \"..\" is not allowed"), + }, { "chart without name", &Metadata{Name: "../../test", APIVersion: "v2", Version: "1.0"}, @@ -181,7 +192,7 @@ func TestValidate(t *testing.T) { for _, tt := range tests { result := tt.md.Validate() - if result != tt.err { + if !errors.Is(result, tt.err) { t.Errorf("expected %q, got %q in test %q", tt.err, result, tt.name) } } diff --git a/pkg/chart/v2/util/chartfile_test.go b/pkg/chart/v2/util/chartfile_test.go index 00c530b8a..068977eef 100644 --- a/pkg/chart/v2/util/chartfile_test.go +++ b/pkg/chart/v2/util/chartfile_test.go @@ -35,11 +35,11 @@ func TestLoadChartfile(t *testing.T) { func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { t.Helper() - if f == nil { //nolint:staticcheck + if f == nil { t.Fatal("Failed verifyChartfile because f is nil") } - if f.APIVersion != chart.APIVersionV1 { //nolint:staticcheck + if f.APIVersion != chart.APIVersionV1 { t.Errorf("Expected API Version %q, got %q", chart.APIVersionV1, f.APIVersion) } @@ -60,15 +60,15 @@ func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { } if f.Maintainers[0].Name != "The Helm Team" { - t.Errorf("Unexpected maintainer name.") + t.Error("Unexpected maintainer name.") } if f.Maintainers[1].Email != "nobody@example.com" { - t.Errorf("Unexpected maintainer email.") + t.Error("Unexpected maintainer email.") } if len(f.Sources) != 1 { - t.Fatalf("Unexpected number of sources") + t.Fatal("Unexpected number of sources") } if f.Sources[0] != "https://example.com/foo/bar" { @@ 
-88,7 +88,7 @@ func verifyChartfile(t *testing.T, f *chart.Metadata, name string) { } if len(f.Annotations) != 2 { - t.Fatalf("Unexpected annotations") + t.Fatal("Unexpected annotations") } if want, got := "extravalue", f.Annotations["extrakey"]; want != got { @@ -115,7 +115,7 @@ func TestIsChartDir(t *testing.T) { } validChartDir, err = IsChartDir("testdata") if validChartDir || err == nil { - t.Errorf("expected error but did not get any") + t.Error("expected error but did not get any") return } } diff --git a/pkg/chart/v2/util/create.go b/pkg/chart/v2/util/create.go index bf572c707..0d7ae8d5c 100644 --- a/pkg/chart/v2/util/create.go +++ b/pkg/chart/v2/util/create.go @@ -669,7 +669,7 @@ func CreateFrom(chartfile *chart.Metadata, dest, src string) error { return fmt.Errorf("reading values file: %w", err) } - var m map[string]interface{} + var m map[string]any if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil { return fmt.Errorf("transforming values file: %w", err) } diff --git a/pkg/chart/v2/util/create_test.go b/pkg/chart/v2/util/create_test.go index 086c4e5c8..967972fc8 100644 --- a/pkg/chart/v2/util/create_test.go +++ b/pkg/chart/v2/util/create_test.go @@ -140,7 +140,7 @@ func TestCreate_Overwrite(t *testing.T) { } if errlog.Len() == 0 { - t.Errorf("Expected warnings about overwriting files.") + t.Error("Expected warnings about overwriting files.") } } diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index c7bb6621e..abd673f9d 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -16,6 +16,7 @@ limitations under the License. 
package util import ( + "errors" "fmt" "log/slog" "strings" @@ -44,6 +45,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, if len(c) > 0 { // retrieve value vv, err := cvals.PathValue(cpath + c) + var errNoValue common.ErrNoValue if err == nil { // if not bool, warn if bv, ok := vv.(bool); ok { @@ -51,7 +53,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, break } slog.Warn("returned non-bool value", "path", c, "chart", r.Name) - } else if _, ok := err.(common.ErrNoValue); !ok { + } else if !errors.As(err, &errNoValue) { // this is a real error slog.Warn("the method PathValue returned error", slog.Any("error", err)) } @@ -140,7 +142,7 @@ func copyMetadata(metadata *chart.Metadata) *chart.Metadata { } // processDependencyEnabled removes disabled charts from dependencies -func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { +func processDependencyEnabled(c *chart.Chart, v map[string]any, path string) error { if c.Metadata.Dependencies == nil { return nil } @@ -226,7 +228,7 @@ Loop: } // pathToMap creates a nested map given a YAML path in dot notation. -func pathToMap(path string, data map[string]interface{}) map[string]interface{} { +func pathToMap(path string, data map[string]any) map[string]any { if path == "." 
{ return data } @@ -235,13 +237,13 @@ func pathToMap(path string, data map[string]interface{}) map[string]interface{} func parsePath(key string) []string { return strings.Split(key, ".") } -func set(path []string, data map[string]interface{}) map[string]interface{} { +func set(path []string, data map[string]any) map[string]any { if len(path) == 0 { return nil } cur := data for i := len(path) - 1; i >= 0; i-- { - cur = map[string]interface{}{path[i]: cur} + cur = map[string]any{path[i]: cur} } return cur } @@ -262,13 +264,13 @@ func processImportValues(c *chart.Chart, merge bool) error { if err != nil { return err } - b := make(map[string]interface{}) + b := make(map[string]any) // import values from each dependency if specified in import-values for _, r := range c.Metadata.Dependencies { - var outiv []interface{} + var outiv []any for _, riv := range r.ImportValues { switch iv := riv.(type) { - case map[string]interface{}: + case map[string]any: child := fmt.Sprintf("%v", iv["child"]) parent := fmt.Sprintf("%v", iv["parent"]) @@ -335,27 +337,27 @@ func processImportValues(c *chart.Chart, merge bool) error { return nil } -func deepCopyMap(vals map[string]interface{}) map[string]interface{} { +func deepCopyMap(vals map[string]any) map[string]any { valsCopy, err := copystructure.Copy(vals) if err != nil { return vals } - return valsCopy.(map[string]interface{}) + return valsCopy.(map[string]any) } -func trimNilValues(vals map[string]interface{}) map[string]interface{} { +func trimNilValues(vals map[string]any) map[string]any { valsCopy, err := copystructure.Copy(vals) if err != nil { return vals } - valsCopyMap := valsCopy.(map[string]interface{}) + valsCopyMap := valsCopy.(map[string]any) for key, val := range valsCopyMap { if val == nil { // Iterate over the values and remove nil keys delete(valsCopyMap, key) } else if istable(val) { // Recursively call into ourselves to remove keys from inner tables - valsCopyMap[key] = trimNilValues(val.(map[string]interface{})) + 
valsCopyMap[key] = trimNilValues(val.(map[string]any)) } } @@ -363,8 +365,8 @@ func trimNilValues(vals map[string]interface{}) map[string]interface{} { } // istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) +func istable(v any) bool { + _, ok := v.(map[string]any) return ok } diff --git a/pkg/chart/v2/util/dependencies_test.go b/pkg/chart/v2/util/dependencies_test.go index c817b0b89..0e4df8528 100644 --- a/pkg/chart/v2/util/dependencies_test.go +++ b/pkg/chart/v2/util/dependencies_test.go @@ -63,7 +63,7 @@ func TestLoadDependency(t *testing.T) { } func TestDependencyEnabled(t *testing.T) { - type M = map[string]interface{} + type M = map[string]any tests := []struct { name string v M @@ -385,7 +385,7 @@ func TestGetAliasDependency(t *testing.T) { req := c.Metadata.Dependencies if len(req) == 0 { - t.Fatalf("there are no dependencies to test") + t.Fatal("there are no dependencies to test") } // Success case @@ -403,7 +403,7 @@ func TestGetAliasDependency(t *testing.T) { if req[0].Version != "" { if !IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) { - t.Fatalf("dependency chart version is not in the compatible range") + t.Fatal("dependency chart version is not in the compatible range") } } @@ -415,7 +415,7 @@ func TestGetAliasDependency(t *testing.T) { req[0].Version = "something else which is not in the compatible range" if IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) { - t.Fatalf("dependency chart version which is not in the compatible range should cause a failure other than a success ") + t.Fatal("dependency chart version outside the compatible range should fail, but it succeeded") } } diff --git a/pkg/chart/v2/util/doc.go b/pkg/chart/v2/util/doc.go index 141062074..ed741a83d 100644 --- a/pkg/chart/v2/util/doc.go +++ b/pkg/chart/v2/util/doc.go @@ -42,4 +42,4 @@ into a Chart. 
When creating charts in memory, use the 'helm.sh/helm/pkg/chart' package directly. */ -package util // import chartutil "helm.sh/helm/v4/pkg/chart/v2/util" +package util // import "helm.sh/helm/v4/pkg/chart/v2/util" diff --git a/pkg/chart/v2/util/expand.go b/pkg/chart/v2/util/expand.go index 077dfbf38..6022d5869 100644 --- a/pkg/chart/v2/util/expand.go +++ b/pkg/chart/v2/util/expand.go @@ -52,6 +52,17 @@ func Expand(dir string, r io.Reader) error { return errors.New("chart name not specified") } + // Reject chart names that are POSIX path dot-segments or dot-dot segments or contain path separators. + // A dot-segment name (e.g. ".") causes SecureJoin to resolve to the root + // directory and extraction then to write files directly into that extraction root + // instead of a per-chart subdirectory. + if chartName == "." || chartName == ".." { + return fmt.Errorf("chart name %q is not allowed", chartName) + } + if chartName != filepath.Base(chartName) { + return fmt.Errorf("chart name %q must not contain path separators", chartName) + } + // Find the base directory // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up // being wrong or returning an error. This was introduced in v0.4.0. @@ -61,6 +72,12 @@ func Expand(dir string, r io.Reader) error { return err } + // Defense-in-depth: the chart directory must be a subdirectory of dir, + // never dir itself. + if chartdir == dir { + return fmt.Errorf("chart name %q resolves to the extraction root", chartName) + } + // Copy all files verbatim. We don't parse these files because parsing can remove // comments. for _, file := range files { diff --git a/pkg/chart/v2/util/expand_test.go b/pkg/chart/v2/util/expand_test.go index 280995f7e..e9e298b81 100644 --- a/pkg/chart/v2/util/expand_test.go +++ b/pkg/chart/v2/util/expand_test.go @@ -17,11 +17,73 @@ limitations under the License. 
package util import ( + "archive/tar" + "bytes" + "compress/gzip" + "io/fs" "os" "path/filepath" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +// makeTestChartArchive builds a gzipped tar archive from the given sourceDir directory, file entries are prefixed with the given chartName +func makeTestChartArchive(t *testing.T, chartName, sourceDir string) *bytes.Buffer { + t.Helper() + + var result bytes.Buffer + gw := gzip.NewWriter(&result) + tw := tar.NewWriter(gw) + + dir := os.DirFS(sourceDir) + + writeFile := func(relPath string) { + t.Helper() + f, err := dir.Open(relPath) + require.NoError(t, err) + + fStat, err := f.Stat() + require.NoError(t, err) + + err = tw.WriteHeader(&tar.Header{ + Name: filepath.Join(chartName, relPath), + Mode: int64(fStat.Mode()), + Size: fStat.Size(), + }) + require.NoError(t, err) + + data, err := fs.ReadFile(dir, relPath) + require.NoError(t, err) + tw.Write(data) + } + + err := fs.WalkDir(dir, ".", func(path string, d os.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + + if d.IsDir() { + return nil + } + + writeFile(path) + + return nil + }) + if err != nil { + t.Fatal(err) + } + + err = tw.Close() + require.NoError(t, err) + err = gw.Close() + require.NoError(t, err) + + return &result +} + func TestExpand(t *testing.T) { dest := t.TempDir() @@ -75,6 +137,28 @@ func TestExpand(t *testing.T) { } } +func TestExpandError(t *testing.T) { + tests := map[string]struct { + chartName string + chartDir string + wantErr string + }{ + "dot name": {"dotname", "testdata/dotname", "not allowed"}, + "dotdot name": {"dotdotname", "testdata/dotdotname", "not allowed"}, + "slash in name": {"slashinname", "testdata/slashinname", "must not contain path separators"}, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + archive := makeTestChartArchive(t, tt.chartName, tt.chartDir) + + dest := t.TempDir() + err := Expand(dest, archive) + 
assert.ErrorContains(t, err, tt.wantErr) + }) + } +} + func TestExpandFile(t *testing.T) { dest := t.TempDir() diff --git a/pkg/chart/v2/util/save_test.go b/pkg/chart/v2/util/save_test.go index 6d4e2c8cd..2f2b73efd 100644 --- a/pkg/chart/v2/util/save_test.go +++ b/pkg/chart/v2/util/save_test.go @@ -21,8 +21,8 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + "encoding/hex" "errors" - "fmt" "io" "os" "path" @@ -90,7 +90,7 @@ func TestSave(t *testing.T) { t.Fatalf("Schema data did not match.\nExpected:\n%s\nActual:\n%s", formattedExpected, formattedActual) } if _, err := Save(&chartWithInvalidJSON, dest); err == nil { - t.Fatalf("Invalid JSON was not caught while saving chart") + t.Fatal("Invalid JSON was not caught while saving chart") } c.Metadata.APIVersion = chart.APIVersionV2 @@ -157,7 +157,7 @@ func TestSavePreservesTimestamps(t *testing.T) { Version: "1.2.3", }, ModTime: initialCreateTime, - Values: map[string]interface{}{ + Values: map[string]any{ "imageName": "testimage", "imageId": 42, }, @@ -357,5 +357,5 @@ func sha256Sum(filePath string) (string, error) { return "", err } - return fmt.Sprintf("%x", h.Sum(nil)), nil + return hex.EncodeToString(h.Sum(nil)), nil } diff --git a/pkg/chart/v2/util/testdata/dotdotname/Chart.yaml b/pkg/chart/v2/util/testdata/dotdotname/Chart.yaml new file mode 100644 index 000000000..9b081f27b --- /dev/null +++ b/pkg/chart/v2/util/testdata/dotdotname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: .. +description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/pkg/chart/v2/util/testdata/dotname/Chart.yaml b/pkg/chart/v2/util/testdata/dotname/Chart.yaml new file mode 100644 index 000000000..597c16290 --- /dev/null +++ b/pkg/chart/v2/util/testdata/dotname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: . 
+description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/pkg/chart/v2/util/testdata/slashinname/Chart.yaml b/pkg/chart/v2/util/testdata/slashinname/Chart.yaml new file mode 100644 index 000000000..0c522a4b6 --- /dev/null +++ b/pkg/chart/v2/util/testdata/slashinname/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v3 +name: a/../b +description: A Helm chart for Kubernetes +version: 0.1.0 \ No newline at end of file diff --git a/pkg/cli/environment.go b/pkg/cli/environment.go index 60cce51a9..45d773eb4 100644 --- a/pkg/cli/environment.go +++ b/pkg/cli/environment.go @@ -24,7 +24,6 @@ These dependencies are expressed as interfaces so that alternate implementations package cli import ( - "fmt" "net/http" "os" "strconv" @@ -36,7 +35,7 @@ import ( "helm.sh/helm/v4/internal/version" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/kube" + "helm.sh/helm/v4/pkg/kubeenv" ) // defaultMaxHistory sets the maximum number of releases to 0: unlimited @@ -135,7 +134,7 @@ func New() *EnvSettings { config.Burst = env.BurstLimit config.QPS = env.QPS config.Wrap(func(rt http.RoundTripper) http.RoundTripper { - return &kube.RetryingRoundTripper{Wrapped: rt} + return &kubeenv.RetryingRoundTripper{Wrapped: rt} }) config.UserAgent = version.GetUserAgent() return config @@ -246,7 +245,7 @@ func (s *EnvSettings) EnvVars() map[string]string { "HELM_CACHE_HOME": helmpath.CachePath(""), "HELM_CONFIG_HOME": helmpath.ConfigPath(""), "HELM_DATA_HOME": helmpath.DataPath(""), - "HELM_DEBUG": fmt.Sprint(s.Debug), + "HELM_DEBUG": strconv.FormatBool(s.Debug), "HELM_PLUGINS": s.PluginsDirectory, "HELM_REGISTRY_CONFIG": s.RegistryConfig, "HELM_REPOSITORY_CACHE": s.RepositoryCache, @@ -275,8 +274,10 @@ func (s *EnvSettings) EnvVars() map[string]string { // Namespace gets the namespace from the configuration func (s *EnvSettings) Namespace() string { - if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { - return ns + if s.config != nil { + if 
ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { + return ns + } } if s.namespace != "" { return s.namespace diff --git a/pkg/cli/output/output.go b/pkg/cli/output/output.go index 28d503741..a9bd846fe 100644 --- a/pkg/cli/output/output.go +++ b/pkg/cli/output/output.go @@ -18,6 +18,7 @@ package output import ( "encoding/json" + "errors" "fmt" "io" @@ -50,7 +51,7 @@ func FormatsWithDesc() map[string]string { } // ErrInvalidFormatType is returned when an unsupported format type is used -var ErrInvalidFormatType = fmt.Errorf("invalid format type") +var ErrInvalidFormatType = errors.New("invalid format type") // String returns the string representation of the Format func (o Format) String() string { @@ -102,7 +103,7 @@ type Writer interface { // EncodeJSON is a helper function to decorate any error message with a bit more // context and avoid writing the same code over and over for printers. -func EncodeJSON(out io.Writer, obj interface{}) error { +func EncodeJSON(out io.Writer, obj any) error { enc := json.NewEncoder(out) err := enc.Encode(obj) if err != nil { @@ -113,7 +114,7 @@ func EncodeJSON(out io.Writer, obj interface{}) error { // EncodeYAML is a helper function to decorate any error message with a bit more // context and avoid writing the same code over and over for printers -func EncodeYAML(out io.Writer, obj interface{}) error { +func EncodeYAML(out io.Writer, obj any) error { raw, err := yaml.Marshal(obj) if err != nil { return fmt.Errorf("unable to write YAML output: %w", err) diff --git a/pkg/cli/values/options.go b/pkg/cli/values/options.go index cd65fa885..336dfb72b 100644 --- a/pkg/cli/values/options.go +++ b/pkg/cli/values/options.go @@ -42,8 +42,8 @@ type Options struct { // MergeValues merges values from files specified via -f/--values and directly // via --set-json, --set, --set-string, or --set-file, marshaling them to YAML -func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, error) { - base := 
map[string]interface{}{} +func (opts *Options) MergeValues(p getter.Providers) (map[string]any, error) { + base := map[string]any{} // User specified a values files via -f/--values for _, filePath := range opts.ValueFiles { @@ -64,7 +64,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er trimmedValue := strings.TrimSpace(value) if len(trimmedValue) > 0 && trimmedValue[0] == '{' { // If value is JSON object format, parse it as map - var jsonMap map[string]interface{} + var jsonMap map[string]any if err := json.Unmarshal([]byte(trimmedValue), &jsonMap); err != nil { return nil, fmt.Errorf("failed parsing --set-json data JSON: %s", value) } @@ -93,7 +93,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er // User specified a value via --set-file for _, value := range opts.FileValues { - reader := func(rs []rune) (interface{}, error) { + reader := func(rs []rune) (any, error) { bytes, err := readFile(string(rs), p) if err != nil { return nil, err diff --git a/pkg/cli/values/options_test.go b/pkg/cli/values/options_test.go index fe1afc5d2..571622d80 100644 --- a/pkg/cli/values/options_test.go +++ b/pkg/cli/values/options_test.go @@ -19,7 +19,6 @@ package values import ( "bytes" "errors" - "fmt" "os" "path/filepath" "reflect" @@ -265,7 +264,7 @@ func TestReadFileErrorMessages(t *testing.T) { { name: "getter error with message", filePath: "http://example.com/file", - providers: getter.Providers{mockProvider([]string{"http"}, nil, fmt.Errorf("connection refused"))}, + providers: getter.Providers{mockProvider([]string{"http"}, nil, errors.New("connection refused"))}, wantErr: "connection refused", }, } @@ -290,7 +289,7 @@ func TestReadFileOriginal(t *testing.T) { filePath := "%a.txt" _, err := readFile(filePath, p) if err == nil { - t.Errorf("Expected error when has special strings") + t.Error("Expected error when has special strings") } } @@ -298,7 +297,7 @@ func TestMergeValuesCLI(t *testing.T) { tests := 
[]struct { name string opts Options - expected map[string]interface{} + expected map[string]any wantErr bool }{ { @@ -306,8 +305,8 @@ func TestMergeValuesCLI(t *testing.T) { opts: Options{ JSONValues: []string{`{"foo": {"bar": "baz"}}`}, }, - expected: map[string]interface{}{ - "foo": map[string]interface{}{ + expected: map[string]any{ + "foo": map[string]any{ "bar": "baz", }, }, @@ -317,9 +316,9 @@ func TestMergeValuesCLI(t *testing.T) { opts: Options{ JSONValues: []string{"foo.bar=[1,2,3]"}, }, - expected: map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": []interface{}{1.0, 2.0, 3.0}, + expected: map[string]any{ + "foo": map[string]any{ + "bar": []any{1.0, 2.0, 3.0}, }, }, }, @@ -328,7 +327,7 @@ func TestMergeValuesCLI(t *testing.T) { opts: Options{ Values: []string{"foo=bar"}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "bar", }, }, @@ -337,7 +336,7 @@ func TestMergeValuesCLI(t *testing.T) { opts: Options{ StringValues: []string{"foo=123"}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "123", }, }, @@ -346,7 +345,7 @@ func TestMergeValuesCLI(t *testing.T) { opts: Options{ LiteralValues: []string{"foo=true"}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "foo": "true", }, }, @@ -358,7 +357,7 @@ func TestMergeValuesCLI(t *testing.T) { JSONValues: []string{`{"c": "foo1"}`}, LiteralValues: []string{"d=bar1"}, }, - expected: map[string]interface{}{ + expected: map[string]any{ "a": "foo", "b": "bar", "c": "foo1", diff --git a/pkg/cmd/completion.go b/pkg/cmd/completion.go index 6f6dbd25d..c02462d9d 100644 --- a/pkg/cmd/completion.go +++ b/pkg/cmd/completion.go @@ -193,7 +193,7 @@ compdef _helm %[1]s } // Cobra doesn't source zsh completion file, explicitly doing it here - fmt.Fprintf(out, "compdef _helm helm") + fmt.Fprint(out, "compdef _helm helm") return err } diff --git a/pkg/cmd/create.go b/pkg/cmd/create.go index 435c8ca82..b38ce6a12 100644 --- a/pkg/cmd/create.go +++ 
b/pkg/cmd/create.go @@ -23,6 +23,9 @@ import ( "github.com/spf13/cobra" + chartv3 "helm.sh/helm/v4/internal/chart/v3" + chartutilv3 "helm.sh/helm/v4/internal/chart/v3/util" + "helm.sh/helm/v4/internal/gates" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/cmd/require" @@ -51,9 +54,10 @@ will be overwritten, but other files will be left alone. ` type createOptions struct { - starter string // --starter - name string - starterDir string + starter string // --starter + name string + starterDir string + chartAPIVersion string // --chart-api-version } func newCreateCmd(out io.Writer) *cobra.Command { @@ -81,12 +85,32 @@ func newCreateCmd(out io.Writer) *cobra.Command { } cmd.Flags().StringVarP(&o.starter, "starter", "p", "", "the name or absolute path to Helm starter scaffold") + cmd.Flags().StringVar(&o.chartAPIVersion, "chart-api-version", chart.APIVersionV2, "chart API version to use (v2 or v3)") + + if !gates.ChartV3.IsEnabled() { + cmd.Flags().MarkHidden("chart-api-version") + } + return cmd } func (o *createOptions) run(out io.Writer) error { fmt.Fprintf(out, "Creating %s\n", o.name) + switch o.chartAPIVersion { + case chart.APIVersionV2, "": + return o.createV2Chart(out) + case chartv3.APIVersionV3: + if !gates.ChartV3.IsEnabled() { + return gates.ChartV3.Error() + } + return o.createV3Chart(out) + default: + return fmt.Errorf("unsupported chart API version: %s (supported: v2, v3)", o.chartAPIVersion) + } +} + +func (o *createOptions) createV2Chart(out io.Writer) error { chartname := filepath.Base(o.name) cfile := &chart.Metadata{ Name: chartname, @@ -111,3 +135,29 @@ func (o *createOptions) run(out io.Writer) error { _, err := chartutil.Create(chartname, filepath.Dir(o.name)) return err } + +func (o *createOptions) createV3Chart(out io.Writer) error { + chartname := filepath.Base(o.name) + cfile := &chartv3.Metadata{ + Name: chartname, + Description: "A Helm chart for Kubernetes", + Type: "application", + 
Version: "0.1.0", + AppVersion: "0.1.0", + APIVersion: chartv3.APIVersionV3, + } + + if o.starter != "" { + // Create from the starter + lstarter := filepath.Join(o.starterDir, o.starter) + // If path is absolute, we don't want to prefix it with helm starters folder + if filepath.IsAbs(o.starter) { + lstarter = o.starter + } + return chartutilv3.CreateFrom(cfile, filepath.Dir(o.name), lstarter) + } + + chartutilv3.Stderr = out + _, err := chartutilv3.Create(chartname, filepath.Dir(o.name)) + return err +} diff --git a/pkg/cmd/create_test.go b/pkg/cmd/create_test.go index 90ed90eff..1a1289775 100644 --- a/pkg/cmd/create_test.go +++ b/pkg/cmd/create_test.go @@ -17,14 +17,18 @@ limitations under the License. package cmd import ( - "fmt" "os" "path/filepath" + "slices" "testing" + chartv3 "helm.sh/helm/v4/internal/chart/v3" + chartutilv3 "helm.sh/helm/v4/internal/chart/v3/util" + "helm.sh/helm/v4/internal/gates" "helm.sh/helm/v4/internal/test/ensure" - chart "helm.sh/helm/v4/pkg/chart/v2" - "helm.sh/helm/v4/pkg/chart/v2/loader" + chart "helm.sh/helm/v4/pkg/chart" + chartloader "helm.sh/helm/v4/pkg/chart/loader" + chartv2 "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/helmpath" ) @@ -43,150 +47,254 @@ func TestCreateCmd(t *testing.T) { if fi, err := os.Stat(cname); err != nil { t.Fatalf("no chart directory: %s", err) } else if !fi.IsDir() { - t.Fatalf("chart is not directory") + t.Fatal("chart is not directory") } - c, err := loader.LoadDir(cname) + c, err := chartloader.LoadDir(cname) if err != nil { t.Fatal(err) } - if c.Name() != cname { - t.Errorf("Expected %q name, got %q", cname, c.Name()) + acc, err := chart.NewAccessor(c) + if err != nil { + t.Fatal(err) + } + + if acc.Name() != cname { + t.Errorf("Expected %q name, got %q", cname, acc.Name()) + } + metadata := acc.MetadataAsMap() + apiVersion, ok := metadata["APIVersion"].(string) + if !ok { + t.Fatal("APIVersion not found in metadata") } - if 
c.Metadata.APIVersion != chart.APIVersionV2 { - t.Errorf("Wrong API version: %q", c.Metadata.APIVersion) + if apiVersion != chartv2.APIVersionV2 { + t.Errorf("Wrong API version: %q", apiVersion) } } func TestCreateStarterCmd(t *testing.T) { + tests := []struct { + name string + chartAPIVersion string + useAbsolutePath bool + expectedVersion string + }{ + { + name: "v2 with relative starter path", + chartAPIVersion: "", + useAbsolutePath: false, + expectedVersion: chartv2.APIVersionV2, + }, + { + name: "v2 with absolute starter path", + chartAPIVersion: "", + useAbsolutePath: true, + expectedVersion: chartv2.APIVersionV2, + }, + { + name: "v3 with relative starter path", + chartAPIVersion: "v3", + useAbsolutePath: false, + expectedVersion: chartv3.APIVersionV3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Chdir(t.TempDir()) + ensure.HelmHome(t) + defer resetEnv()() + + // Enable feature gate for v3 charts + if tt.chartAPIVersion == "v3" { + t.Setenv(string(gates.ChartV3), "1") + } + + cname := "testchart" + + // Create a starter using the appropriate chartutil + starterchart := helmpath.DataPath("starters") + os.MkdirAll(starterchart, 0o755) + var err error + var dest string + if tt.chartAPIVersion == "v3" { + dest, err = chartutilv3.Create("starterchart", starterchart) + } else { + dest, err = chartutil.Create("starterchart", starterchart) + } + if err != nil { + t.Fatalf("Could not create chart: %s", err) + } + t.Logf("Created %s", dest) + + tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl") + if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil { + t.Fatalf("Could not write template: %s", err) + } + + // Build the command + starterArg := "starterchart" + if tt.useAbsolutePath { + starterArg = filepath.Join(starterchart, "starterchart") + } + cmd := "create --starter=" + starterArg + if tt.chartAPIVersion == "v3" { + cmd += " --chart-api-version=" + chartv3.APIVersionV3 + } else { + cmd 
+= " --chart-api-version=" + chartv2.APIVersionV2 + } + cmd += " " + cname + + // Run create + if _, _, err := executeActionCommand(cmd); err != nil { + t.Fatalf("Failed to run create: %s", err) + } + + // Test that the chart is there + if fi, err := os.Stat(cname); err != nil { + t.Fatalf("no chart directory: %s", err) + } else if !fi.IsDir() { + t.Fatal("chart is not directory") + } + + // Load and verify the chart + c, err := chartloader.LoadDir(cname) + if err != nil { + t.Fatal(err) + } + + acc, err := chart.NewAccessor(c) + if err != nil { + t.Fatal(err) + } + + chartName := acc.Name() + metadata := acc.MetadataAsMap() + apiVersion, ok := metadata["APIVersion"].(string) + if !ok { + t.Fatal("APIVersion not found in metadata") + } + var templates []string + for _, tpl := range acc.Templates() { + templates = append(templates, tpl.Name) + } + + if chartName != cname { + t.Errorf("Expected %q name, got %q", cname, chartName) + } + if apiVersion != tt.expectedVersion { + t.Errorf("Wrong API version: expected %q, got %q", tt.expectedVersion, apiVersion) + } + + // Verify custom template exists + found := slices.Contains(templates, "templates/foo.tpl") + if !found { + t.Error("Did not find foo.tpl") + } + }) + } +} + +func TestCreateFileCompletion(t *testing.T) { + checkFileCompletion(t, "create", true) + checkFileCompletion(t, "create myname", false) +} + +func TestCreateCmdChartAPIVersionV2(t *testing.T) { t.Chdir(t.TempDir()) ensure.HelmHome(t) cname := "testchart" - defer resetEnv()() - // Create a starter. 
- starterchart := helmpath.DataPath("starters") - os.MkdirAll(starterchart, 0o755) - if dest, err := chartutil.Create("starterchart", starterchart); err != nil { - t.Fatalf("Could not create chart: %s", err) - } else { - t.Logf("Created %s", dest) - } - tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl") - if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil { - t.Fatalf("Could not write template: %s", err) - } - // Run a create - if _, _, err := executeActionCommand(fmt.Sprintf("create --starter=starterchart %s", cname)); err != nil { - t.Errorf("Failed to run create: %s", err) - return + // Run a create with explicit v2 + if _, _, err := executeActionCommand("create --chart-api-version=v2 " + cname); err != nil { + t.Fatalf("Failed to run create: %s", err) } // Test that the chart is there if fi, err := os.Stat(cname); err != nil { t.Fatalf("no chart directory: %s", err) } else if !fi.IsDir() { - t.Fatalf("chart is not directory") + t.Fatal("chart is not directory") } - c, err := loader.LoadDir(cname) + c, err := chartloader.LoadDir(cname) if err != nil { t.Fatal(err) } - if c.Name() != cname { - t.Errorf("Expected %q name, got %q", cname, c.Name()) - } - if c.Metadata.APIVersion != chart.APIVersionV2 { - t.Errorf("Wrong API version: %q", c.Metadata.APIVersion) + acc, err := chart.NewAccessor(c) + if err != nil { + t.Fatal(err) } - expectedNumberOfTemplates := 10 - if l := len(c.Templates); l != expectedNumberOfTemplates { - t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l) + if acc.Name() != cname { + t.Errorf("Expected %q name, got %q", cname, acc.Name()) } - - found := false - for _, tpl := range c.Templates { - if tpl.Name == "templates/foo.tpl" { - found = true - if data := string(tpl.Data); data != "test" { - t.Errorf("Expected template 'test', got %q", data) - } - } + metadata := acc.MetadataAsMap() + apiVersion, ok := metadata["APIVersion"].(string) + if !ok { + t.Fatal("APIVersion not found 
in metadata") } - if !found { - t.Error("Did not find foo.tpl") + if apiVersion != chartv2.APIVersionV2 { + t.Errorf("Wrong API version: expected %q, got %q", chartv2.APIVersionV2, apiVersion) } } -func TestCreateStarterAbsoluteCmd(t *testing.T) { +func TestCreateCmdChartAPIVersionV3(t *testing.T) { t.Chdir(t.TempDir()) - defer resetEnv()() ensure.HelmHome(t) + t.Setenv(string(gates.ChartV3), "1") cname := "testchart" - // Create a starter. - starterchart := helmpath.DataPath("starters") - os.MkdirAll(starterchart, 0o755) - if dest, err := chartutil.Create("starterchart", starterchart); err != nil { - t.Fatalf("Could not create chart: %s", err) - } else { - t.Logf("Created %s", dest) - } - tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl") - if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil { - t.Fatalf("Could not write template: %s", err) - } - - starterChartPath := filepath.Join(starterchart, "starterchart") - - // Run a create - if _, _, err := executeActionCommand(fmt.Sprintf("create --starter=%s %s", starterChartPath, cname)); err != nil { - t.Errorf("Failed to run create: %s", err) - return + // Run a create with v3 + if _, _, err := executeActionCommand("create --chart-api-version=v3 " + cname); err != nil { + t.Fatalf("Failed to run create: %s", err) } // Test that the chart is there if fi, err := os.Stat(cname); err != nil { t.Fatalf("no chart directory: %s", err) } else if !fi.IsDir() { - t.Fatalf("chart is not directory") + t.Fatal("chart is not directory") } - c, err := loader.LoadDir(cname) + c, err := chartloader.LoadDir(cname) if err != nil { t.Fatal(err) } - if c.Name() != cname { - t.Errorf("Expected %q name, got %q", cname, c.Name()) - } - if c.Metadata.APIVersion != chart.APIVersionV2 { - t.Errorf("Wrong API version: %q", c.Metadata.APIVersion) + acc, err := chart.NewAccessor(c) + if err != nil { + t.Fatal(err) } - expectedNumberOfTemplates := 10 - if l := len(c.Templates); l != 
expectedNumberOfTemplates { - t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l) + if acc.Name() != cname { + t.Errorf("Expected %q name, got %q", cname, acc.Name()) } - - found := false - for _, tpl := range c.Templates { - if tpl.Name == "templates/foo.tpl" { - found = true - if data := string(tpl.Data); data != "test" { - t.Errorf("Expected template 'test', got %q", data) - } - } + metadata := acc.MetadataAsMap() + apiVersion, ok := metadata["APIVersion"].(string) + if !ok { + t.Fatal("APIVersion not found in metadata") } - if !found { - t.Error("Did not find foo.tpl") + if apiVersion != chartv3.APIVersionV3 { + t.Errorf("Wrong API version: expected %q, got %q", chartv3.APIVersionV3, apiVersion) } } -func TestCreateFileCompletion(t *testing.T) { - checkFileCompletion(t, "create", true) - checkFileCompletion(t, "create myname", false) +func TestCreateCmdInvalidChartAPIVersion(t *testing.T) { + t.Chdir(t.TempDir()) + ensure.HelmHome(t) + cname := "testchart" + + // Run a create with invalid version + _, _, err := executeActionCommand("create --chart-api-version=v1 " + cname) + if err == nil { + t.Fatal("Expected error for invalid API version, got nil") + } + + expectedErr := "unsupported chart API version: v1 (supported: v2, v3)" + if err.Error() != expectedErr { + t.Errorf("Expected error %q, got %q", expectedErr, err.Error()) + } } diff --git a/pkg/cmd/dependency_build.go b/pkg/cmd/dependency_build.go index 7e5c731b7..b8ac16e60 100644 --- a/pkg/cmd/dependency_build.go +++ b/pkg/cmd/dependency_build.go @@ -16,6 +16,7 @@ limitations under the License. package cmd import ( + "errors" "fmt" "io" "os" @@ -76,7 +77,8 @@ func newDependencyBuildCmd(out io.Writer) *cobra.Command { man.Verify = downloader.VerifyIfPossible } err = man.Build() - if e, ok := err.(downloader.ErrRepoNotFound); ok { + var e downloader.ErrRepoNotFound + if errors.As(err, &e) { return fmt.Errorf("%s. 
Please add the missing repos via 'helm repo add'", e.Error()) } return err diff --git a/pkg/cmd/dependency_update_test.go b/pkg/cmd/dependency_update_test.go index 3eaa51df1..f9ff68097 100644 --- a/pkg/cmd/dependency_update_test.go +++ b/pkg/cmd/dependency_update_test.go @@ -208,7 +208,7 @@ func TestDependencyUpdateCmd_DoNotDeleteOldChartsOnError(t *testing.T) { // Make sure tmpcharts-x is deleted tmpPath := filepath.Join(dir(chartname), fmt.Sprintf("tmpcharts-%d", os.Getpid())) if _, err := os.Stat(tmpPath); !errors.Is(err, fs.ErrNotExist) { - t.Fatalf("tmpcharts dir still exists") + t.Fatal("tmpcharts dir still exists") } } diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 251bfa032..5a220d1ce 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "errors" "flag" "fmt" "log" @@ -59,9 +60,8 @@ func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) { cmd.Flags().Var( newWaitValue(kube.HookOnlyStrategy, wait), "wait", - "if specified, wait until resources are ready (up to --timeout). Values: 'watcher', 'hookOnly', and 'legacy'.", + "wait until resources are ready (up to --timeout). Use '--wait' alone for 'watcher' strategy, or specify one of: 'watcher', 'hookOnly', 'legacy'. Default when flag is omitted: 'hookOnly'.", ) - // Sets the strategy to use the watcher strategy if `--wait` is used without an argument cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy) } @@ -120,7 +120,7 @@ func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) { // value to the given format pointer func bindOutputFlag(cmd *cobra.Command, varRef *output.Format) { cmd.Flags().VarP(newOutputValue(output.Table, varRef), outputFlag, "o", - fmt.Sprintf("prints the output in the specified format. Allowed values: %s", strings.Join(output.Formats(), ", "))) + "prints the output in the specified format. 
Allowed values: "+strings.Join(output.Formats(), ", ")) err := cmd.RegisterFlagCompletionFunc(outputFlag, func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { var formatNames []string @@ -196,7 +196,7 @@ func (p *postRendererString) Set(val string) error { return nil } if p.options.pluginName != "" { - return fmt.Errorf("cannot specify --post-renderer flag more than once") + return errors.New("cannot specify --post-renderer flag more than once") } p.options.pluginName = val pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...) diff --git a/pkg/cmd/get_all.go b/pkg/cmd/get_all.go index 32744796c..bce89d7d3 100644 --- a/pkg/cmd/get_all.go +++ b/pkg/cmd/get_all.go @@ -53,7 +53,7 @@ func newGetAllCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { return err } if template != "" { - data := map[string]interface{}{ + data := map[string]any{ "Release": res, } return tpl(template, data, out) diff --git a/pkg/cmd/get_values.go b/pkg/cmd/get_values.go index 02b195551..c6a89b00c 100644 --- a/pkg/cmd/get_values.go +++ b/pkg/cmd/get_values.go @@ -33,7 +33,7 @@ This command downloads a values file for a given release. ` type valuesWriter struct { - vals map[string]interface{} + vals map[string]any allValues bool } diff --git a/pkg/cmd/history.go b/pkg/cmd/history.go index b294a9da7..3349b7bc1 100644 --- a/pkg/cmd/history.go +++ b/pkg/cmd/history.go @@ -106,7 +106,7 @@ type releaseInfoJSON struct { // It handles empty string time fields by treating them as zero values. 
func (r *releaseInfo) UnmarshalJSON(data []byte) error { // First try to unmarshal into a map to handle empty string time fields - var raw map[string]interface{} + var raw map[string]any if err := json.Unmarshal(data, &raw); err != nil { return err } @@ -260,7 +260,7 @@ func compListRevisions(_ string, cfg *action.Configuration, releaseName string) return nil, cobra.ShellCompDirectiveError } for _, version := range hist { - appVersion := fmt.Sprintf("App: %s", version.Chart.Metadata.AppVersion) + appVersion := "App: " + version.Chart.Metadata.AppVersion chartDesc := fmt.Sprintf("Chart: %s-%s", version.Chart.Metadata.Name, version.Chart.Metadata.Version) revisions = append(revisions, fmt.Sprintf("%s\t%s, %s", strconv.Itoa(version.Version), appVersion, chartDesc)) } diff --git a/pkg/cmd/history_test.go b/pkg/cmd/history_test.go index d8adc2d19..b536bca36 100644 --- a/pkg/cmd/history_test.go +++ b/pkg/cmd/history_test.go @@ -321,7 +321,7 @@ func TestReleaseInfoEmptyStringRoundTrip(t *testing.T) { data, err := json.Marshal(&info) require.NoError(t, err) - var result map[string]interface{} + var result map[string]any err = json.Unmarshal(data, &result) require.NoError(t, err) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index d36cd9e34..67e2a9fab 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -201,6 +201,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema") f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback (uninstall) the installation upon failure. 
The --wait flag will be default to \"watcher\" if --rollback-on-failure is set") + f.BoolVar(&client.RollbackOnFailure, "atomic", false, "deprecated") f.MarkDeprecated("atomic", "use --rollback-on-failure instead") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") @@ -209,6 +210,26 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.EnableDNS, "enable-dns", false, "enable DNS lookups when rendering templates") f.BoolVar(&client.HideNotes, "hide-notes", false, "if set, do not show notes in install output. Does not affect presence in chart metadata") f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") + + // For `helm template`, these notes flags are legacy, unused, and should not show in help, but + // must remain accepted for backwards compatibility in Helm 4. 
Deprecate and hide them for now + // TODO remove these from template command in Helm 5 + if cmd.Name() == "template" { + if err := cmd.Flags().MarkDeprecated("hide-notes", "this flag has no effect for 'helm template' and will be removed in Helm 5"); err != nil { + log.Fatal(err) + } + if err := cmd.Flags().MarkHidden("hide-notes"); err != nil { + log.Fatal(err) + } + + if err := cmd.Flags().MarkDeprecated("render-subchart-notes", "this flag has no effect for 'helm template' and will be removed in Helm 5"); err != nil { + log.Fatal(err) + } + if err := cmd.Flags().MarkHidden("render-subchart-notes"); err != nil { + log.Fatal(err) + } + } + addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) AddWaitFlag(cmd, &client.WaitStrategy) @@ -301,7 +322,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options return nil, fmt.Errorf("failed reloading chart after repo update: %w", err) } } else { - return nil, fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err) + return nil, fmt.Errorf("an error occurred while checking for chart dependencies. 
You may need to run 'helm dependency build' to fetch missing dependencies: %w", err) } } } diff --git a/pkg/cmd/install_test.go b/pkg/cmd/install_test.go index f0f12e4f7..8d3435e03 100644 --- a/pkg/cmd/install_test.go +++ b/pkg/cmd/install_test.go @@ -240,7 +240,7 @@ func TestInstall(t *testing.T) { // Install chart with only crds { name: "install chart with only crds", - cmd: "install crd-test testdata/testcharts/chart-with-only-crds --namespace default", + cmd: "install crd-test testdata/testcharts/chart-with-only-crds --namespace default --dry-run", }, // Verify the user/pass works { @@ -291,27 +291,27 @@ func TestInstallVersionCompletion(t *testing.T) { tests := []cmdTestCase{{ name: "completion for install version flag with release name", - cmd: fmt.Sprintf("%s __complete install releasename testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete install releasename testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for install version flag with generate-name", - cmd: fmt.Sprintf("%s __complete install --generate-name testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete install --generate-name testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for install version flag, no filter", - cmd: fmt.Sprintf("%s __complete install releasename testing/alpine --version 0.3", repoSetup), + cmd: repoSetup + " __complete install releasename testing/alpine --version 0.3", golden: "output/version-comp.txt", }, { name: "completion for install version flag too few args", - cmd: fmt.Sprintf("%s __complete install testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete install testing/alpine --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for install version flag too many args", - cmd: fmt.Sprintf("%s __complete install releasename testing/alpine badarg --version ''", repoSetup), + cmd: repoSetup + " __complete install 
releasename testing/alpine badarg --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for install version flag invalid chart", - cmd: fmt.Sprintf("%s __complete install releasename invalid/invalid --version ''", repoSetup), + cmd: repoSetup + " __complete install releasename invalid/invalid --version ''", golden: "output/version-invalid-comp.txt", }} runTestCmd(t, tests) diff --git a/pkg/cmd/lint_test.go b/pkg/cmd/lint_test.go index f825e36e2..82fe249f6 100644 --- a/pkg/cmd/lint_test.go +++ b/pkg/cmd/lint_test.go @@ -25,12 +25,12 @@ func TestLintCmdWithSubchartsFlag(t *testing.T) { testChart := "testdata/testcharts/chart-with-bad-subcharts" tests := []cmdTestCase{{ name: "lint good chart with bad subcharts", - cmd: fmt.Sprintf("lint %s", testChart), + cmd: "lint " + testChart, golden: "output/lint-chart-with-bad-subcharts.txt", wantError: true, }, { name: "lint good chart with bad subcharts using --with-subcharts flag", - cmd: fmt.Sprintf("lint --with-subcharts %s", testChart), + cmd: "lint --with-subcharts " + testChart, golden: "output/lint-chart-with-bad-subcharts-with-subcharts.txt", wantError: true, }} @@ -42,7 +42,7 @@ func TestLintCmdWithQuietFlag(t *testing.T) { testChart2 := "testdata/testcharts/chart-bad-requirements" tests := []cmdTestCase{{ name: "lint good chart using --quiet flag", - cmd: fmt.Sprintf("lint --quiet %s", testChart1), + cmd: "lint --quiet " + testChart1, golden: "output/lint-quiet.txt", }, { name: "lint two charts, one with error using --quiet flag", @@ -67,24 +67,24 @@ func TestLintCmdWithKubeVersionFlag(t *testing.T) { testChart := "testdata/testcharts/chart-with-deprecated-api" tests := []cmdTestCase{{ name: "lint chart with deprecated api version using kube version flag", - cmd: fmt.Sprintf("lint --kube-version 1.22.0 %s", testChart), + cmd: "lint --kube-version 1.22.0 " + testChart, golden: "output/lint-chart-with-deprecated-api.txt", wantError: false, }, { name: "lint chart with deprecated api version 
using kube version and strict flag", - cmd: fmt.Sprintf("lint --kube-version 1.22.0 --strict %s", testChart), + cmd: "lint --kube-version 1.22.0 --strict " + testChart, golden: "output/lint-chart-with-deprecated-api-strict.txt", wantError: true, }, { // the test builds will use the kubeVersionMinorTesting const in capabilities.go // which is "20" name: "lint chart with deprecated api version without kube version", - cmd: fmt.Sprintf("lint %s", testChart), + cmd: "lint " + testChart, golden: "output/lint-chart-with-deprecated-api-old-k8s.txt", wantError: false, }, { name: "lint chart with deprecated api version with older kube version", - cmd: fmt.Sprintf("lint --kube-version 1.21.0 --strict %s", testChart), + cmd: "lint --kube-version 1.20.0 --strict " + testChart, golden: "output/lint-chart-with-deprecated-api-old-k8s.txt", wantError: false, }} diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go index 3c15a0954..54becddca 100644 --- a/pkg/cmd/list.go +++ b/pkg/cmd/list.go @@ -254,7 +254,7 @@ func filterReleases(releases []*release.Release, ignoredReleaseNames []string) [ // Provide dynamic auto-completion for release names func compListReleases(toComplete string, ignoredReleaseNames []string, cfg *action.Configuration) ([]string, cobra.ShellCompDirective) { - cobra.CompDebugln(fmt.Sprintf("compListReleases with toComplete %s", toComplete), settings.Debug) + cobra.CompDebugln("compListReleases with toComplete "+toComplete, settings.Debug) client := action.NewList(cfg) client.All = true diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index ddda5fde6..029dd04f5 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" "context" + "errors" "fmt" "io" "log/slog" @@ -120,7 +121,8 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { Stderr: os.Stderr, } _, err = plug.Invoke(context.Background(), input) - if execErr, ok := err.(*plugin.InvokeExecError); ok { + execErr := &plugin.InvokeExecError{} 
+ if errors.As(err, &execErr) { return CommandError{ error: execErr.Err, ExitCode: execErr.ExitCode, @@ -132,7 +134,13 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { DisableFlagParsing: true, } - // TODO: Make sure a command with this name does not already exist. + for _, cmd := range baseCmd.Commands() { + if cmd.Name() == c.Name() { + slog.Error("failed to load plugins: name conflicts", slog.String("name", c.Name())) + return + } + } + baseCmd.AddCommand(c) // For completion, we try to load more details about the plugins so as to allow for command and diff --git a/pkg/cmd/package.go b/pkg/cmd/package.go index 96c0c47b2..14f9c8425 100644 --- a/pkg/cmd/package.go +++ b/pkg/cmd/package.go @@ -57,7 +57,7 @@ func newPackageCmd(out io.Writer) *cobra.Command { Long: packageDesc, RunE: func(_ *cobra.Command, args []string) error { if len(args) == 0 { - return fmt.Errorf("need at least one argument, the path to the chart") + return errors.New("need at least one argument, the path to the chart") } if client.Sign { if client.Key == "" { diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index efa9b466c..c248ed818 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -16,6 +16,7 @@ limitations under the License. package cmd import ( + "errors" "fmt" "io" "log/slog" @@ -50,11 +51,11 @@ const pluginInstallDesc = ` This command allows you to install a plugin from a url to a VCS repo or a local path. By default, plugin signatures are verified before installation when installing from -tarballs (.tgz or .tar.gz). This requires a corresponding .prov file to be available -alongside the tarball. +tarballs (.tgz or .tar.gz). A corresponding .prov file must be available alongside +the tarball; installation will fail if it is missing or invalid. For local development, plugins installed from local directories are automatically treated as "local dev" and do not require signatures. 
-Use --verify=false to skip signature verification for remote plugins. +Use --verify=false to explicitly skip signature verification (NOT recommended). ` func newPluginInstallCmd(out io.Writer) *cobra.Command { @@ -102,7 +103,7 @@ func (o *pluginInstallOptions) complete(args []string) error { func (o *pluginInstallOptions) newInstallerForSource() (installer.Installer, error) { // Check if source is an OCI registry reference - if strings.HasPrefix(o.source, fmt.Sprintf("%s://", registry.OCIScheme)) { + if strings.HasPrefix(o.source, registry.OCIScheme+"://") { // Build getter options for OCI options := []getter.Option{ getter.WithTLSClientConfig(o.certFile, o.keyFile, o.caFile), @@ -131,15 +132,15 @@ func (o *pluginInstallOptions) run(out io.Writer) error { if localInst, ok := i.(*installer.LocalInstaller); ok && !localInst.SupportsVerification() { // Local directory installations are allowed without verification shouldVerify = false - fmt.Fprintf(out, "Installing plugin from local directory (development mode)\n") + fmt.Fprint(out, "Installing plugin from local directory (development mode)\n") } else if shouldVerify { // For remote installations, check if verification is supported if verifier, ok := i.(installer.Verifier); !ok || !verifier.SupportsVerification() { - return fmt.Errorf("plugin source does not support verification. Use --verify=false to skip verification") + return errors.New("plugin source does not support verification. 
Use --verify=false to skip verification") } } else { // User explicitly disabled verification - fmt.Fprintf(out, "WARNING: Skipping plugin signature verification\n") + fmt.Fprint(out, "WARNING: Skipping plugin signature verification\n") } // Set up installation options @@ -150,7 +151,7 @@ func (o *pluginInstallOptions) run(out io.Writer) error { // If verify is requested, show verification output if shouldVerify { - fmt.Fprintf(out, "Verifying plugin signature...\n") + fmt.Fprint(out, "Verifying plugin signature...\n") } // Install the plugin with options diff --git a/pkg/cmd/plugin_package.go b/pkg/cmd/plugin_package.go index 05f8bb5ad..d82f4d2cc 100644 --- a/pkg/cmd/plugin_package.go +++ b/pkg/cmd/plugin_package.go @@ -81,7 +81,7 @@ func (o *pluginPackageOptions) run(out io.Writer) error { return err } if !fi.IsDir() { - return fmt.Errorf("plugin package only supports directories, not tarballs") + return errors.New("plugin package only supports directories, not tarballs") } // Load and validate plugin metadata @@ -119,7 +119,7 @@ func (o *pluginPackageOptions) run(out io.Writer) error { } } else { // User explicitly disabled signing - fmt.Fprintf(out, "WARNING: Skipping plugin signing. This is not recommended for plugins intended for distribution.\n") + fmt.Fprint(out, "WARNING: Skipping plugin signing. 
This is not recommended for plugins intended for distribution.\n") } // Now create the tarball (only after signing prerequisites are met) diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index 05cfe46f1..0a6435d99 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -100,7 +100,7 @@ func TestLoadCLIPlugins(t *testing.T) { "HELM_PLUGINS=testdata/helmhome/helm/plugins", "HELM_REPOSITORY_CONFIG=testdata/helmhome/helm/repositories.yaml", "HELM_REPOSITORY_CACHE=testdata/helmhome/helm/repository", - fmt.Sprintf("HELM_BIN=%s", os.Args[0]), + "HELM_BIN=" + os.Args[0], }, "\n") + "\n" // Test that the YAML file was correctly converted to a command. @@ -114,9 +114,10 @@ func TestLoadCLIPlugins(t *testing.T) { }{ {"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0}, {"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0}, - {"env", "env stuff", "show the env", "HELM_PLUGIN_NAME=env\n", []string{}, 0}, {"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2}, {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0}, + {"shortenv", "env stuff", "show the env", "HELM_PLUGIN_NAME=shortenv\n", []string{}, 0}, + // "noversion": plugin is invalid, and should not be loaded } pluginCmds := cmd.Commands() @@ -254,10 +255,6 @@ func TestLoadCLIPluginsForCompletion(t *testing.T) { tests := []staticCompletionDetails{ {"args", []string{}, []string{}, []staticCompletionDetails{}}, {"echo", []string{}, []string{}, []staticCompletionDetails{}}, - {"env", []string{}, []string{"global"}, []staticCompletionDetails{ - {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, - {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, - }}, {"exitwith", []string{}, []string{}, []staticCompletionDetails{ {"code", []string{}, []string{"a", "b"}, []staticCompletionDetails{}}, }}, @@ -268,6 +265,10 @@ func 
TestLoadCLIPluginsForCompletion(t *testing.T) { {"more", []string{"one", "two"}, []string{"b", "ball"}, []staticCompletionDetails{}}, }}, }}, + {"shortenv", []string{}, []string{"global"}, []staticCompletionDetails{ + {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, + {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, + }}, } checkCommand(t, cmd.Commands(), tests) } diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index 85eb46219..c75cf6264 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -62,7 +62,7 @@ func (o *pluginUninstallOptions) complete(args []string) error { func (o *pluginUninstallOptions) run(out io.Writer) error { slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory) - plugins, err := plugin.LoadAll(settings.PluginsDirectory) + plugins, err := plugin.LoadAllDir(settings.PluginsDirectory, plugin.LogIgnorePluginLoadErrorFilterFunc) if err != nil { return err } diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index 6cc2729fc..83ef35107 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -62,7 +62,7 @@ func (o *pluginUpdateOptions) complete(args []string) error { func (o *pluginUpdateOptions) run(out io.Writer) error { slog.Debug("loading installed plugins", "path", settings.PluginsDirectory) - plugins, err := plugin.LoadAll(settings.PluginsDirectory) + plugins, err := plugin.LoadAllDir(settings.PluginsDirectory, plugin.LogIgnorePluginLoadErrorFilterFunc) if err != nil { return err } diff --git a/pkg/cmd/plugin_verify.go b/pkg/cmd/plugin_verify.go index 5f89e743e..fc54a9d77 100644 --- a/pkg/cmd/plugin_verify.go +++ b/pkg/cmd/plugin_verify.go @@ -16,6 +16,7 @@ limitations under the License. 
package cmd import ( + "errors" "fmt" "io" "os" @@ -75,12 +76,12 @@ func (o *pluginVerifyOptions) run(out io.Writer) error { // Only support tarball verification if fi.IsDir() { - return fmt.Errorf("directory verification not supported - only plugin tarballs can be verified") + return errors.New("directory verification not supported - only plugin tarballs can be verified") } // Verify it's a tarball if !plugin.IsTarball(o.pluginPath) { - return fmt.Errorf("plugin file must be a gzipped tarball (.tar.gz or .tgz)") + return errors.New("plugin file must be a gzipped tarball (.tar.gz or .tgz)") } // Look for provenance file diff --git a/pkg/cmd/printer.go b/pkg/cmd/printer.go index 30238f5bb..3a3840cfc 100644 --- a/pkg/cmd/printer.go +++ b/pkg/cmd/printer.go @@ -21,7 +21,7 @@ import ( "text/template" ) -func tpl(t string, vals map[string]interface{}, out io.Writer) error { +func tpl(t string, vals map[string]any, out io.Writer) error { tt, err := template.New("_").Parse(t) if err != nil { return err diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go index 96631fe05..f749c218c 100644 --- a/pkg/cmd/pull_test.go +++ b/pkg/cmd/pull_test.go @@ -22,6 +22,7 @@ import ( "net/http/httptest" "os" "path/filepath" + "strings" "testing" "helm.sh/helm/v4/pkg/repo/v1/repotest" @@ -341,7 +342,7 @@ func runPullTests(t *testing.T, tests []struct { func buildOCIURL(registryURL, chartName, version, username, password string) string { baseURL := fmt.Sprintf("oci://%s/u/ocitestuser/%s", registryURL, chartName) if version != "" { - baseURL += fmt.Sprintf(" --version %s", version) + baseURL += " --version " + version } if username != "" && password != "" { baseURL += fmt.Sprintf(" --username %s --password %s", username, password) @@ -415,23 +416,23 @@ func TestPullVersionCompletion(t *testing.T) { tests := []cmdTestCase{{ name: "completion for pull version flag", - cmd: fmt.Sprintf("%s __complete pull testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete pull 
testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for pull version flag, no filter", - cmd: fmt.Sprintf("%s __complete pull testing/alpine --version 0.3", repoSetup), + cmd: repoSetup + " __complete pull testing/alpine --version 0.3", golden: "output/version-comp.txt", }, { name: "completion for pull version flag too few args", - cmd: fmt.Sprintf("%s __complete pull --version ''", repoSetup), + cmd: repoSetup + " __complete pull --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for pull version flag too many args", - cmd: fmt.Sprintf("%s __complete pull testing/alpine badarg --version ''", repoSetup), + cmd: repoSetup + " __complete pull testing/alpine badarg --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for pull version flag invalid chart", - cmd: fmt.Sprintf("%s __complete pull invalid/invalid --version ''", repoSetup), + cmd: repoSetup + " __complete pull invalid/invalid --version ''", golden: "output/version-invalid-comp.txt", }} runTestCmd(t, tests) @@ -506,3 +507,54 @@ func TestPullFileCompletion(t *testing.T) { checkFileCompletion(t, "pull", false) checkFileCompletion(t, "pull repo/chart", false) } + +// TestPullOCIWithTagAndDigest tests pulling an OCI chart with both tag and digest specified. 
+// This is a regression test for https://github.com/helm/helm/issues/31600 +func TestPullOCIWithTagAndDigest(t *testing.T) { + srv := repotest.NewTempServer( + t, + repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"), + ) + defer srv.Stop() + + ociSrv, err := repotest.NewOCIServer(t, srv.Root()) + if err != nil { + t.Fatal(err) + } + result := ociSrv.RunWithReturn(t) + + contentCache := t.TempDir() + outdir := t.TempDir() + + // Test: pull with tag and digest (the fixed bug from issue #31600) + // Previously this failed with "encoding/hex: invalid byte: U+0073 's'" + ref := fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart:0.1.0@%s", + ociSrv.RegistryURL, result.PushedChart.Manifest.Digest) + + cmd := fmt.Sprintf("pull %s -d '%s' --registry-config %s --content-cache %s --plain-http", + ref, + outdir, + filepath.Join(srv.Root(), "config.json"), + contentCache, + ) + + _, _, err = executeActionCommand(cmd) + if err != nil { + t.Fatalf("pull with tag+digest failed: %v", err) + } + + // Verify the file was downloaded + // When digest is present, the filename uses the digest format (e.g. 
chart@sha256-hex.tgz) + expectedFile := filepath.Join(outdir, "oci-dependent-chart-0.1.0.tgz") + if _, err := os.Stat(expectedFile); err != nil { + // Try the digest-based filename; parse algorithm:hex to avoid fixed-offset assumptions + algorithm, digestPart, ok := strings.Cut(result.PushedChart.Manifest.Digest, ":") + if !ok { + t.Fatalf("digest must be in algorithm:hex format, got %q", result.PushedChart.Manifest.Digest) + } + expectedFile = filepath.Join(outdir, fmt.Sprintf("oci-dependent-chart@%s-%s.tgz", algorithm, digestPart)) + if _, err := os.Stat(expectedFile); err != nil { + t.Errorf("expected chart file not found: %v", err) + } + } +} diff --git a/pkg/cmd/push.go b/pkg/cmd/push.go index f57a7c52f..94c5732ff 100644 --- a/pkg/cmd/push.go +++ b/pkg/cmd/push.go @@ -62,7 +62,7 @@ func newPushCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { var comps []string for _, p := range providers { for _, scheme := range p.Schemes { - comps = append(comps, fmt.Sprintf("%s://", scheme)) + comps = append(comps, scheme+"://") } } return comps, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index 88a6f351f..5a6159e7d 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -55,7 +55,7 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command } return compListReleases(toComplete, args, cfg) }, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) (returnError error) { client.Namespace = settings.Namespace() notName := regexp.MustCompile(`^!\s?name=`) for _, f := range filter { @@ -65,7 +65,16 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, "")) } } - reli, runErr := client.Run(args[0]) + + reli, shutdown, runErr := 
client.Run(args[0]) + defer func() { + if shutdownErr := shutdown(); shutdownErr != nil { + if returnError == nil { + returnError = shutdownErr + } + } + }() + // We only return an error if we weren't even able to get the // release, otherwise we keep going so we can print status and logs // if requested diff --git a/pkg/cmd/repo_add_test.go b/pkg/cmd/repo_add_test.go index df9451d34..8f8e36d75 100644 --- a/pkg/cmd/repo_add_test.go +++ b/pkg/cmd/repo_add_test.go @@ -130,7 +130,7 @@ func TestRepoAdd(t *testing.T) { } if err := o.run(io.Discard); err != nil { - t.Errorf("Duplicate repository name was added") + t.Error("Duplicate repository name was added") } } @@ -162,7 +162,7 @@ func TestRepoAddCheckLegalName(t *testing.T) { t.Fatalf("Actual error %s, not equal to expected error %s", err, wantErrorMsg) } } else { - t.Fatalf("expect reported an error.") + t.Fatal("expect reported an error.") } } diff --git a/pkg/cmd/repo_remove_test.go b/pkg/cmd/repo_remove_test.go index fce15bb73..f2641ccf0 100644 --- a/pkg/cmd/repo_remove_test.go +++ b/pkg/cmd/repo_remove_test.go @@ -196,15 +196,15 @@ func TestRepoRemoveCompletion(t *testing.T) { // and that port changes each time we run the test. 
tests := []cmdTestCase{{ name: "completion for repo remove", - cmd: fmt.Sprintf("%s __completeNoDesc repo remove ''", repoSetup), + cmd: repoSetup + " __completeNoDesc repo remove ''", golden: "output/repo_list_comp.txt", }, { name: "completion for repo remove, no filter", - cmd: fmt.Sprintf("%s __completeNoDesc repo remove fo", repoSetup), + cmd: repoSetup + " __completeNoDesc repo remove fo", golden: "output/repo_list_comp.txt", }, { name: "completion for repo remove repetition", - cmd: fmt.Sprintf("%s __completeNoDesc repo remove foo ''", repoSetup), + cmd: repoSetup + " __completeNoDesc repo remove foo ''", golden: "output/repo_repeat_comp.txt", }} for _, test := range tests { diff --git a/pkg/cmd/require/args_test.go b/pkg/cmd/require/args_test.go index b6c430fc0..89403140e 100644 --- a/pkg/cmd/require/args_test.go +++ b/pkg/cmd/require/args_test.go @@ -16,8 +16,8 @@ limitations under the License. package require import ( - "fmt" "io" + "strconv" "strings" "testing" @@ -65,7 +65,7 @@ type testCase struct { func runTestCases(t *testing.T, testCases []testCase) { t.Helper() for i, tc := range testCases { - t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { cmd := &cobra.Command{ Use: "root", Run: func(*cobra.Command, []string) {}, diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go index 00a2725bc..01d8b1866 100644 --- a/pkg/cmd/rollback.go +++ b/pkg/cmd/rollback.go @@ -61,7 +61,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { if len(args) > 1 { ver, err := strconv.Atoi(args[1]) if err != nil { - return fmt.Errorf("could not convert revision to a number: %v", err) + return fmt.Errorf("could not convert revision to a number: %w", err) } client.Version = ver } @@ -76,7 +76,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { return err } - fmt.Fprintf(out, "Rollback was a success! Happy Helming!\n") + fmt.Fprint(out, "Rollback was a success! 
Happy Helming!\n") return nil }, } diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go index bb2ff6038..f9adb73f4 100644 --- a/pkg/cmd/search_hub.go +++ b/pkg/cmd/search_hub.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "errors" "fmt" "io" "log/slog" @@ -136,12 +137,12 @@ func (h *hubSearchWriter) WriteTable(out io.Writer) error { if len(h.elements) == 0 { // Fail if no results found and --fail-on-no-result is enabled if h.failOnNoResult { - return fmt.Errorf("no results found") + return errors.New("no results found") } _, err := out.Write([]byte("No results found\n")) if err != nil { - return fmt.Errorf("unable to write results: %s", err) + return fmt.Errorf("unable to write results: %w", err) } return nil } @@ -175,7 +176,7 @@ func (h *hubSearchWriter) WriteYAML(out io.Writer) error { func (h *hubSearchWriter) encodeByFormat(out io.Writer, format output.Format) error { // Fail if no results found and --fail-on-no-result is enabled if len(h.elements) == 0 && h.failOnNoResult { - return fmt.Errorf("no results found") + return errors.New("no results found") } // Initialize the array so no results returns an empty array instead of null diff --git a/pkg/cmd/search_hub_test.go b/pkg/cmd/search_hub_test.go index 8e056f771..62881cbdd 100644 --- a/pkg/cmd/search_hub_test.go +++ b/pkg/cmd/search_hub_test.go @@ -168,7 +168,7 @@ func TestSearchHubCmd_FailOnNoResponseTests(t *testing.T) { _, out, err := executeActionCommandC(storage, tt.cmd) if tt.wantErr { if err == nil { - t.Errorf("expected error due to no record in response, got nil") + t.Error("expected error due to no record in response, got nil") } } else { if err != nil { diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index febb138e2..53626f1b6 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -216,12 +216,12 @@ func (r *repoSearchWriter) WriteTable(out io.Writer) error { if len(r.results) == 0 { // Fail if no results found and --fail-on-no-result is 
enabled if r.failOnNoResult { - return fmt.Errorf("no results found") + return errors.New("no results found") } _, err := out.Write([]byte("No results found\n")) if err != nil { - return fmt.Errorf("unable to write results: %s", err) + return fmt.Errorf("unable to write results: %w", err) } return nil } @@ -245,7 +245,7 @@ func (r *repoSearchWriter) WriteYAML(out io.Writer) error { func (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) error { // Fail if no results found and --fail-on-no-result is enabled if len(r.results) == 0 && r.failOnNoResult { - return fmt.Errorf("no results found") + return errors.New("no results found") } // Initialize the array so no results returns an empty array instead of null @@ -307,7 +307,7 @@ func compListChartsOfRepo(repoName string, prefix string) []string { // Provide dynamic auto-completion for commands that operate on charts (e.g., helm show) // When true, the includeFiles argument indicates that completion should include local files (e.g., local charts) func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.ShellCompDirective) { - cobra.CompDebugln(fmt.Sprintf("compListCharts with toComplete %s", toComplete), settings.Debug) + cobra.CompDebugln("compListCharts with toComplete "+toComplete, settings.Debug) noSpace := false noFile := false @@ -323,7 +323,7 @@ func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.Shell if len(repoInfo) > 1 { repoDesc = repoInfo[1] } - repoWithSlash := fmt.Sprintf("%s/", repo) + repoWithSlash := repo + "/" if strings.HasPrefix(toComplete, repoWithSlash) { // Must complete with charts within the specified repo. 
// Don't filter on toComplete to allow for shell fuzzy matching diff --git a/pkg/cmd/show_test.go b/pkg/cmd/show_test.go index ff3671dbc..5da2626d3 100644 --- a/pkg/cmd/show_test.go +++ b/pkg/cmd/show_test.go @@ -99,35 +99,35 @@ func TestShowVersionCompletion(t *testing.T) { tests := []cmdTestCase{{ name: "completion for show version flag", - cmd: fmt.Sprintf("%s __complete show chart testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete show chart testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for show version flag, no filter", - cmd: fmt.Sprintf("%s __complete show chart testing/alpine --version 0.3", repoSetup), + cmd: repoSetup + " __complete show chart testing/alpine --version 0.3", golden: "output/version-comp.txt", }, { name: "completion for show version flag too few args", - cmd: fmt.Sprintf("%s __complete show chart --version ''", repoSetup), + cmd: repoSetup + " __complete show chart --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for show version flag too many args", - cmd: fmt.Sprintf("%s __complete show chart testing/alpine badarg --version ''", repoSetup), + cmd: repoSetup + " __complete show chart testing/alpine badarg --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for show version flag invalid chart", - cmd: fmt.Sprintf("%s __complete show chart invalid/invalid --version ''", repoSetup), + cmd: repoSetup + " __complete show chart invalid/invalid --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for show version flag with all", - cmd: fmt.Sprintf("%s __complete show all testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete show all testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for show version flag with readme", - cmd: fmt.Sprintf("%s __complete show readme testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete 
show readme testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for show version flag with values", - cmd: fmt.Sprintf("%s __complete show values testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete show values testing/alpine --version ''", golden: "output/version-comp.txt", }} runTestCmd(t, tests) diff --git a/pkg/cmd/status.go b/pkg/cmd/status.go index f68316c6c..705691aaf 100644 --- a/pkg/cmd/status.go +++ b/pkg/cmd/status.go @@ -197,9 +197,9 @@ func (s statusPrinter) WriteTable(out io.Writer) error { } _, _ = fmt.Fprintf(out, "TEST SUITE: %s\n%s\n%s\n%s\n", h.Name, - fmt.Sprintf("Last Started: %s", h.LastRun.StartedAt.Format(time.ANSIC)), - fmt.Sprintf("Last Completed: %s", h.LastRun.CompletedAt.Format(time.ANSIC)), - fmt.Sprintf("Phase: %s", h.LastRun.Phase), + "Last Started: "+h.LastRun.StartedAt.Format(time.ANSIC), + "Last Completed: "+h.LastRun.CompletedAt.Format(time.ANSIC), + "Phase: "+h.LastRun.Phase, ) } } diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index 14f85042b..047fd60df 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -80,7 +80,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { if kubeVersion != "" { parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion) if err != nil { - return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err) + return fmt.Errorf("invalid kube version '%s': %w", kubeVersion, err) } client.KubeVersion = parsedKubeVersion } diff --git a/pkg/cmd/template_test.go b/pkg/cmd/template_test.go index 5bcccf5d0..7391781f6 100644 --- a/pkg/cmd/template_test.go +++ b/pkg/cmd/template_test.go @@ -178,23 +178,23 @@ func TestTemplateVersionCompletion(t *testing.T) { tests := []cmdTestCase{{ name: "completion for template version flag with release name", - cmd: fmt.Sprintf("%s __complete template releasename testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete template releasename 
testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for template version flag with generate-name", - cmd: fmt.Sprintf("%s __complete template --generate-name testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete template --generate-name testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for template version flag too few args", - cmd: fmt.Sprintf("%s __complete template testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete template testing/alpine --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for template version flag too many args", - cmd: fmt.Sprintf("%s __complete template releasename testing/alpine badarg --version ''", repoSetup), + cmd: repoSetup + " __complete template releasename testing/alpine badarg --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for template version flag invalid chart", - cmd: fmt.Sprintf("%s __complete template releasename invalid/invalid --version ''", repoSetup), + cmd: repoSetup + " __complete template releasename invalid/invalid --version ''", golden: "output/version-invalid-comp.txt", }} runTestCmd(t, tests) diff --git a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml index a58544b03..c99e5122b 100644 --- a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml @@ -1,6 +1,7 @@ --- apiVersion: v1 name: fullenv +version: 0.1.0 type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml index 4156e7f17..24d79ac7e 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml @@ -1,4 +1,5 @@ name: args 
+version: 0.1.0 type: cli/v1 apiVersion: v1 runtime: subprocess diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml index a0a0b5255..a707c3373 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml @@ -1,4 +1,5 @@ name: echo +version: 0.1.0 type: cli/v1 apiVersion: v1 runtime: subprocess diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml index ba9508255..93930219b 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml @@ -1,6 +1,7 @@ --- apiVersion: v1 name: exitwith +version: 0.1.0 type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml index a58544b03..c99e5122b 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml @@ -1,6 +1,7 @@ --- apiVersion: v1 name: fullenv +version: 0.1.0 type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/noversion/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/noversion/plugin.yaml new file mode 100644 index 000000000..70c356dea --- /dev/null +++ b/pkg/cmd/testdata/helmhome/helm/plugins/noversion/plugin.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +name: noversion +type: cli/v1 +runtime: subprocess +runtimeConfig: + platformCommand: + - command: "echo hello" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml similarity index 89% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml index e479a0503..027573ed4 100644 
--- a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml @@ -1,4 +1,4 @@ -name: env +name: shortenv commands: - name: list flags: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh similarity index 100% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml similarity index 87% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml index 78a0a23fb..3f935db4b 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml @@ -1,6 +1,7 @@ --- apiVersion: v1 -name: env +name: shortenv +version: 0.1.0 type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt b/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt index eb770967f..c2219d8c4 100644 --- a/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt +++ b/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt @@ -19,3 +19,4 @@ metadata: data: foo: bar + diff --git a/pkg/cmd/testdata/output/install-dry-run-with-secret.txt b/pkg/cmd/testdata/output/install-dry-run-with-secret.txt index d22c1437f..62bd78018 100644 --- a/pkg/cmd/testdata/output/install-dry-run-with-secret.txt +++ b/pkg/cmd/testdata/output/install-dry-run-with-secret.txt @@ -15,6 +15,7 @@ metadata: name: test-secret stringData: foo: bar + --- # Source: chart-with-secret/templates/configmap.yaml apiVersion: v1 @@ -24,3 +25,4 @@ metadata: data: foo: bar + diff --git a/pkg/cmd/testdata/output/issue-9027.txt b/pkg/cmd/testdata/output/issue-9027.txt 
index eb19fc383..f43032499 100644 --- a/pkg/cmd/testdata/output/issue-9027.txt +++ b/pkg/cmd/testdata/output/issue-9027.txt @@ -2,30 +2,37 @@ # Source: issue-9027/charts/subchart/templates/values.yaml global: hash: + key1: 1 + key2: 2 key3: 13 key4: 4 key5: 5 key6: 6 hash: + key1: 1 + key2: 2 key3: 13 key4: 4 key5: 5 key6: 6 + --- # Source: issue-9027/templates/values.yaml global: hash: - key1: null - key2: null key3: 13 subchart: global: hash: + key1: 1 + key2: 2 key3: 13 key4: 4 key5: 5 key6: 6 hash: + key1: 1 + key2: 2 key3: 13 key4: 4 key5: 5 diff --git a/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt b/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt index a1ec4394e..c25efc1df 100644 --- a/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt +++ b/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt @@ -1,5 +1,5 @@ ==> Linting testdata/testcharts/chart-with-deprecated-api [INFO] Chart.yaml: icon is recommended -[WARNING] templates/horizontalpodautoscaler.yaml: autoscaling/v2beta1 HorizontalPodAutoscaler is deprecated in v1.22+, unavailable in v1.25+; use autoscaling/v2 HorizontalPodAutoscaler +[WARNING] templates/poddisruptionbudget.yaml: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget Error: 1 chart(s) linted, 1 chart(s) failed diff --git a/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt b/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt index dac54620c..08dbde95d 100644 --- a/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt +++ b/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt @@ -1,5 +1,5 @@ ==> Linting testdata/testcharts/chart-with-deprecated-api [INFO] Chart.yaml: icon is recommended -[WARNING] templates/horizontalpodautoscaler.yaml: autoscaling/v2beta1 HorizontalPodAutoscaler is deprecated in v1.22+, unavailable in v1.25+; use autoscaling/v2 HorizontalPodAutoscaler +[WARNING] 
templates/poddisruptionbudget.yaml: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget 1 chart(s) linted, 0 chart(s) failed diff --git a/pkg/cmd/testdata/output/object-order.txt b/pkg/cmd/testdata/output/object-order.txt index 307f928f2..1ff39f33c 100644 --- a/pkg/cmd/testdata/output/object-order.txt +++ b/pkg/cmd/testdata/output/object-order.txt @@ -155,6 +155,7 @@ spec: policyTypes: - Egress - Ingress + --- # Source: object-order/templates/01-a.yml # 4 (Deployment should come after all NetworkPolicy manifests, since 'helm template' outputs in install order) diff --git a/pkg/cmd/testdata/output/plugin_list_comp.txt b/pkg/cmd/testdata/output/plugin_list_comp.txt index 833efc5e9..1dff43551 100644 --- a/pkg/cmd/testdata/output/plugin_list_comp.txt +++ b/pkg/cmd/testdata/output/plugin_list_comp.txt @@ -1,7 +1,7 @@ args echo args echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp diff --git a/pkg/cmd/testdata/output/plugin_repeat_comp.txt b/pkg/cmd/testdata/output/plugin_repeat_comp.txt index 3fa05f0b3..b46c1b7d4 100644 --- a/pkg/cmd/testdata/output/plugin_repeat_comp.txt +++ b/pkg/cmd/testdata/output/plugin_repeat_comp.txt @@ -1,6 +1,6 @@ echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp diff --git a/pkg/cmd/testdata/output/template-name-template.txt b/pkg/cmd/testdata/output/template-name-template.txt index 9406048dd..b1077012e 100644 --- a/pkg/cmd/testdata/output/template-name-template.txt +++ b/pkg/cmd/testdata/output/template-name-template.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: 
["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -93,6 +98,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -112,3 +118,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-set.txt b/pkg/cmd/testdata/output/template-set.txt index 4040991cf..1ecb8707b 100644 --- a/pkg/cmd/testdata/output/template-set.txt +++ b/pkg/cmd/testdata/output/template-set.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -93,6 +98,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: 
subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -112,3 +118,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-skip-tests.txt b/pkg/cmd/testdata/output/template-skip-tests.txt index 5c907b563..4c5af8df3 100644 --- a/pkg/cmd/testdata/output/template-skip-tests.txt +++ b/pkg/cmd/testdata/output/template-skip-tests.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 diff --git a/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt b/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt index 56844e292..227d05903 100644 --- a/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt +++ b/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/configmap.yaml apiVersion: v1 @@ -22,6 +23,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -36,6 +38,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: 
subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -53,6 +56,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -70,6 +74,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -101,6 +106,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -120,3 +126,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-subchart-cm-set.txt b/pkg/cmd/testdata/output/template-subchart-cm-set.txt index e52f7c234..dd8be4db9 100644 --- a/pkg/cmd/testdata/output/template-subchart-cm-set.txt +++ b/pkg/cmd/testdata/output/template-subchart-cm-set.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/configmap.yaml apiVersion: v1 @@ -22,6 +23,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -36,6 +38,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -53,6 +56,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -70,6 +74,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -101,6 +106,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -120,3 +126,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-subchart-cm.txt 
b/pkg/cmd/testdata/output/template-subchart-cm.txt index 9cc9e2296..c4600a798 100644 --- a/pkg/cmd/testdata/output/template-subchart-cm.txt +++ b/pkg/cmd/testdata/output/template-subchart-cm.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/configmap.yaml apiVersion: v1 @@ -22,6 +23,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -36,6 +38,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -53,6 +56,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -70,6 +74,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -101,6 +106,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -120,3 +126,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-values-files.txt b/pkg/cmd/testdata/output/template-values-files.txt index 4040991cf..1ecb8707b 100644 --- a/pkg/cmd/testdata/output/template-values-files.txt +++ b/pkg/cmd/testdata/output/template-values-files.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: 
subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -93,6 +98,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -112,3 +118,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-with-api-version.txt b/pkg/cmd/testdata/output/template-with-api-version.txt index 8b6074cdb..ae726e624 100644 --- a/pkg/cmd/testdata/output/template-with-api-version.txt +++ b/pkg/cmd/testdata/output/template-with-api-version.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -95,6 +100,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -114,3 +120,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-with-crds.txt 
b/pkg/cmd/testdata/output/template-with-crds.txt index 256fc7c3b..1d63265ec 100644 --- a/pkg/cmd/testdata/output/template-with-crds.txt +++ b/pkg/cmd/testdata/output/template-with-crds.txt @@ -21,6 +21,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -31,6 +32,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -45,6 +47,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -62,6 +65,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -79,6 +83,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -110,6 +115,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -129,3 +135,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template-with-kube-version.txt b/pkg/cmd/testdata/output/template-with-kube-version.txt index 9d326f328..2c42e2e84 100644 --- a/pkg/cmd/testdata/output/template-with-kube-version.txt +++ b/pkg/cmd/testdata/output/template-with-kube-version.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # 
Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -93,6 +98,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -112,3 +118,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/template.txt b/pkg/cmd/testdata/output/template.txt index 58c480b47..ddbfebe9d 100644 --- a/pkg/cmd/testdata/output/template.txt +++ b/pkg/cmd/testdata/output/template.txt @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: subchart-sa + --- # Source: subchart/templates/subdir/role.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -14,6 +15,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get","list","watch"] + --- # Source: subchart/templates/subdir/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -28,6 +30,7 @@ subjects: - kind: ServiceAccount name: subchart-sa namespace: default + --- # Source: subchart/charts/subcharta/templates/service.yaml apiVersion: v1 @@ -45,6 +48,7 @@ spec: name: apache selector: app.kubernetes.io/name: subcharta + --- # Source: subchart/charts/subchartb/templates/service.yaml apiVersion: v1 @@ -62,6 +66,7 @@ spec: name: nginx selector: app.kubernetes.io/name: subchartb + --- # Source: subchart/templates/service.yaml apiVersion: v1 @@ -93,6 +98,7 @@ metadata: "helm.sh/hook": test data: message: Hello World + --- # Source: subchart/templates/tests/test-nothing.yaml apiVersion: v1 @@ -112,3 +118,4 @@ spec: - echo - "$message" restartPolicy: Never + diff --git a/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt b/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt 
index b2c154a80..cb0a3a167 100644 --- a/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt +++ b/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt @@ -1 +1 @@ -Error: an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: found in Chart.yaml, but missing in charts/ directory: reqsubchart2 +Error: an error occurred while checking for chart dependencies. You may need to run 'helm dependency build' to fetch missing dependencies: found in Chart.yaml, but missing in charts/ directory: reqsubchart2 diff --git a/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml b/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml deleted file mode 100644 index b77a4beeb..000000000 --- a/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: deprecated -spec: - scaleTargetRef: - kind: Pod - name: pod - maxReplicas: 3 \ No newline at end of file diff --git a/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/poddisruptionbudget.yaml b/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..214d3cb68 --- /dev/null +++ b/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/poddisruptionbudget.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: deprecated +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: deprecated diff --git a/pkg/cmd/testdata/testplugin/plugin.yaml b/pkg/cmd/testdata/testplugin/plugin.yaml index 3ee5d04f6..fb1d82062 100644 --- a/pkg/cmd/testdata/testplugin/plugin.yaml +++ b/pkg/cmd/testdata/testplugin/plugin.yaml @@ -1,6 +1,7 @@ --- apiVersion: v1 name: testplugin +version: 0.1.0 type: cli/v1 
runtime: subprocess config: diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go index 4cc14ae1e..49f7bd19d 100644 --- a/pkg/cmd/uninstall.go +++ b/pkg/cmd/uninstall.go @@ -35,6 +35,9 @@ as well as the release history, freeing it up for future use. Use the '--dry-run' flag to see which releases will be uninstalled without actually uninstalling them. + +Use '--cascade foreground' with '--wait' to ensure resources with finalizers +are fully deleted before the command returns. ` func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { @@ -76,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") - f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") + f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background. 
Use \"foreground\" with --wait to ensure resources with finalizers are fully deleted before returning.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") AddWaitFlag(cmd, &client.WaitStrategy) diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 918d6f5b8..43e19ab22 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -18,6 +18,7 @@ package cmd import ( "context" + "errors" "fmt" "io" "log" @@ -124,7 +125,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { histClient := action.NewHistory(cfg) histClient.Max = 1 versions, err := histClient.Run(args[0]) - if err == driver.ErrReleaseNotFound || isReleaseUninstalled(versions) { + if errors.Is(err, driver.ErrReleaseNotFound) || isReleaseUninstalled(versions) { // Only print this to stdout for table output if outfmt == output.Table { fmt.Fprintf(out, "Release %q does not exist. Installing it now.\n", args[0]) @@ -204,7 +205,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { } if req := ac.MetaDependencies(); len(req) > 0 { if err := action.CheckDependencies(ch, req); err != nil { - err = fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err) + err = fmt.Errorf("an error occurred while checking for chart dependencies. 
You may need to run 'helm dependency build' to fetch missing dependencies: %w", err) if client.DependencyUpdate { man := &downloader.Manager{ Out: out, diff --git a/pkg/cmd/upgrade_test.go b/pkg/cmd/upgrade_test.go index 0ae1e3561..f96f6ec0d 100644 --- a/pkg/cmd/upgrade_test.go +++ b/pkg/cmd/upgrade_test.go @@ -149,7 +149,7 @@ func TestUpgradeCmd(t *testing.T) { }, { name: "upgrade a release with missing dependencies", - cmd: fmt.Sprintf("upgrade bonkers-bunny %s", missingDepsPath), + cmd: "upgrade bonkers-bunny " + missingDepsPath, golden: "output/upgrade-with-missing-dependencies.txt", wantError: true, }, @@ -161,7 +161,7 @@ func TestUpgradeCmd(t *testing.T) { }, { name: "upgrade a release with resolving missing dependencies", - cmd: fmt.Sprintf("upgrade --dependency-update funny-bunny %s", presentDepsPath), + cmd: "upgrade --dependency-update funny-bunny " + presentDepsPath, golden: "output/upgrade-with-dependency-update.txt", rels: []*release.Release{relMock("funny-bunny", 2, ch2)}, }, @@ -443,23 +443,23 @@ func TestUpgradeVersionCompletion(t *testing.T) { tests := []cmdTestCase{{ name: "completion for upgrade version flag", - cmd: fmt.Sprintf("%s __complete upgrade releasename testing/alpine --version ''", repoSetup), + cmd: repoSetup + " __complete upgrade releasename testing/alpine --version ''", golden: "output/version-comp.txt", }, { name: "completion for upgrade version flag, no filter", - cmd: fmt.Sprintf("%s __complete upgrade releasename testing/alpine --version 0.3", repoSetup), + cmd: repoSetup + " __complete upgrade releasename testing/alpine --version 0.3", golden: "output/version-comp.txt", }, { name: "completion for upgrade version flag too few args", - cmd: fmt.Sprintf("%s __complete upgrade releasename --version ''", repoSetup), + cmd: repoSetup + " __complete upgrade releasename --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for upgrade version flag too many args", - cmd: fmt.Sprintf("%s __complete upgrade 
releasename testing/alpine badarg --version ''", repoSetup), + cmd: repoSetup + " __complete upgrade releasename testing/alpine badarg --version ''", golden: "output/version-invalid-comp.txt", }, { name: "completion for upgrade version flag invalid chart", - cmd: fmt.Sprintf("%s __complete upgrade releasename invalid/invalid --version ''", repoSetup), + cmd: repoSetup + " __complete upgrade releasename invalid/invalid --version ''", golden: "output/version-invalid-comp.txt", }} runTestCmd(t, tests) @@ -636,7 +636,7 @@ func TestUpgradeInstallServerSideApply(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { store := storageFixture() - releaseName := fmt.Sprintf("ssa-test-%s", tt.expectedApplyMethod) + releaseName := "ssa-test-" + tt.expectedApplyMethod cmd := fmt.Sprintf("upgrade %s --install %s '%s'", releaseName, tt.serverSideFlag, chartPath) _, _, err := executeActionCommandC(store, cmd) diff --git a/pkg/downloader/cache.go b/pkg/downloader/cache.go index cecfc8bd7..1e23fbfcd 100644 --- a/pkg/downloader/cache.go +++ b/pkg/downloader/cache.go @@ -17,6 +17,7 @@ package downloader import ( "crypto/sha256" + "encoding/hex" "errors" "fmt" "io" @@ -85,5 +86,5 @@ func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, cacheType string) // fileName generates the filename in a structured manner where the first part is the // directory and the full hash is the filename. 
func (c *DiskCache) fileName(id [sha256.Size]byte, cacheType string) string { - return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+cacheType) + return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), hex.EncodeToString(id[:])+cacheType) } diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index ee4f8abe3..9c26f925e 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -125,10 +125,15 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven var digest32 [32]byte if hash != "" { // if there is a hash, populate the other formats - digest, err = hex.DecodeString(hash) + // Strip the algorithm prefix (e.g., "sha256:") if present + digest, err = hex.DecodeString(stripDigestAlgorithm(hash)) if err != nil { return "", nil, err } + if len(digest) != 32 { + return "", nil, fmt.Errorf("invalid digest length: %d", len(digest)) + } + copy(digest32[:], digest) if pth, err := c.Cache.Get(digest32, CacheChart); err == nil { fdata, err := os.ReadFile(pth) @@ -156,7 +161,11 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven } destfile := filepath.Join(dest, name) - if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil { + + // Use PlatformAtomicWriteFile to handle platform-specific concurrency concerns + // (Windows requires locking to avoid "Access Denied" errors when multiple + // processes write the same file) + if err := fileutil.PlatformAtomicWriteFile(destfile, data, 0644); err != nil { return destfile, nil, err } @@ -176,7 +185,7 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven } } if !found { - body, err = g.Get(u.String() + ".prov") + body, err = g.Get(u.String()+".prov", c.Options...) 
if err != nil { if c.Verify == VerifyAlways { return destfile, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov") @@ -186,7 +195,9 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven } } provfile := destfile + ".prov" - if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil { + + // Use PlatformAtomicWriteFile for the provenance file as well + if err := fileutil.PlatformAtomicWriteFile(provfile, body, 0644); err != nil { return destfile, nil, err } @@ -225,10 +236,14 @@ func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provena c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) // Check the cache for the file - digest, err := hex.DecodeString(digestString) + // Strip the algorithm prefix (e.g., "sha256:") if present + digest, err := hex.DecodeString(stripDigestAlgorithm(digestString)) if err != nil { return "", nil, fmt.Errorf("unable to decode digest: %w", err) } + if digestString != "" && len(digest) != 32 { + return "", nil, fmt.Errorf("invalid digest length: %d", len(digest)) + } var digest32 [32]byte copy(digest32[:], digest) @@ -380,7 +395,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (string, *url if err != nil { // If there is no special config, return the default HTTP client and // swallow the error. - if err == ErrNoOwnerRepo { + if errors.Is(err, ErrNoOwnerRepo) { // Make sure to add the ref URL as the URL for the getter c.Options = append(c.Options, getter.WithURL(ref)) return "", u, nil @@ -578,3 +593,12 @@ func loadRepoConfig(file string) (*repo.File, error) { } return r, nil } + +// stripDigestAlgorithm removes the algorithm prefix (e.g., "sha256:") from a digest string. +// If no prefix is present, the original string is returned unchanged. 
+func stripDigestAlgorithm(digest string) string { + if _, after, ok := strings.Cut(digest, ":"); ok { + return after + } + return digest +} diff --git a/pkg/downloader/chart_downloader_test.go b/pkg/downloader/chart_downloader_test.go index 4349ecef9..8a7514a8e 100644 --- a/pkg/downloader/chart_downloader_test.go +++ b/pkg/downloader/chart_downloader_test.go @@ -18,10 +18,12 @@ package downloader import ( "crypto/sha256" "encoding/hex" + "errors" "os" "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "helm.sh/helm/v4/internal/test/ensure" @@ -376,7 +378,7 @@ func TestScanReposForURL(t *testing.T) { // A lookup failure should produce an ErrNoOwnerRepo u = "https://no.such.repo/foo/bar-1.23.4.tgz" - if _, err = c.scanReposForURL(u, rf); err != ErrNoOwnerRepo { + if _, err = c.scanReposForURL(u, rf); !errors.Is(err, ErrNoOwnerRepo) { t.Fatalf("expected ErrNoOwnerRepo, got %v", err) } } @@ -485,3 +487,34 @@ func TestDownloadToCache(t *testing.T) { c.Keyring = "" }) } + +func TestStripDigestAlgorithm(t *testing.T) { + tests := map[string]struct { + input string + expected string + }{ + "sha256 prefixed digest": { + input: "sha256:aef46c66a7f2d5a12a7e3f54a64790daf5c9a9e66af3f46955efdaa6c900341d", + expected: "aef46c66a7f2d5a12a7e3f54a64790daf5c9a9e66af3f46955efdaa6c900341d", + }, + "sha512 prefixed digest": { + input: "sha512:abcdef1234567890", + expected: "abcdef1234567890", + }, + "plain hex digest without prefix": { + input: "aef46c66a7f2d5a12a7e3f54a64790daf5c9a9e66af3f46955efdaa6c900341d", + expected: "aef46c66a7f2d5a12a7e3f54a64790daf5c9a9e66af3f46955efdaa6c900341d", + }, + "empty string": { + input: "", + expected: "", + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + result := stripDigestAlgorithm(tt.input) + assert.Equalf(t, tt.expected, result, "stripDigestAlgorithm(%q) = %q, want %q", tt.input, result, tt.expected) + }) + } +} diff --git 
a/pkg/downloader/chart_downloader_windows_test.go b/pkg/downloader/chart_downloader_windows_test.go new file mode 100644 index 000000000..732416701 --- /dev/null +++ b/pkg/downloader/chart_downloader_windows_test.go @@ -0,0 +1,131 @@ +//go:build windows + +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "os" + "path/filepath" + "sync" + "testing" + + "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/getter" + "helm.sh/helm/v4/pkg/repo/v1/repotest" +) + +// TestParallelDownloadTo tests that parallel downloads to the same file +// don't cause "Access Denied" errors on Windows. This test is Windows-specific +// because the file locking behavior is only needed on Windows. 
+func TestParallelDownloadTo(t *testing.T) { + // Set up a simple test server with a chart + srv := repotest.NewTempServer(t, repotest.WithChartSourceGlob("testdata/*.tgz")) + defer srv.Stop() + + if err := srv.CreateIndex(); err != nil { + t.Fatal(err) + } + + dest := t.TempDir() + cacheDir := t.TempDir() + + c := ChartDownloader{ + Out: os.Stderr, + RepositoryConfig: repoConfig, + RepositoryCache: repoCache, + ContentCache: cacheDir, + Cache: &DiskCache{Root: cacheDir}, + Getters: getter.All(&cli.EnvSettings{ + RepositoryConfig: repoConfig, + RepositoryCache: repoCache, + ContentCache: cacheDir, + }), + } + + // Use a direct URL to bypass repository lookup + chartURL := srv.URL() + "/local-subchart-0.1.0.tgz" + + // Number of parallel downloads to attempt + numDownloads := 10 + var wg sync.WaitGroup + errors := make([]error, numDownloads) + + // Launch multiple goroutines to download the same chart simultaneously + for i := 0; i < numDownloads; i++ { + wg.Add(1) + go func(index int) { + defer wg.Done() + _, _, err := c.DownloadTo(chartURL, "", dest) + errors[index] = err + }(i) + } + + wg.Wait() + + // Check if any download failed + failedCount := 0 + for i, err := range errors { + if err != nil { + t.Logf("Download %d failed: %v", i, err) + failedCount++ + } + } + + // With the file locking fix, all parallel downloads should succeed + if failedCount > 0 { + t.Errorf("Parallel downloads failed: %d out of %d downloads failed due to concurrent file access", failedCount, numDownloads) + } + + // Verify the file exists and is valid + expectedFile := filepath.Join(dest, "local-subchart-0.1.0.tgz") + info, err := os.Stat(expectedFile) + if err != nil { + t.Errorf("Expected file %s does not exist: %v", expectedFile, err) + } else { + // Verify the file is not empty + if info.Size() == 0 { + t.Errorf("Downloaded file %s is empty (0 bytes)", expectedFile) + } + + // Verify the file has the expected size (should match the source file) + sourceFile := 
"testdata/local-subchart-0.1.0.tgz" + sourceInfo, err := os.Stat(sourceFile) + if err == nil && info.Size() != sourceInfo.Size() { + t.Errorf("Downloaded file size (%d bytes) doesn't match source file size (%d bytes)", + info.Size(), sourceInfo.Size()) + } + + // Verify it's a valid tar.gz file by checking the magic bytes + file, err := os.Open(expectedFile) + if err == nil { + defer file.Close() + // gzip magic bytes are 0x1f 0x8b + magic := make([]byte, 2) + if n, err := file.Read(magic); err == nil && n == 2 { + if magic[0] != 0x1f || magic[1] != 0x8b { + t.Errorf("Downloaded file is not a valid gzip file (magic bytes: %x)", magic) + } + } + } + + // Verify no lock file was left behind + lockFile := expectedFile + ".lock" + if _, err := os.Stat(lockFile); err == nil { + t.Errorf("Lock file %s was not cleaned up", lockFile) + } + } +} diff --git a/pkg/downloader/manager.go b/pkg/downloader/manager.go index 6043fbaaa..16459229d 100644 --- a/pkg/downloader/manager.go +++ b/pkg/downloader/manager.go @@ -53,7 +53,7 @@ type ErrRepoNotFound struct { // Error implements the error interface. func (e ErrRepoNotFound) Error() string { - return fmt.Sprintf("no repository definition for %s", strings.Join(e.Repos, ", ")) + return "no repository definition for " + strings.Join(e.Repos, ", ") } // Manager handles the lifecycle of fetching, resolving, and storing dependencies. @@ -127,7 +127,7 @@ func (m *Manager) Build() error { return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") } } else { - return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies") + return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). 
Please update the dependencies with 'helm dependency update'") } } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index f5db7e158..6fd2beed8 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -129,8 +129,8 @@ func warnWrap(warn string) string { // 'include' needs to be defined in the scope of a 'tpl' template as // well as regular file-loaded templates. -func includeFun(t *template.Template, includedNames map[string]int) func(string, interface{}) (string, error) { - return func(name string, data interface{}) (string, error) { +func includeFun(t *template.Template, includedNames map[string]int) func(string, any) (string, error) { + return func(name string, data any) (string, error) { var buf strings.Builder if v, ok := includedNames[name]; ok { if v > recursionMaxNums { @@ -150,8 +150,8 @@ func includeFun(t *template.Template, includedNames map[string]int) func(string, // As does 'tpl', so that nested calls to 'tpl' see the templates // defined by their enclosing contexts. 
-func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, interface{}) (string, error) { - return func(tpl string, vals interface{}) (string, error) { +func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, any) (string, error) { + return func(tpl string, vals any) (string, error) { t, err := parent.Clone() if err != nil { return "", fmt.Errorf("cannot clone template: %w", err) @@ -204,7 +204,7 @@ func (e Engine) initFunMap(t *template.Template) { funcMap["tpl"] = tplFun(t, includedNames, e.Strict) // Add the `required` function here so we can use lintMode - funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { + funcMap["required"] = func(warn string, val any) (any, error) { if val == nil { if e.LintMode { // Don't fail on missing required values when linting @@ -319,7 +319,7 @@ func cleanupParseError(filename string, err error) error { tokens := strings.Split(err.Error(), ": ") if len(tokens) == 1 { // This might happen if a non-templating error occurs - return fmt.Errorf("parse error in (%s): %s", filename, err) + return fmt.Errorf("parse error in (%s): %w", filename, err) } // The first token is "template" // The second token is either "filename:lineno" or "filename:lineNo:columnNo" @@ -410,9 +410,7 @@ func parseTemplateSimpleErrorString(remainder string) (TraceableError, bool) { // Executing form: ": executing \"\" at <>: [ template:...]" // Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=141 func parseTemplateExecutingAtErrorType(remainder string) (TraceableError, bool) { - if idx := strings.Index(remainder, ": executing "); idx != -1 { - templateName := remainder[:idx] - after := remainder[idx+len(": executing "):] + if templateName, after, found := strings.Cut(remainder, ": executing "); found { if len(after) == 0 || after[0] != '"' { return TraceableError{}, false } @@ -431,12 +429,10 @@ func 
parseTemplateExecutingAtErrorType(remainder string) (TraceableError, bool) return TraceableError{}, false } afterAt := afterFunc[len(atPrefix):] - endLoc := strings.Index(afterAt, ">: ") - if endLoc == -1 { + locationName, errMsg, found := strings.Cut(afterAt, ">: ") + if !found { return TraceableError{}, false } - locationName := afterAt[:endLoc] - errMsg := afterAt[endLoc+len(">: "):] // trim chained next error starting with space + "template:" if present if cut := strings.Index(errMsg, " template:"); cut != -1 { @@ -466,7 +462,7 @@ func reformatExecErrorMsg(filename string, err error) error { tokens := strings.SplitN(err.Error(), ": ", 3) if len(tokens) != 3 { // This might happen if a non-templating error occurs - return fmt.Errorf("execution error in (%s): %s", filename, err) + return fmt.Errorf("execution error in (%s): %w", filename, err) } // The first token is "template" @@ -535,9 +531,9 @@ func allTemplates(c ci.Charter, vals common.Values) map[string]renderable { // // As it recurses, it also sets the values to be appropriate for the template // scope. 
-func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Values) map[string]interface{} { +func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Values) map[string]any { vals := values.AsMap() - subCharts := make(map[string]interface{}) + subCharts := make(map[string]any) accessor, err := ci.NewAccessor(c) if err != nil { slog.Error("error accessing chart", "error", err) @@ -545,7 +541,7 @@ func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Val chartMetaData := accessor.MetadataAsMap() chartMetaData["IsRoot"] = accessor.IsRoot() - next := map[string]interface{}{ + next := map[string]any{ "Chart": chartMetaData, "Files": newFiles(accessor.Files()), "Release": vals["Release"], diff --git a/pkg/engine/engine_test.go b/pkg/engine/engine_test.go index c9cdf79c3..869b5d202 100644 --- a/pkg/engine/engine_test.go +++ b/pkg/engine/engine_test.go @@ -17,6 +17,7 @@ limitations under the License. package engine import ( + "errors" "fmt" "path" "strings" @@ -104,14 +105,14 @@ func TestRender(t *testing.T) { {Name: "templates/test4", ModTime: modTime, Data: []byte("{{toJson .Values}}")}, {Name: "templates/test5", ModTime: modTime, Data: []byte("{{getHostByName \"helm.sh\"}}")}, }, - Values: map[string]interface{}{"outer": "DEFAULT", "inner": "DEFAULT"}, + Values: map[string]any{"outer": "DEFAULT", "inner": "DEFAULT"}, } - vals := map[string]interface{}{ - "Values": map[string]interface{}{ + vals := map[string]any{ + "Values": map[string]any{ "outer": "spouter", "inner": "inn", - "global": map[string]interface{}{ + "global": map[string]any{ "callme": "Ishmael", }, }, @@ -226,11 +227,11 @@ func TestRenderWithDNS(t *testing.T) { Templates: []*common.File{ {Name: "templates/test1", ModTime: time.Now(), Data: []byte("{{getHostByName \"helm.sh\"}}")}, }, - Values: map[string]interface{}{}, + Values: map[string]any{}, } - vals := map[string]interface{}{ - "Values": map[string]interface{}{}, + vals := map[string]any{ 
+ "Values": map[string]any{}, } v, err := util.CoalesceValues(c, vals) @@ -277,15 +278,15 @@ var _ ClientProvider = &testClientProvider{} // makeUnstructured is a convenience function for single-line creation of Unstructured objects. func makeUnstructured(apiVersion, kind, name, namespace string) *unstructured.Unstructured { - ret := &unstructured.Unstructured{Object: map[string]interface{}{ + ret := &unstructured.Unstructured{Object: map[string]any{ "apiVersion": apiVersion, "kind": kind, - "metadata": map[string]interface{}{ + "metadata": map[string]any{ "name": name, }, }} if namespace != "" { - ret.Object["metadata"].(map[string]interface{})["namespace"] = namespace + ret.Object["metadata"].(map[string]any)["namespace"] = namespace } return ret } @@ -356,7 +357,7 @@ func TestRenderWithClientProvider(t *testing.T) { Name: "moby", Version: "1.2.3", }, - Values: map[string]interface{}{}, + Values: map[string]any{}, } modTime := time.Now() @@ -368,8 +369,8 @@ func TestRenderWithClientProvider(t *testing.T) { }) } - vals := map[string]interface{}{ - "Values": map[string]interface{}{}, + vals := map[string]any{ + "Values": map[string]any{}, } v, err := util.CoalesceValues(c, vals) @@ -401,11 +402,11 @@ func TestRenderWithClientProvider_error(t *testing.T) { Templates: []*common.File{ {Name: "templates/error", ModTime: time.Now(), Data: []byte(`{{ lookup "v1" "Error" "" "" }}`)}, }, - Values: map[string]interface{}{}, + Values: map[string]any{}, } - vals := map[string]interface{}{ - "Values": map[string]interface{}{}, + vals := map[string]any{ + "Values": map[string]any{}, } v, err := util.CoalesceValues(c, vals) @@ -417,7 +418,7 @@ func TestRenderWithClientProvider_error(t *testing.T) { t: t, scheme: map[string]kindProps{ "v1/Error": { - shouldErr: fmt.Errorf("kaboom"), + shouldErr: errors.New("kaboom"), }, }, } @@ -438,7 +439,7 @@ func TestParallelRenderInternals(t *testing.T) { tpls := map[string]renderable{ "t": { tpl: `{{.val}}`, - vals: 
map[string]interface{}{"val": tt}, + vals: map[string]any{"val": tt}, }, } out, err := e.render(tpls) @@ -455,7 +456,7 @@ func TestParallelRenderInternals(t *testing.T) { } func TestParseErrors(t *testing.T) { - vals := common.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]any{}} tplsUndefinedFunction := map[string]renderable{ "undefined_function": {tpl: `{{foo}}`, vals: vals}, @@ -471,7 +472,7 @@ func TestParseErrors(t *testing.T) { } func TestExecErrors(t *testing.T) { - vals := common.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]any{}} cases := []struct { name string tpls map[string]renderable @@ -535,7 +536,7 @@ linebreak`, } func TestFailErrors(t *testing.T) { - vals := common.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]any{}} failtpl := `All your base are belong to us{{ fail "This is an error" }}` tplsFailed := map[string]renderable{ @@ -643,7 +644,7 @@ func TestRenderDependency(t *testing.T) { }, }) - out, err := Render(ch, map[string]interface{}{}) + out, err := Render(ch, map[string]any{}) if err != nil { t.Fatalf("failed to render chart: %s", err) } @@ -675,7 +676,7 @@ func TestRenderNestedValues(t *testing.T) { {Name: deepestpath, ModTime: modTime, Data: []byte(`And this same {{.Values.what}} that smiles {{.Values.global.when}}`)}, {Name: checkrelease, ModTime: modTime, Data: []byte(`Tomorrow will be {{default "happy" .Release.Name }}`)}, }, - Values: map[string]interface{}{"what": "milkshake", "where": "here"}, + Values: map[string]any{"what": "milkshake", "where": "here"}, } inner := &chart.Chart{ @@ -683,7 +684,7 @@ func TestRenderNestedValues(t *testing.T) { Templates: []*common.File{ {Name: innerpath, ModTime: modTime, Data: []byte(`Old {{.Values.who}} is still a-flyin'`)}, }, - Values: map[string]interface{}{"who": "Robert", "what": "glasses"}, + Values: map[string]any{"who": "Robert", "what": "glasses"}, } 
inner.AddDependency(deepest) @@ -693,10 +694,10 @@ func TestRenderNestedValues(t *testing.T) { {Name: outerpath, ModTime: modTime, Data: []byte(`Gather ye {{.Values.what}} while ye may`)}, {Name: subchartspath, ModTime: modTime, Data: []byte(`The glorious Lamp of {{.Subcharts.herrick.Subcharts.deepest.Values.where}}, the {{.Subcharts.herrick.Values.what}}`)}, }, - Values: map[string]interface{}{ + Values: map[string]any{ "what": "stinkweed", "who": "me", - "herrick": map[string]interface{}{ + "herrick": map[string]any{ "who": "time", "what": "Sun", }, @@ -704,15 +705,15 @@ func TestRenderNestedValues(t *testing.T) { } outer.AddDependency(inner) - injValues := map[string]interface{}{ + injValues := map[string]any{ "what": "rosebuds", - "herrick": map[string]interface{}{ - "deepest": map[string]interface{}{ + "herrick": map[string]any{ + "deepest": map[string]any{ "what": "flower", "where": "Heaven", }, }, - "global": map[string]interface{}{ + "global": map[string]any{ "when": "to-day", }, } @@ -1059,9 +1060,11 @@ func TestRenderRecursionLimit(t *testing.T) { } var expect string + var expectSb1062 strings.Builder for range times { - expect += phrase + "\n" + expectSb1062.WriteString(phrase + "\n") } + expect += expectSb1062.String() if got := out["overlook/templates/quote"]; got != expect { t.Errorf("Expected %q, got %q (%v)", expect, got, out) } @@ -1349,7 +1352,7 @@ NestedHelperFunctions/charts/common/templates/_helpers_2.tpl:1:49 v := common.Values{} val, _ := util.CoalesceValues(c, v) - vals := map[string]interface{}{ + vals := map[string]any{ "Values": val.AsMap(), } _, err := Render(c, vals) @@ -1383,7 +1386,7 @@ template: no template "nested_helper.name" associated with template "gotpl"` v := common.Values{} val, _ := util.CoalesceValues(c, v) - vals := map[string]interface{}{ + vals := map[string]any{ "Values": val.AsMap(), } _, err := Render(c, vals) @@ -1463,7 +1466,7 @@ func TestTraceableError_SimpleForm(t *testing.T) { for _, errString := range 
testStrings { trace, done := parseTemplateSimpleErrorString(errString) if !done { - t.Errorf("Expected parse to pass but did not") + t.Error("Expected parse to pass but did not") } if trace.message != "error calling include" { t.Errorf("Expected %q, got %q", errString, trace.message) @@ -1480,7 +1483,7 @@ func TestTraceableError_ExecutingForm(t *testing.T) { expectedLocation := errTuple[1] trace, done := parseTemplateExecutingAtErrorType(errString) if !done { - t.Errorf("Expected parse to pass but did not") + t.Error("Expected parse to pass but did not") } if trace.location != expectedLocation { t.Errorf("Expected %q, got %q", expectedLocation, trace.location) @@ -1495,10 +1498,72 @@ func TestTraceableError_NoTemplateForm(t *testing.T) { for _, errString := range testStrings { trace, done := parseTemplateNoTemplateError(errString, errString) if !done { - t.Errorf("Expected parse to pass but did not") + t.Error("Expected parse to pass but did not") } if trace.message != errString { t.Errorf("Expected %q, got %q", errString, trace.message) } } } + +// TestRenderSubchartDefaultNilNoStringify tests the full pipeline: subchart default +// nil values should not produce "%!s()" in rendered template output. +// Regression test for the Bitnami common.secrets.key issue. 
+func TestRenderSubchartDefaultNilNoStringify(t *testing.T) { + modTime := time.Now() + + // Subchart has a default with nil values + subchart := &chart.Chart{ + Metadata: &chart.Metadata{Name: "child"}, + Templates: []*common.File{ + { + Name: "templates/test.yaml", + ModTime: modTime, + Data: []byte(`{{- if hasKey .Values.keyMapping "password" -}}{{- printf "subPath: %s" (index .Values.keyMapping "password") -}}{{- else -}}subPath: fallback{{- end -}}`), + }, + }, + Values: map[string]any{ + "keyMapping": map[string]any{ + "password": nil, // nil in chart defaults + }, + }, + } + + parent := &chart.Chart{ + Metadata: &chart.Metadata{Name: "parent"}, + Values: map[string]any{}, + } + parent.AddDependency(subchart) + + // Parent user values don't set keyMapping + injValues := map[string]any{} + + tmp, err := util.CoalesceValues(parent, injValues) + if err != nil { + t.Fatalf("Failed to coalesce values: %s", err) + } + + inject := common.Values{ + "Values": tmp, + "Chart": parent.Metadata, + "Release": common.Values{ + "Name": "test-release", + }, + } + + out, err := Render(parent, inject) + if err != nil { + t.Fatalf("Failed to render templates: %s", err) + } + + rendered := out["parent/charts/child/templates/test.yaml"] + + if strings.Contains(rendered, "%!s()") { + t.Errorf("Rendered output contains %%!s(), got: %q", rendered) + } + + expected := "subPath: fallback" + if rendered != expected { + t.Errorf("Expected %q, got %q", expected, rendered) + } +} diff --git a/pkg/engine/funcs.go b/pkg/engine/funcs.go index a97f8f104..e03c13b38 100644 --- a/pkg/engine/funcs.go +++ b/pkg/engine/funcs.go @@ -50,6 +50,7 @@ func funcMap() template.FuncMap { // Add some extra functionality extra := template.FuncMap{ "toToml": toTOML, + "mustToToml": mustToTOML, "fromToml": fromTOML, "toYaml": toYAML, "mustToYaml": mustToYAML, @@ -64,13 +65,13 @@ func funcMap() template.FuncMap { // This is a placeholder for the "include" function, which is // late-bound to a template. 
By declaring it here, we preserve the // integrity of the linter. - "include": func(string, interface{}) string { return "not implemented" }, - "tpl": func(string, interface{}) interface{} { return "not implemented" }, - "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil }, + "include": func(string, any) string { return "not implemented" }, + "tpl": func(string, any) any { return "not implemented" }, + "required": func(string, any) (any, error) { return "not implemented", nil }, // Provide a placeholder for the "lookup" function, which requires a kubernetes // connection. - "lookup": func(string, string, string, string) (map[string]interface{}, error) { - return map[string]interface{}{}, nil + "lookup": func(string, string, string, string) (map[string]any, error) { + return map[string]any{}, nil }, } @@ -83,7 +84,7 @@ func funcMap() template.FuncMap { // always return a string, even on marshal error (empty string). // // This is designed to be called from a template. -func toYAML(v interface{}) string { +func toYAML(v any) string { data, err := yaml.Marshal(v) if err != nil { // Swallow errors inside of a template. @@ -97,7 +98,7 @@ func toYAML(v interface{}) string { // // This is designed to be called from a template when need to ensure that the // output YAML is valid. -func mustToYAML(v interface{}) string { +func mustToYAML(v any) string { data, err := yaml.Marshal(v) if err != nil { panic(err) @@ -105,7 +106,7 @@ func mustToYAML(v interface{}) string { return strings.TrimSuffix(string(data), "\n") } -func toYAMLPretty(v interface{}) string { +func toYAMLPretty(v any) string { var data bytes.Buffer encoder := goYaml.NewEncoder(&data) encoder.SetIndent(2) @@ -124,8 +125,8 @@ func toYAMLPretty(v interface{}) string { // YAML documents. Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string into // m["Error"] in the returned map. 
-func fromYAML(str string) map[string]interface{} { - m := map[string]interface{}{} +func fromYAML(str string) map[string]any { + m := map[string]any{} if err := yaml.Unmarshal([]byte(str), &m); err != nil { m["Error"] = err.Error() @@ -139,20 +140,21 @@ func fromYAML(str string) map[string]interface{} { // YAML documents. Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string as // the first and only item in the returned array. -func fromYAMLArray(str string) []interface{} { - a := []interface{}{} +func fromYAMLArray(str string) []any { + a := []any{} if err := yaml.Unmarshal([]byte(str), &a); err != nil { - a = []interface{}{err.Error()} + a = []any{err.Error()} } return a } -// toTOML takes an interface, marshals it to toml, and returns a string. It will -// always return a string, even on marshal error (empty string). +// toTOML takes an interface, marshals it to toml, and returns a string. +// On marshal error it returns the error string. // -// This is designed to be called from a template. -func toTOML(v interface{}) string { +// This is designed to be called from a template. Use mustToToml if you need +// the template to fail hard on marshal errors. +func toTOML(v any) string { b := bytes.NewBuffer(nil) e := toml.NewEncoder(b) err := e.Encode(v) @@ -162,14 +164,29 @@ func toTOML(v interface{}) string { return b.String() } +// mustToTOML takes an interface, marshals it to toml, and returns a string. +// It will panic if there is an error. +// +// This is designed to be called from a template when you need to ensure that the +// output TOML is valid. +func mustToTOML(v any) string { + b := bytes.NewBuffer(nil) + e := toml.NewEncoder(b) + err := e.Encode(v) + if err != nil { + panic(err) + } + return b.String() +} + // fromTOML converts a TOML document into a map[string]interface{}. // // This is not a general-purpose TOML parser, and will not parse all valid // TOML documents. 
Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string into // m["Error"] in the returned map. -func fromTOML(str string) map[string]interface{} { - m := make(map[string]interface{}) +func fromTOML(str string) map[string]any { + m := make(map[string]any) if err := toml.Unmarshal([]byte(str), &m); err != nil { m["Error"] = err.Error() @@ -181,7 +198,7 @@ func fromTOML(str string) map[string]interface{} { // always return a string, even on marshal error (empty string). // // This is designed to be called from a template. -func toJSON(v interface{}) string { +func toJSON(v any) string { data, err := json.Marshal(v) if err != nil { // Swallow errors inside of a template. @@ -195,7 +212,7 @@ func toJSON(v interface{}) string { // // This is designed to be called from a template when need to ensure that the // output JSON is valid. -func mustToJSON(v interface{}) string { +func mustToJSON(v any) string { data, err := json.Marshal(v) if err != nil { panic(err) @@ -209,8 +226,8 @@ func mustToJSON(v interface{}) string { // JSON documents. Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string into // m["Error"] in the returned map. -func fromJSON(str string) map[string]interface{} { - m := make(map[string]interface{}) +func fromJSON(str string) map[string]any { + m := make(map[string]any) if err := json.Unmarshal([]byte(str), &m); err != nil { m["Error"] = err.Error() @@ -224,11 +241,11 @@ func fromJSON(str string) map[string]interface{} { // JSON documents. Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string as // the first and only item in the returned array. 
-func fromJSONArray(str string) []interface{} { - a := []interface{}{} +func fromJSONArray(str string) []any { + a := []any{} if err := json.Unmarshal([]byte(str), &a); err != nil { - a = []interface{}{err.Error()} + a = []any{err.Error()} } return a } diff --git a/pkg/engine/funcs_test.go b/pkg/engine/funcs_test.go index 71a72e2e4..be9d0153f 100644 --- a/pkg/engine/funcs_test.go +++ b/pkg/engine/funcs_test.go @@ -28,19 +28,19 @@ func TestFuncs(t *testing.T) { //TODO write tests for failure cases tests := []struct { tpl, expect string - vars interface{} + vars any }{{ tpl: `{{ toYaml . }}`, expect: `foo: bar`, - vars: map[string]interface{}{"foo": "bar"}, + vars: map[string]any{"foo": "bar"}, }, { tpl: `{{ toYamlPretty . }}`, expect: "baz:\n - 1\n - 2\n - 3", - vars: map[string]interface{}{"baz": []int{1, 2, 3}}, + vars: map[string]any{"baz": []int{1, 2, 3}}, }, { tpl: `{{ toToml . }}`, expect: "foo = \"bar\"\n", - vars: map[string]interface{}{"foo": "bar"}, + vars: map[string]any{"foo": "bar"}, }, { tpl: `{{ fromToml . }}`, expect: "map[hello:world]", @@ -68,7 +68,7 @@ keyInElement1 = "valueInElement1"`, }, { tpl: `{{ toJson . }}`, expect: `{"foo":"bar"}`, - vars: map[string]interface{}{"foo": "bar"}, + vars: map[string]any{"foo": "bar"}, }, { tpl: `{{ fromYaml . 
}}`, expect: "map[hello:world]", @@ -109,11 +109,11 @@ keyInElement1 = "valueInElement1"`, }, { tpl: `{{ merge .dict (fromYaml .yaml) }}`, expect: `map[a:map[b:c]]`, - vars: map[string]interface{}{"dict": map[string]interface{}{"a": map[string]interface{}{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`}, + vars: map[string]any{"dict": map[string]any{"a": map[string]any{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`}, }, { tpl: `{{ merge (fromYaml .yaml) .dict }}`, expect: `map[a:map[b:d]]`, - vars: map[string]interface{}{"dict": map[string]interface{}{"a": map[string]interface{}{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`}, + vars: map[string]any{"dict": map[string]any{"a": map[string]any{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`}, }, { tpl: `{{ fromYaml . }}`, expect: `map[Error:error unmarshaling JSON: while decoding JSON: json: cannot unmarshal array into Go value of type map[string]interface {}]`, @@ -136,15 +136,15 @@ keyInElement1 = "valueInElement1"`, assert.Equal(t, tt.expect, b.String(), tt.tpl) } - loopMap := map[string]interface{}{ + loopMap := map[string]any{ "foo": "bar", } - loopMap["loop"] = []interface{}{loopMap} + loopMap["loop"] = []any{loopMap} mustFuncsTests := []struct { tpl string - expect interface{} - vars interface{} + expect any + vars any }{{ tpl: `{{ mustToYaml . }}`, vars: loopMap, @@ -159,6 +159,13 @@ keyInElement1 = "valueInElement1"`, tpl: `{{ toJson . }}`, expect: "", // should return empty string and swallow error vars: loopMap, + }, { + tpl: `{{ mustToToml . }}`, + vars: map[int]string{1: "one"}, // non-string key is invalid in TOML + }, { + tpl: `{{ mustToToml . }}`, + expect: "foo = \"bar\"\n", // should succeed and return TOML string + vars: map[string]string{"foo": "bar"}, }, } @@ -186,34 +193,34 @@ keyInElement1 = "valueInElement1"`, // be used to accidentally update mergo. This test and message should catch // the problem and explain why it's happening. 
func TestMerge(t *testing.T) { - dict := map[string]interface{}{ - "src2": map[string]interface{}{ + dict := map[string]any{ + "src2": map[string]any{ "h": 10, "i": "i", "j": "j", }, - "src1": map[string]interface{}{ + "src1": map[string]any{ "a": 1, "b": 2, - "d": map[string]interface{}{ + "d": map[string]any{ "e": "four", }, "g": []int{6, 7}, "i": "aye", "j": "jay", - "k": map[string]interface{}{ + "k": map[string]any{ "l": false, }, }, - "dst": map[string]interface{}{ + "dst": map[string]any{ "a": "one", "c": 3, - "d": map[string]interface{}{ + "d": map[string]any{ "f": 5, }, "g": []int{8, 9}, "i": "eye", - "k": map[string]interface{}{ + "k": map[string]any{ "l": true, }, }, @@ -223,11 +230,11 @@ func TestMerge(t *testing.T) { err := template.Must(template.New("test").Funcs(funcMap()).Parse(tpl)).Execute(&b, dict) assert.NoError(t, err) - expected := map[string]interface{}{ + expected := map[string]any{ "a": "one", // key overridden "b": 2, // merged from src1 "c": 3, // merged from dst - "d": map[string]interface{}{ // deep merge + "d": map[string]any{ // deep merge "e": "four", "f": 5, }, @@ -235,7 +242,7 @@ func TestMerge(t *testing.T) { "h": 10, // merged from src2 "i": "eye", // overridden twice "j": "jay", // overridden and merged - "k": map[string]interface{}{ + "k": map[string]any{ "l": true, // overridden }, } diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index c6ad8d252..52b6ffdaf 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/rest" ) -type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) +type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]any, error) // NewLookupFunction returns a function for looking up objects in the cluster. 
// @@ -55,11 +55,11 @@ func (c clientProviderFromConfig) GetClientFor(apiVersion, kind string) (dynamic } func newLookupFunction(clientProvider ClientProvider) lookupFunc { - return func(apiversion string, kind string, namespace string, name string) (map[string]interface{}, error) { + return func(apiversion string, kind string, namespace string, name string) (map[string]any, error) { var client dynamic.ResourceInterface c, namespaced, err := clientProvider.GetClientFor(apiversion, kind) if err != nil { - return map[string]interface{}{}, err + return map[string]any{}, err } if namespaced && namespace != "" { client = c.Namespace(namespace) @@ -73,9 +73,9 @@ func newLookupFunction(clientProvider ClientProvider) lookupFunc { if apierrors.IsNotFound(err) { // Just return an empty interface when the object was not found. // That way, users can use `if not (lookup ...)` in their templates. - return map[string]interface{}{}, nil + return map[string]any{}, nil } - return map[string]interface{}{}, err + return map[string]any{}, err } return obj.UnstructuredContent(), nil } @@ -85,9 +85,9 @@ func newLookupFunction(clientProvider ClientProvider) lookupFunc { if apierrors.IsNotFound(err) { // Just return an empty interface when the object was not found. // That way, users can use `if not (lookup ...)` in their templates. 
- return map[string]interface{}{}, nil + return map[string]any{}, nil } - return map[string]interface{}{}, err + return map[string]any{}, err } return obj.UnstructuredContent(), nil } diff --git a/pkg/getter/getter_test.go b/pkg/getter/getter_test.go index 83920e809..3a09b4d82 100644 --- a/pkg/getter/getter_test.go +++ b/pkg/getter/getter_test.go @@ -60,7 +60,8 @@ func TestProvidersWithTimeout(t *testing.T) { if err != nil { t.Error(err) } - client, err := getter.(*HTTPGetter).httpClient() + httpGetter := getter.(*HTTPGetter) + client, err := httpGetter.httpClient(httpGetter.opts) if err != nil { t.Error(err) } diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go index 110f45c54..2eb2d5d8c 100644 --- a/pkg/getter/httpgetter.go +++ b/pkg/getter/httpgetter.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "fmt" "io" + "log/slog" "net/http" "net/url" "sync" @@ -37,13 +38,15 @@ type HTTPGetter struct { // Get performs a Get from repo.Getter and returns the body. func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + // Create a local copy of options to avoid data races when Get is called concurrently + opts := g.opts for _, opt := range options { - opt(&g.opts) + opt(&opts) } - return g.get(href) + return g.get(href, opts) } -func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { +func (g *HTTPGetter) get(href string, opts getterOptions) (*bytes.Buffer, error) { // Set a helm specific user agent so that a repo server and metrics can // separate helm calls from other tools interacting with repos. 
req, err := http.NewRequest(http.MethodGet, href, nil) @@ -51,18 +54,18 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { return nil, err } - if g.opts.acceptHeader != "" { - req.Header.Set("Accept", g.opts.acceptHeader) + if opts.acceptHeader != "" { + req.Header.Set("Accept", opts.acceptHeader) } req.Header.Set("User-Agent", version.GetUserAgent()) - if g.opts.userAgent != "" { - req.Header.Set("User-Agent", g.opts.userAgent) + if opts.userAgent != "" { + req.Header.Set("User-Agent", opts.userAgent) } // Before setting the basic auth credentials, make sure the URL associated // with the basic auth is the one being fetched. - u1, err := url.Parse(g.opts.url) + u1, err := url.Parse(opts.url) if err != nil { return nil, fmt.Errorf("unable to parse getter URL: %w", err) } @@ -74,22 +77,24 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { // Host on URL (returned from url.Parse) contains the port if present. // This check ensures credentials are not passed between different // services on different ports. 
- if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { - if g.opts.username != "" && g.opts.password != "" { - req.SetBasicAuth(g.opts.username, g.opts.password) + if opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if opts.username != "" && opts.password != "" { + req.SetBasicAuth(opts.username, opts.password) } } - client, err := g.httpClient() + client, err := g.httpClient(opts) if err != nil { return nil, err } + slog.Debug("fetching", "url", href) resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() + slog.Debug("fetch complete", "url", href, "status", resp.Status, "content-length", resp.ContentLength) if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("failed to fetch %s : %s", href, resp.Status) } @@ -110,51 +115,52 @@ func NewHTTPGetter(options ...Option) (Getter, error) { return &client, nil } -func (g *HTTPGetter) httpClient() (*http.Client, error) { - if g.opts.transport != nil { +func (g *HTTPGetter) httpClient(opts getterOptions) (*http.Client, error) { + if opts.transport != nil { return &http.Client{ - Transport: g.opts.transport, - Timeout: g.opts.timeout, + Transport: opts.transport, + Timeout: opts.timeout, }, nil } - g.once.Do(func() { - g.transport = &http.Transport{ + // Check if we need custom TLS configuration + needsCustomTLS := (opts.certFile != "" && opts.keyFile != "") || opts.caFile != "" || opts.insecureSkipVerifyTLS + + if needsCustomTLS { + // Create a new transport for custom TLS to avoid race conditions + transport := &http.Transport{ DisableCompression: true, Proxy: http.ProxyFromEnvironment, - // Being nil would cause the tls.Config default to be used - // "NewTLSConfig" modifies an empty TLS config, not the default one - TLSClientConfig: &tls.Config{}, } - }) - if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS { tlsConf, err := tlsutil.NewTLSConfig( - 
tlsutil.WithInsecureSkipVerify(g.opts.insecureSkipVerifyTLS), - tlsutil.WithCertKeyPairFiles(g.opts.certFile, g.opts.keyFile), - tlsutil.WithCAFile(g.opts.caFile), + tlsutil.WithInsecureSkipVerify(opts.insecureSkipVerifyTLS), + tlsutil.WithCertKeyPairFiles(opts.certFile, opts.keyFile), + tlsutil.WithCAFile(opts.caFile), ) if err != nil { return nil, fmt.Errorf("can't create TLS config for client: %w", err) } - g.transport.TLSClientConfig = tlsConf + transport.TLSClientConfig = tlsConf + + return &http.Client{ + Transport: transport, + Timeout: opts.timeout, + }, nil } - if g.opts.insecureSkipVerifyTLS { - if g.transport.TLSClientConfig == nil { - g.transport.TLSClientConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } else { - g.transport.TLSClientConfig.InsecureSkipVerify = true + // Use shared transport for default case (no custom TLS) + g.once.Do(func() { + g.transport = &http.Transport{ + DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{}, } - } + }) - client := &http.Client{ + return &http.Client{ Transport: g.transport, - Timeout: g.opts.timeout, - } - - return client, nil + Timeout: opts.timeout, + }, nil } diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go index b27b9f5d2..7d4581233 100644 --- a/pkg/getter/httpgetter_test.go +++ b/pkg/getter/httpgetter_test.go @@ -403,7 +403,7 @@ func TestDownloadTLSWithRedirect(t *testing.T) { // request URL for every request (including redirects). Setting `tls.Config.ServerName` on the // client just overrides the remote endpoint's hostname. // See https://github.com/golang/go/blob/3979fb9/src/net/http/transport.go#L1505-L1513. 
- u.Host = fmt.Sprintf("localhost:%s", u.Port()) + u.Host = "localhost:" + u.Port() http.Redirect(rw, r, u.String(), http.StatusTemporaryRedirect) })) @@ -577,16 +577,16 @@ func TestHttpClientInsecureSkipVerify(t *testing.T) { func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expectedValue bool) *http.Transport { t.Helper() - returnVal, err := g.httpClient() + returnVal, err := g.httpClient(g.opts) if err != nil { t.Fatal(err) } - if returnVal == nil { //nolint:staticcheck - t.Fatalf("Expected non nil value for http client") + if returnVal == nil { + t.Fatal("Expected non nil value for http client") } - transport := (returnVal.Transport).(*http.Transport) //nolint:staticcheck + transport := (returnVal.Transport).(*http.Transport) gotValue := false if transport.TLSClientConfig != nil { gotValue = transport.TLSClientConfig.InsecureSkipVerify @@ -601,32 +601,32 @@ func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expe func TestDefaultHTTPTransportReuse(t *testing.T) { g := HTTPGetter{} - httpClient1, err := g.httpClient() + httpClient1, err := g.httpClient(g.opts) if err != nil { t.Fatal(err) } - if httpClient1 == nil { //nolint:staticcheck - t.Fatalf("Expected non nil value for http client") + if httpClient1 == nil { + t.Fatal("Expected non nil value for http client") } - transport1 := (httpClient1.Transport).(*http.Transport) //nolint:staticcheck + transport1 := (httpClient1.Transport).(*http.Transport) - httpClient2, err := g.httpClient() + httpClient2, err := g.httpClient(g.opts) if err != nil { t.Fatal(err) } - if httpClient2 == nil { //nolint:staticcheck - t.Fatalf("Expected non nil value for http client") + if httpClient2 == nil { + t.Fatal("Expected non nil value for http client") } - transport2 := (httpClient2.Transport).(*http.Transport) //nolint:staticcheck + transport2 := (httpClient2.Transport).(*http.Transport) if transport1 != transport2 { - t.Fatalf("Expected default transport to be reused") + 
t.Fatal("Expected default transport to be reused") } } @@ -635,36 +635,36 @@ func TestHTTPTransportOption(t *testing.T) { g := HTTPGetter{} g.opts.transport = transport - httpClient1, err := g.httpClient() + httpClient1, err := g.httpClient(g.opts) if err != nil { t.Fatal(err) } - if httpClient1 == nil { //nolint:staticcheck - t.Fatalf("Expected non nil value for http client") + if httpClient1 == nil { + t.Fatal("Expected non nil value for http client") } - transport1 := (httpClient1.Transport).(*http.Transport) //nolint:staticcheck + transport1 := (httpClient1.Transport).(*http.Transport) if transport1 != transport { - t.Fatalf("Expected transport option to be applied") + t.Fatal("Expected transport option to be applied") } - httpClient2, err := g.httpClient() + httpClient2, err := g.httpClient(g.opts) if err != nil { t.Fatal(err) } - if httpClient2 == nil { //nolint:staticcheck - t.Fatalf("Expected non nil value for http client") + if httpClient2 == nil { + t.Fatal("Expected non nil value for http client") } - transport2 := (httpClient2.Transport).(*http.Transport) //nolint:staticcheck + transport2 := (httpClient2.Transport).(*http.Transport) if transport1 != transport2 { - t.Fatalf("Expected applied transport to be reused") + t.Fatal("Expected applied transport to be reused") } g = HTTPGetter{} diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go index 24fc60c56..de8643bcd 100644 --- a/pkg/getter/ocigetter.go +++ b/pkg/getter/ocigetter.go @@ -58,7 +58,7 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { client = c } - ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)) + ref := strings.TrimPrefix(href, registry.OCIScheme+"://") if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") { ref = fmt.Sprintf("%s:%s", ref, version) diff --git a/pkg/getter/ocigetter_test.go b/pkg/getter/ocigetter_test.go index ef196afcc..5a7c99ebe 100644 --- a/pkg/getter/ocigetter_test.go +++ 
b/pkg/getter/ocigetter_test.go @@ -128,7 +128,7 @@ func TestOCIHTTPTransportReuse(t *testing.T) { } if g.transport == nil { - t.Fatalf("Expected non nil value for transport") + t.Fatal("Expected non nil value for transport") } transport1 := g.transport @@ -140,12 +140,12 @@ func TestOCIHTTPTransportReuse(t *testing.T) { } if g.transport == nil { - t.Fatalf("Expected non nil value for transport") + t.Fatal("Expected non nil value for transport") } transport2 := g.transport if transport1 != transport2 { - t.Fatalf("Expected default transport to be reused") + t.Fatal("Expected default transport to be reused") } } diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index d74611637..ef8b87503 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -38,12 +38,14 @@ func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) { if err != nil { return nil, err } + env := plugin.FormatEnv(settings.EnvVars()) pluginConstructorBuilder := func(plg plugin.Plugin) Constructor { return func(option ...Option) (Getter, error) { return &getterPlugin{ options: append([]Option{}, option...), plg: plg, + env: env, }, nil } } @@ -91,6 +93,7 @@ func convertOptions(globalOptions, options []Option) schema.GetterOptionsV1 { type getterPlugin struct { options []Option plg plugin.Plugin + env []string } func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error) { @@ -108,6 +111,7 @@ func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error Options: opts, Protocol: u.Scheme, }, + Env: g.env, // TODO should we pass Stdin, Stdout, and Stderr through Input here to getter plugins? 
// Stdout: os.Stdout, } diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 8faaf7329..16af9eb31 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -144,3 +144,27 @@ func TestGetterPlugin(t *testing.T) { assert.Equal(t, "fake-plugin output", buf.String()) } + +func TestCollectGetterPluginsPassesEnv(t *testing.T) { + env := cli.New() + env.PluginsDirectory = pluginDir + env.Debug = true + + providers, err := collectGetterPlugins(env) + require.NoError(t, err) + require.NotEmpty(t, providers, "expected at least one plugin provider") + + getter, err := providers.ByScheme("test") + require.NoError(t, err) + + gp, ok := getter.(*getterPlugin) + require.True(t, ok, "expected getter to be a *getterPlugin") + + require.NotEmpty(t, gp.env, "expected env to be set on getterPlugin") + envMap := plugin.ParseEnv(gp.env) + + assert.Contains(t, envMap, "HELM_DEBUG", "expected HELM_DEBUG in env") + assert.Equal(t, "true", envMap["HELM_DEBUG"], "expected HELM_DEBUG to be true") + assert.Contains(t, envMap, "HELM_PLUGINS", "expected HELM_PLUGINS in env") + assert.Equal(t, pluginDir, envMap["HELM_PLUGINS"], "expected HELM_PLUGINS to match pluginsDirectory") +} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 6e09fceac..c955e8875 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "bytes" @@ -87,6 +87,8 @@ type Client struct { // WaitContext is an optional context to use for wait operations. // If not set, a context will be created internally using the // timeout provided to the wait functions. + // + // Deprecated: Use WithWaitContext wait option when getting a Waiter instead. 
WaitContext context.Context Waiter @@ -139,7 +141,11 @@ func init() { } } -func (c *Client) newStatusWatcher() (*statusWaiter, error) { +func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { + var o waitOptions + for _, opt := range opts { + opt(&o) + } cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err @@ -156,14 +162,29 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } - return &statusWaiter{ - restMapper: restMapper, - client: dynamicClient, - ctx: c.WaitContext, - }, nil + waitContext := o.ctx + if waitContext == nil { + waitContext = c.WaitContext + } + sw := &statusWaiter{ + restMapper: restMapper, + client: dynamicClient, + ctx: waitContext, + watchUntilReadyCtx: o.watchUntilReadyCtx, + waitCtx: o.waitCtx, + waitWithJobsCtx: o.waitWithJobsCtx, + waitForDeleteCtx: o.waitForDeleteCtx, + readers: o.statusReaders, + } + sw.SetLogger(c.Logger().Handler()) + return sw, nil +} + +func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) { + return c.GetWaiterWithOptions(ws) } -func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { +func (c *Client) GetWaiterWithOptions(strategy WaitStrategy, opts ...WaitOption) (Waiter, error) { switch strategy { case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() @@ -172,9 +193,9 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } return &legacyWaiter{kubeClient: kc, ctx: c.WaitContext}, nil case StatusWatcherStrategy: - return c.newStatusWatcher() + return c.newStatusWatcher(opts...) case HookOnlyStrategy: - sw, err := c.newStatusWatcher() + sw, err := c.newStatusWatcher(opts...) 
if err != nil { return nil, err } @@ -187,8 +208,12 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } func (c *Client) SetWaiter(ws WaitStrategy) error { + return c.SetWaiterWithOptions(ws) +} + +func (c *Client) SetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) error { var err error - c.Waiter, err = c.GetWaiter(ws) + c.Waiter, err = c.GetWaiterWithOptions(ws, opts...) if err != nil { return err } @@ -247,12 +272,12 @@ type ClientCreateOption func(*clientCreateOptions) error // ClientCreateOptionServerSideApply enables performing object apply server-side // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ // -// `forceConflicts` forces conflicts to be resolved (may be when serverSideApply enabled only) +// `forceConflicts` forces conflicts to be resolved (may be used when serverSideApply enabled only) // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts func ClientCreateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientCreateOption { return func(o *clientCreateOptions) error { if !serverSideApply && forceConflicts { - return fmt.Errorf("forceConflicts enabled when serverSideApply disabled") + return errors.New("forceConflicts enabled when serverSideApply disabled") } o.serverSideApply = serverSideApply @@ -575,7 +600,32 @@ func (c *Client) update(originals, targets ResourceList, createApplyFunc CreateA original := originals.Get(target) if original == nil { kind := target.Mapping.GroupVersionKind.Kind - return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + + slog.Warn("resource exists on cluster but not in original release, using cluster state as baseline", + "namespace", target.Namespace, "name", target.Name, "kind", kind) + + currentObj, err := helper.Get(target.Namespace, target.Name) + if err != nil { + return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + } + + // Create a temporary Info with 
the current cluster state to use as "original" + currentInfo := &resource.Info{ + Client: target.Client, + Mapping: target.Mapping, + Namespace: target.Namespace, + Name: target.Name, + Object: currentObj, + } + + if err := updateApplyFunc(currentInfo, target); err != nil { + updateErrors = append(updateErrors, err) + } + + // Because we check for errors later, append the info regardless + res.Updated = append(res.Updated, target) + + return nil } if err := updateApplyFunc(original, target); err != nil { @@ -631,7 +681,9 @@ func (c *Client) update(originals, targets ResourceList, createApplyFunc CreateA slog.Any("error", err), ) if !apierrors.IsNotFound(err) { - updateErrors = append(updateErrors, fmt.Errorf("failed to delete resource %s: %w", info.Name, err)) + updateErrors = append(updateErrors, fmt.Errorf( + "failed to delete resource namespace=%s, name=%s, kind=%s: %w", + info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err)) } continue } @@ -675,7 +727,7 @@ func ClientUpdateOptionThreeWayMergeForUnstructured(threeWayMergeForUnstructured func ClientUpdateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientUpdateOption { return func(o *clientUpdateOptions) error { if !serverSideApply && forceConflicts { - return fmt.Errorf("forceConflicts enabled when serverSideApply disabled") + return errors.New("forceConflicts enabled when serverSideApply disabled") } o.serverSideApply = serverSideApply @@ -759,15 +811,15 @@ func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdate } if updateOptions.threeWayMergeForUnstructured && updateOptions.serverSideApply { - return &Result{}, fmt.Errorf("invalid operation: cannot use three-way merge for unstructured and server-side apply together") + return &Result{}, errors.New("invalid operation: cannot use three-way merge for unstructured and server-side apply together") } if updateOptions.forceConflicts && updateOptions.forceReplace { - return &Result{}, fmt.Errorf("invalid 
operation: cannot use force conflicts and force replace together") + return &Result{}, errors.New("invalid operation: cannot use force conflicts and force replace together") } if updateOptions.serverSideApply && updateOptions.forceReplace { - return &Result{}, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together") + return &Result{}, errors.New("invalid operation: cannot use server-side apply and force replace together") } createApplyFunc := c.makeCreateApplyFunc( @@ -1189,7 +1241,7 @@ func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts return fmt.Errorf("conflict occurred while applying object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err) } - return err + return fmt.Errorf("server-side apply failed for object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err) } return target.Refresh(obj, true) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index d442864f8..ed871c05a 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "errors" - "fmt" "io" "net/http" "strings" @@ -28,9 +27,15 @@ import ( "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -40,8 +45,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + 
dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" @@ -411,7 +418,25 @@ func TestUpdate(t *testing.T) { "/namespaces/default/pods/forbidden:GET", "/namespaces/default/pods/forbidden:DELETE", ), - ExpectedError: "failed to delete resource forbidden:", + ExpectedError: "failed to delete resource namespace=default, name=forbidden, kind=Pod:", + }, + "rollback after failed upgrade with removed resource": { + // Simulates rollback scenario: + // - Revision 1 had "newpod" + // - Revision 2 removed "newpod" but upgrade failed (OriginalPods is empty) + // - Cluster still has "newpod" from Revision 1 + // - Rolling back to Revision 1 (TargetPods with "newpod") should succeed + OriginalPods: v1.PodList{}, // Revision 2 (failed) - resource was removed + TargetPods: newPodList("newpod"), // Revision 1 - rolling back to this + ThreeWayMergeForUnstructured: false, + ServerSideApply: true, + ExpectedActions: []string{ + "/namespaces/default/pods/newpod:GET", // Check if resource exists + "/namespaces/default/pods/newpod:GET", // Get current state (first call in update path) + "/namespaces/default/pods/newpod:GET", // Get current cluster state to use as baseline + "/namespaces/default/pods/newpod:PATCH", // Update using cluster state as baseline + }, + ExpectedError: "", }, } @@ -428,6 +453,10 @@ func TestUpdate(t *testing.T) { p, m := req.URL.Path, req.Method switch { + case p == "/namespaces/default/pods/newpod" && m == http.MethodGet: + return newResponse(http.StatusOK, &listTarget.Items[0]) + case p == "/namespaces/default/pods/newpod" && m == http.MethodPatch: + return newResponse(http.StatusOK, &listTarget.Items[0]) case p == "/namespaces/default/pods/starfish" && m == http.MethodGet: return newResponse(http.StatusOK, &listOriginal.Items[0]) case p == "/namespaces/default/pods/otter" && m == http.MethodGet: @@ -519,9 +548,23 @@ func TestUpdate(t *testing.T) { 
require.NoError(t, err) } - assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) - assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) - assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + // Special handling for the rollback test case + if name == "rollback after failed upgrade with removed resource" { + assert.Len(t, result.Created, 0, "expected 0 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 1, "expected 1 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 0, "expected 0 resource deleted, got %d", len(result.Deleted)) + } else { + assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + } + + if tc.ExpectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.ExpectedError) + } else { + require.NoError(t, err) + } actions := []string{} for _, action := range client.Actions { @@ -722,7 +765,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -783,7 +826,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -845,7 +888,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -929,6 +972,7 @@ func TestGetPodList(t *testing.T) { podList, err := c.GetPodList(namespace, metav1.ListOptions{}) clientAssertions := assert.New(t) 
clientAssertions.NoError(err) + podList.ResourceVersion = "" clientAssertions.Equal(&responsePodList, podList) } @@ -1201,7 +1245,7 @@ func (c createPatchTestCase) run(t *testing.T) { } } -func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured { +func newTestCustomResourceData(metadata map[string]string, spec map[string]any) *unstructured.Unstructured { if metadata == nil { metadata = make(map[string]string) } @@ -1211,7 +1255,7 @@ func newTestCustomResourceData(metadata map[string]string, spec map[string]inter if _, ok := metadata["namespace"]; !ok { metadata["namespace"] = "default" } - o := map[string]interface{}{ + o := map[string]any{ "apiVersion": "crd.com/v1", "kind": "Data", "metadata": metadata, @@ -1234,7 +1278,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { name: "take ownership of resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", }), threeWayMergeForUnstructured: true, @@ -1250,7 +1294,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { } func TestCreatePatchCustomResourceSpec(t *testing.T) { - target := newTestCustomResourceData(nil, map[string]interface{}{ + target := newTestCustomResourceData(nil, map[string]any{ "color": "red", "size": "large", }) @@ -1258,7 +1302,7 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { name: "merge with spec of existing custom resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", "weight": "heavy", }), @@ -1390,7 +1434,7 @@ func TestIsIncompatibleServerError(t *testing.T) { Want: false, }, "Generic error": { - Err: fmt.Errorf("some generic error"), + Err: errors.New("some generic error"), Want: false, }, } @@ -1759,6 +1803,23 @@ func TestPatchResourceServerSide(t 
*testing.T) { }, ExpectedErrorContains: "the server reported a conflict", }, + "generic server-side apply error": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) { + t.Helper() + + return newResponse(http.StatusBadRequest, &metav1.Status{ + Status: metav1.StatusFailure, + Message: `failed to create typed patch object: .spec.template.spec.containers[name="test"].env: duplicate entries for key [name="SERVER_CONTEXT_PATH"]`, + Reason: metav1.StatusReasonBadRequest, + Code: http.StatusBadRequest, + }) + }, + ExpectedErrorContains: "server-side apply failed for object default/whale /v1, Kind=Pod: failed to create typed patch object", + }, } for name, tc := range testCases { @@ -1852,7 +1913,7 @@ func TestClientWaitContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -1907,7 +1968,7 @@ func TestClientWaitWithJobsContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(job), false) @@ -1968,7 +2029,7 @@ func TestClientWaitForDeleteContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&pod), false) @@ -2030,7 +2091,7 @@ func TestClientWaitContextNilDoesNotPanic(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ 
-2080,7 +2141,7 @@ func TestClientWaitContextPreCancelledLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -2111,7 +2172,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2138,7 +2199,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(jobManifest), false) @@ -2170,7 +2231,7 @@ status: phase: Running ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2182,3 +2243,135 @@ status: require.Error(t, err) assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err) } + +// testStatusReader is a custom status reader for testing that returns a configurable status. 
+type testStatusReader struct { + supportedGK schema.GroupKind + status status.Status +} + +func (r *testStatusReader) Supports(gk schema.GroupKind) bool { + return gk == r.supportedGK +} + +func (r *testStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) { + return &event.ResourceStatus{ + Identifier: id, + Status: r.status, + Message: "test status reader", + }, nil +} + +func (r *testStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) { + id := object.ObjMetadata{ + Namespace: u.GetNamespace(), + Name: u.GetName(), + GroupKind: u.GroupVersionKind().GroupKind(), + } + return &event.ResourceStatus{ + Identifier: id, + Status: r.status, + Message: "test status reader", + }, nil +} + +func TestClientStatusReadersPassedToStatusWaiter(t *testing.T) { + // This test verifies that Client.StatusReaders is correctly passed through + // to the statusWaiter when using the StatusWatcherStrategy. + // We use a custom status reader that immediately returns CurrentStatus for pods, + // which allows a pod without Ready condition to pass the wait. 
+ podManifest := ` +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + namespace: default +` + + c := newTestClient(t) + statusReaders := []engine.StatusReader{ + &testStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + } + + // Create a fake dynamic client with the pod resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod")) + + // Create the pod in the fake client + createManifest(t, podManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } + + resources, err := c.Build(strings.NewReader(podManifest), false) + require.NoError(t, err) + + // The pod has no Ready condition, but our custom reader returns CurrentStatus, + // so the wait should succeed immediately without timeout. + err = c.Wait(resources, time.Second*3) + require.NoError(t, err) +} + +func TestClientStatusReadersWithWaitWithJobs(t *testing.T) { + // This test verifies that Client.StatusReaders is correctly passed through + // to the statusWaiter when using WaitWithJobs. 
+ jobManifest := ` +apiVersion: batch/v1 +kind: Job +metadata: + name: test-job + namespace: default +` + + c := newTestClient(t) + statusReaders := []engine.StatusReader{ + &testStatusReader{ + supportedGK: schema.GroupKind{Group: "batch", Kind: "Job"}, + status: status.CurrentStatus, + }, + } + + // Create a fake dynamic client with the job resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job")) + + // Create the job in the fake client + createManifest(t, jobManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } + + resources, err := c.Build(strings.NewReader(jobManifest), false) + require.NoError(t, err) + + // The job has no Complete condition, but our custom reader returns CurrentStatus, + // so the wait should succeed immediately without timeout. + err = c.WaitWithJobs(resources, time.Second*3) + require.NoError(t, err) +} + +func createManifest(t *testing.T, manifest string, + fakeMapper meta.RESTMapper, fakeClient *dynamicfake.FakeDynamicClient) { + t.Helper() + + m := make(map[string]any) + err := yaml.Unmarshal([]byte(manifest), &m) + require.NoError(t, err) + obj := &unstructured.Unstructured{Object: m} + gvk := obj.GroupVersionKind() + mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + require.NoError(t, err) + err = fakeClient.Tracker().Create(mapping.Resource, obj, obj.GetNamespace()) + require.NoError(t, err) +} diff --git a/pkg/kube/converter.go b/pkg/kube/converter.go index ac6d95fb4..7300a708c 100644 --- a/pkg/kube/converter.go +++ b/pkg/kube/converter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "sync" diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go index 1d237c307..4e12e0889 100644 --- a/pkg/kube/factory.go +++ b/pkg/kube/factory.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "k8s.io/cli-runtime/pkg/resource" diff --git a/pkg/kube/fake/failing_kube_client.go b/pkg/kube/fake/failing_kube_client.go index 31d0082cc..0f7787f79 100644 --- a/pkg/kube/fake/failing_kube_client.go +++ b/pkg/kube/fake/failing_kube_client.go @@ -47,6 +47,8 @@ type FailingKubeClient struct { WaitForDeleteError error WatchUntilReadyError error WaitDuration time.Duration + // RecordedWaitOptions stores the WaitOptions passed to GetWaiter for testing + RecordedWaitOptions []kube.WaitOption } var _ kube.Interface = &FailingKubeClient{} @@ -153,7 +155,13 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, } func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := f.PrintingKubeClient.GetWaiter(ws) + return f.GetWaiterWithOptions(ws) +} + +func (f *FailingKubeClient) GetWaiterWithOptions(ws kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + // Record the WaitOptions for testing + f.RecordedWaitOptions = append(f.RecordedWaitOptions, opts...) + waiter, _ := f.PrintingKubeClient.GetWaiterWithOptions(ws, opts...) printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) return &FailingKubeWaiter{ PrintingKubeWaiter: printingKubeWaiter, diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index a7aad1dac..40897f0a7 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -17,7 +17,6 @@ limitations under the License. 
package fake import ( - "fmt" "io" "strings" "time" @@ -133,7 +132,7 @@ func (p *PrintingKubeClient) GetPodList(_ string, _ metav1.ListOptions) (*v1.Pod // OutputContainerLogsForPodList implements KubeClient OutputContainerLogsForPodList. func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNamespace string, _ func(namespace, pod, container string) io.Writer) error { - _, err := io.Copy(p.LogOutput, strings.NewReader(fmt.Sprintf("attempted to output logs for namespace: %s", someNamespace))) + _, err := io.Copy(p.LogOutput, strings.NewReader("attempted to output logs for namespace: "+someNamespace)) return err } @@ -148,7 +147,11 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { +func (p *PrintingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { + return p.GetWaiterWithOptions(ws) +} + +func (p *PrintingKubeClient) GetWaiterWithOptions(_ kube.WaitStrategy, _ ...kube.WaitOption) (kube.Waiter, error) { return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index cc934ae1e..63c784751 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -56,7 +56,7 @@ type Interface interface { // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - // Get Waiter gets the Kube.Waiter + // GetWaiter gets the Kube.Waiter. GetWaiter(ws WaitStrategy) (Waiter, error) // GetPodList lists all pods that match the specified listOptions @@ -99,3 +99,14 @@ type Waiter interface { // error. WatchUntilReady(resources ResourceList, timeout time.Duration) error } + +// InterfaceWaitOptions defines an interface that extends Interface with +// methods that accept wait options. +// +// TODO Helm 5: Remove InterfaceWaitOptions and integrate its method(s) into the Interface. 
+type InterfaceWaitOptions interface { + // GetWaiter gets the Kube.Waiter with options. + GetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) (Waiter, error) +} + +var _ InterfaceWaitOptions = (*Client)(nil) diff --git a/pkg/kube/options.go b/pkg/kube/options.go new file mode 100644 index 000000000..3326c284b --- /dev/null +++ b/pkg/kube/options.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "context" + + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" +) + +// WaitOption is a function that configures an option for waiting on resources. +type WaitOption func(*waitOptions) + +// WithWaitContext sets the context for waiting on resources. +// If unset, context.Background() will be used. +func WithWaitContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.ctx = ctx + } +} + +// WithWatchUntilReadyMethodContext sets the context specifically for the WatchUntilReady method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWatchUntilReadyMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.watchUntilReadyCtx = ctx + } +} + +// WithWaitMethodContext sets the context specifically for the Wait method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). 
+func WithWaitMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitCtx = ctx + } +} + +// WithWaitWithJobsMethodContext sets the context specifically for the WaitWithJobs method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitWithJobsMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitWithJobsCtx = ctx + } +} + +// WithWaitForDeleteMethodContext sets the context specifically for the WaitForDelete method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitForDeleteMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitForDeleteCtx = ctx + } +} + +// WithKStatusReaders sets the status readers to be used while waiting on resources. +func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { + return func(wo *waitOptions) { + wo.statusReaders = readers + } +} + +type waitOptions struct { + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + statusReaders []engine.StatusReader +} diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go index bfa98504c..a1a3d4a9a 100644 --- a/pkg/kube/ready.go +++ b/pkg/kube/ready.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "context" diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go index db0d02cbe..3b80e95d2 100644 --- a/pkg/kube/ready_test.go +++ b/pkg/kube/ready_test.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "context" diff --git a/pkg/kube/resource.go b/pkg/kube/resource.go index d88b171f0..bc0fff9a2 100644 --- a/pkg/kube/resource.go +++ b/pkg/kube/resource.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import "k8s.io/cli-runtime/pkg/resource" @@ -79,7 +79,14 @@ func (r ResourceList) Intersect(rs ResourceList) ResourceList { return r.Filter(rs.Contains) } -// isMatchingInfo returns true if infos match on Name and GroupVersionKind. +// isMatchingInfo returns true if infos match on Name, Namespace, Group and Kind. +// +// IMPORTANT: Version is intentionally excluded from the comparison. Resources +// served by the same CRD at different API versions (e.g. v2beta1 vs v2beta2) +// share the same underlying storage in the Kubernetes API server. Comparing +// the full GroupVersionKind causes Difference() to treat a version change as +// a resource removal + addition, which makes Helm delete the resource it just +// created during upgrades. See https://github.com/helm/helm/issues/31768 func isMatchingInfo(a, b *resource.Info) bool { - return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind == b.Mapping.GroupVersionKind + return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.GroupKind() == b.Mapping.GroupVersionKind.GroupKind() } diff --git a/pkg/kube/resource_policy.go b/pkg/kube/resource_policy.go index fb1089785..3a271877c 100644 --- a/pkg/kube/resource_policy.go +++ b/pkg/kube/resource_policy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube // ResourcePolicyAnno is the annotation name for a resource policy const ResourcePolicyAnno = "helm.sh/resource-policy" diff --git a/pkg/kube/resource_test.go b/pkg/kube/resource_test.go index ccc613c1b..283b44945 100644 --- a/pkg/kube/resource_test.go +++ b/pkg/kube/resource_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "testing" @@ -72,8 +72,8 @@ func TestIsMatchingInfo(t *testing.T) { gvkDiffVersion := schema.GroupVersionKind{Group: "group1", Version: "diff", Kind: "pod"} resourceInfoDiffVersion := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffVersion}} - if isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) { - t.Error("expected resources not equal") + if !isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) { + t.Error("expected resources with different versions but same group and kind to be equal") } gvkDiffKind := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "deployment"} diff --git a/pkg/kube/roundtripper.go b/pkg/kube/roundtripper.go index 52cb5bad2..e13f2103a 100644 --- a/pkg/kube/roundtripper.go +++ b/pkg/kube/roundtripper.go @@ -16,65 +16,9 @@ limitations under the License. 
package kube -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "strings" -) +import "helm.sh/helm/v4/pkg/kubeenv" -type RetryingRoundTripper struct { - Wrapped http.RoundTripper -} - -func (rt *RetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.roundTrip(req, 1, nil) -} - -func (rt *RetryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { - if retry < 0 { - return prevResp, nil - } - resp, rtErr := rt.Wrapped.RoundTrip(req) - if rtErr != nil { - return resp, rtErr - } - if resp.StatusCode < 500 { - return resp, rtErr - } - if resp.Header.Get("content-type") != "application/json" { - return resp, rtErr - } - b, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return resp, err - } - - var ke kubernetesError - r := bytes.NewReader(b) - err = json.NewDecoder(r).Decode(&ke) - r.Seek(0, io.SeekStart) - resp.Body = io.NopCloser(r) - if err != nil { - return resp, err - } - if ke.Code < 500 { - return resp, nil - } - // Matches messages like "etcdserver: leader changed" - if strings.HasSuffix(ke.Message, "etcdserver: leader changed") { - return rt.roundTrip(req, retry-1, resp) - } - // Matches messages like "rpc error: code = Unknown desc = raft proposal dropped" - if strings.HasSuffix(ke.Message, "raft proposal dropped") { - return rt.roundTrip(req, retry-1, resp) - } - return resp, nil -} - -type kubernetesError struct { - Message string `json:"message"` - Code int `json:"code"` -} +// RetryingRoundTripper retries transient Kubernetes API server errors on a +// wrapped [http.RoundTripper]. The implementation lives in [kubeenv] so +// consumers can depend on that package without importing all of kube. 
+type RetryingRoundTripper = kubeenv.RetryingRoundTripper diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index a518f0c04..59c1218ff 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v3/pkg/kube" +package kube import ( "context" @@ -38,13 +38,20 @@ import ( "k8s.io/client-go/dynamic" watchtools "k8s.io/client-go/tools/watch" + "helm.sh/helm/v4/internal/logging" helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) type statusWaiter struct { - client dynamic.Interface - restMapper meta.RESTMapper - ctx context.Context + client dynamic.Interface + restMapper meta.RESTMapper + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + readers []engine.StatusReader + logging.LogHolder } // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a @@ -65,21 +72,19 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) - // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks + // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks. + // If custom readers are defined they can be used as Helm hooks support any resource. 
+ // We put them in front since the DelegatingStatusReader uses the first reader that matches. genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady) sr := &statusreaders.DelegatingStatusReader{ - StatusReaders: []engine.StatusReader{ - jobSR, - podSR, - genericSR, - }, + StatusReaders: append(w.readers, jobSR, podSR, genericSR), } sw.StatusReader = sr return w.wait(ctx, resourceList, sw) @@ -89,10 +94,11 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) + sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...) return w.wait(ctx, resourceList, sw) } @@ -100,12 +106,14 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) - customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader) + readers := append([]engine.StatusReader(nil), w.readers...) + readers = append(readers, newCustomJobStatusReader) + customSR := statusreaders.NewStatusReader(w.restMapper, readers...) 
sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) } @@ -114,9 +122,9 @@ func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Dur if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout) defer cancel() - slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) return w.waitForDelete(ctx, resourceList, sw) } @@ -136,24 +144,26 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.Logger())) <-done if statusCollector.Error != nil { return statusCollector.Error } - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.NotFoundStatus { - continue - } - errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. 
status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) } return nil @@ -181,31 +191,36 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.Logger())) <-done if statusCollector.Error != nil { return statusCollector.Error } - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { - continue - } - errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) 
} return nil } -func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { - return contextWithTimeout(w.ctx, timeout) +func (w *statusWaiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if methodCtx == nil { + methodCtx = w.ctx + } + return contextWithTimeout(methodCtx, timeout) } func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { @@ -215,7 +230,7 @@ func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Con return watchtools.ContextWithOptionalTimeout(ctx, timeout) } -func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { +func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus @@ -223,11 +238,16 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. if rs == nil { continue } - // If a resource is already deleted before waiting has started, it will show as unknown - // this check ensures we don't wait forever for a resource that is already deleted + // If a resource is already deleted before waiting has started, it will show as unknown. + // This check ensures we don't wait forever for a resource that is already deleted. if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { continue } + // Failed is a terminal state. This check ensures we don't wait forever for a resource + // that has already failed, as intervention is required to resolve the failure. 
+ if rs.Status == status.FailedStatus && desired == status.CurrentStatus { + continue + } rss = append(rss, rs) if rs.Status != desired { nonDesiredResources = append(nonDesiredResources, rs) @@ -235,7 +255,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. } if aggregator.AggregateStatus(rss, desired) == desired { - slog.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) + logger.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) cancel() return } @@ -246,7 +266,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - slog.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) + logger.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index a8ff4e0e6..73a424720 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -14,15 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kube // import "helm.sh/helm/v3/pkg/kube" +package kube import ( + "context" "errors" "fmt" + "log/slog" "strings" + "sync/atomic" "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -95,10 +102,27 @@ status: succeeded: 1 active: 0 conditions: - - type: Complete + - type: Complete status: "True" ` +var jobFailedManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: failed-job + namespace: default + generation: 1 +status: + failed: 1 + active: 0 + conditions: + - type: Failed + status: "True" + reason: BackoffLimitExceeded + message: "Job has reached the specified backoff limit" +` + var podCompleteManifest = ` apiVersion: v1 kind: Pod @@ -247,7 +271,7 @@ func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Obje t.Helper() objects := []runtime.Object{} for _, manifest := range manifests { - m := make(map[string]interface{}) + m := make(map[string]any) err := yaml.Unmarshal([]byte(manifest), &m) assert.NoError(t, err) resource := &unstructured.Unstructured{Object: m} @@ -273,7 +297,7 @@ func TestStatusWaitForDelete(t *testing.T) { name string manifestsToCreate []string manifestsToDelete []string - expectErrs []error + expectErrs []string }{ { name: "wait for pod to be deleted", @@ -285,7 +309,7 @@ func TestStatusWaitForDelete(t *testing.T) { name: "error when not all objects are deleted", manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, manifestsToDelete: []string{jobCompleteManifest}, - expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + expectErrs: []string{"resource Pod/ns/current-pod still exists. 
status: Current", "context deadline exceeded"}, }, } for _, tt := range tests { @@ -303,6 +327,7 @@ func TestStatusWaitForDelete(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { u := objToCreate.(*unstructured.Unstructured) @@ -323,7 +348,10 @@ func TestStatusWaitForDelete(t *testing.T) { resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -343,6 +371,7 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) // Don't create the object to test that the wait for delete works when the object doesn't exist objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) resourceList := getResourceListFromRuntimeObjs(t, c, objManifest) @@ -353,37 +382,35 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - waitForJobs bool + name string + objManifests []string + expectErrStrs []string + waitForJobs bool }{ { - name: "Job is not complete", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, - waitForJobs: true, + name: "Job is not complete", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. 
status: InProgress", "context deadline exceeded"}, + waitForJobs: true, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: nil, - waitForJobs: false, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: nil, + waitForJobs: false, }, { name: "Pod is ready", objManifests: []string{podCurrentManifest}, - expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objManifests: []string{podNoStatusManifest, podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "one of the pods never becomes ready", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: InProgress", "context deadline exceeded"}, }, { name: "paused deployment passes", objManifests: []string{pausedDeploymentManifest}, - expectErrs: nil, }, } @@ -401,6 +428,7 @@ func TestStatusWait(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -410,8 +438,11 @@ func TestStatusWait(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -422,23 +453,23 @@ func TestStatusWait(t *testing.T) { func TestWaitForJobComplete(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests 
[]string + expectErrStrs []string }{ { name: "Job is complete", objManifests: []string{jobCompleteManifest}, }, { - name: "Job is not ready", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is not ready", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"}, }, } @@ -454,6 +485,7 @@ func TestWaitForJobComplete(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -463,8 +495,11 @@ func TestWaitForJobComplete(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -475,9 +510,9 @@ func TestWaitForJobComplete(t *testing.T) { func TestWatchForReady(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests []string + expectErrStrs []string }{ 
{ name: "succeeds if pod and job are complete", @@ -488,14 +523,14 @@ func TestWatchForReady(t *testing.T) { objManifests: []string{notReadyDeploymentManifest}, }, { - name: "Fails if job is not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if job is not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Fails if pod is not complete", - objManifests: []string{podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if pod is not complete", + objManifests: []string{podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. 
status: InProgress", "context deadline exceeded"}, }, } @@ -513,6 +548,7 @@ func TestWatchForReady(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -522,8 +558,11 @@ func TestWatchForReady(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -534,65 +573,65 @@ func TestWatchForReady(t *testing.T) { func TestStatusWaitMultipleNamespaces(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + name string + objManifests []string + expectErrStrs []string + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "hooks in multiple namespaces", objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, { - name: "error when resource not ready in one namespace", - objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, - 
expectErrs: []error{errors.New("resource not ready, name: pod-ns1, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + name: "error when resource not ready in one namespace", + objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, + expectErrStrs: []string{"resource Pod/namespace-1/pod-ns1 not ready. status: InProgress", "context deadline exceeded"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete resources in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "cluster-scoped resources work correctly with unrestricted permissions", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "namespace-scoped and cluster-scoped resources work together", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete cluster-scoped resources works correctly", objManifests: []string{podNamespace1Manifest, namespaceManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "watch 
cluster-scoped resources works correctly", objManifests: []string{clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -613,6 +652,7 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -635,9 +675,12 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + err := tt.testFunc(&sw, resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -672,7 +715,7 @@ func setupRestrictedClient(fakeClient *dynamicfake.FakeDynamicClient, allowedNam return true, nil, apierrors.NewForbidden( action.GetResource().GroupResource(), "", - fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources"), + errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources"), ) } if !config.allowedNamespaces[ns] { @@ -696,7 +739,7 @@ func setupRestrictedClient(fakeClient *dynamicfake.FakeDynamicClient, allowedNam return true, nil, apierrors.NewForbidden( action.GetResource().GroupResource(), "", - fmt.Errorf("user does not have cluster-wide WATCH permissions for cluster-scoped resources"), + errors.New("user does not have cluster-wide WATCH permissions for cluster-scoped resources"), ) } if !config.allowedNamespaces[ns] { @@ -720,13 +763,13 @@ func TestStatusWaitRestrictedRBAC(t 
*testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -734,7 +777,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "delete pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -742,7 +785,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "hooks in multiple namespaces with namespace permissions", objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -750,8 +793,8 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "error when cluster-scoped resource included", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, - expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout 
time.Duration) error { + expectErrs: []error{errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -759,8 +802,8 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "error when deleting cluster-scoped resource", objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, - expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + expectErrs: []error{errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -769,7 +812,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -791,6 +834,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -813,7 +857,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, 
expectedErr := range tt.expectErrs { @@ -834,13 +878,13 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "wait succeeds with namespace-scoped resources only", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -848,8 +892,8 @@ func TestStatusWaitMixedResources(t *testing.T) { name: "wait fails when cluster-scoped resource included", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, - expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + expectErrs: []error{errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -857,8 +901,8 @@ func TestStatusWaitMixedResources(t *testing.T) { name: "waitForDelete fails when cluster-scoped resource included", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, - expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + expectErrs: []error{errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout 
time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -866,8 +910,8 @@ func TestStatusWaitMixedResources(t *testing.T) { name: "wait fails when namespace resource included", objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, - expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + expectErrs: []error{errors.New("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -876,7 +920,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -898,6 +942,7 @@ func TestStatusWaitMixedResources(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -920,7 +965,7 @@ func TestStatusWaitMixedResources(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, expectedErr := range tt.expectErrs { @@ -933,3 +978,828 @@ func TestStatusWaitMixedResources(t *testing.T) { }) } } + +// mockStatusReader is a custom status reader for testing that tracks when it's used +// 
and returns a configurable status for resources it supports. +type mockStatusReader struct { + supportedGK schema.GroupKind + status status.Status + callCount atomic.Int32 +} + +func (m *mockStatusReader) Supports(gk schema.GroupKind) bool { + return gk == m.supportedGK +} + +func (m *mockStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) { + m.callCount.Add(1) + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) { + m.callCount.Add(1) + id := object.ObjMetadata{ + Namespace: u.GetNamespace(), + Name: u.GetName(), + GroupKind: u.GroupVersionKind().GroupKind(), + } + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func TestStatusWaitWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string + }{ + { + name: "custom reader makes pod immediately current", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + }, + { + name: "custom reader returns in-progress status", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. 
status: InProgress", "context deadline exceeded"}, + }, + { + name: "custom reader for different resource type is not used", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.Wait(resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } + return + } + assert.NoError(t, err) + }) + } +} + +func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes job immediately current", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader for pod works with WaitWithJobs", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: 
v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "built-in job reader is still appended after custom readers", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +func TestStatusWaitWithFailedResources(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string + testFunc func(*statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "Wait returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WaitWithJobs returns error when job has failed", + objManifests: []string{jobFailedManifest}, + customReader: nil, // Use the built-in job status reader + expectErrStrs: []string{ + "resource Job/default/failed-job not ready. status: Failed", + }, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitWithJobs(rl, timeout) + }, + }, + { + name: "Wait returns errors when multiple resources fail", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // The mock reader will make both pods return FailedStatus + expectErrStrs: []string{ + "resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader", + "resource Pod/ns/current-pod not ready. status: Failed, message: mock status reader", + }, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WatchUntilReady returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // WatchUntilReady also waits for CurrentStatus, so failed resources should return error + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader"}, + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + var readers []engine.StatusReader + if tt.customReader != nil { + readers = []engine.StatusReader{tt.customReader} + } + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: readers, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(&sw, resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } + return + } + assert.NoError(t, err) + }) + } +} + +func TestWaitOptionFunctions(t *testing.T) { + t.Parallel() + + t.Run("WithWatchUntilReadyMethodContext sets watchUntilReadyCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWatchUntilReadyMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.watchUntilReadyCtx) + }) + + t.Run("WithWaitMethodContext sets waitCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitCtx) + }) + + 
t.Run("WithWaitWithJobsMethodContext sets waitWithJobsCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitWithJobsMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitWithJobsCtx) + }) + + t.Run("WithWaitForDeleteMethodContext sets waitForDeleteCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitForDeleteMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitForDeleteCtx) + }) +} + +func TestMethodSpecificContextCancellation(t *testing.T) { + t.Parallel() + + t.Run("WatchUntilReady uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + watchUntilReadyCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("Wait uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := 
dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitWithJobs uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitWithJobsCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := 
sw.WaitWithJobs(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitForDelete uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitForDeleteCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitForDelete(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) +} + +func TestMethodContextFallbackToGeneralContext(t *testing.T) { + t.Parallel() + + t.Run("WatchUntilReady falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + 
watchUntilReadyCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("Wait falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitWithJobs falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + 
batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitWithJobsCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitWithJobs(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitForDelete falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitForDeleteCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitForDelete(resourceList, time.Second*3) + // Should fail due to cancelled general context 
+ require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) +} + +func TestMethodContextOverridesGeneralContext(t *testing.T) { + t.Parallel() + + t.Run("method-specific context overrides general context for WatchUntilReady", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + watchUntilReadyCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for Wait", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := 
range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for WaitWithJobs", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitWithJobsCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitWithJobs(resourceList, time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for WaitForDelete", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + 
client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitForDeleteCtx: context.Background(), // Not cancelled - should be used + } + + // Use a non-existent resource: WaitForDelete should return immediately since + // the pod is already in the desired "deleted" state. + // This also validates context selection: if generalCtx (cancelled) were + // incorrectly used instead of waitForDeleteCtx, the watch context would be + // immediately cancelled and the call would return a context error. + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := sw.WaitForDelete(resourceList, time.Second) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) +} + +func TestWatchUntilReadyWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string + }{ + { + name: "custom reader makes job immediately current for hooks", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + }, + { + name: "custom reader makes pod immediately current for hooks", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + }, + { + name: "custom reader takes precedence over built-in pod reader", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrStrs: []string{"resource Pod/ns/good-pod not ready. 
status: InProgress", "context deadline exceeded"}, + }, + { + name: "custom reader takes precedence over built-in job reader", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, + }, + { + name: "custom reader for different resource type does not affect pods", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + }, + { + name: "built-in readers still work when custom reader does not match", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } + return + } + 
assert.NoError(t, err) + }) + } +} diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 9a276a459..b5e91d8f3 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v4/pkg/kube" +package kube import ( "context" + "errors" "fmt" "log/slog" "net/http" @@ -107,7 +108,8 @@ func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) boo slog.String("resource", resource.Name), slog.Any("error", err), ) - if ev, ok := err.(*apierrors.StatusError); ok { + ev := &apierrors.StatusError{} + if errors.As(err, &ev) { statusCode := ev.Status().Code retryable := hw.isRetryableHTTPStatusCode(statusCode) slog.Debug( @@ -244,7 +246,7 @@ func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.In // Use a selector on the name of the resource. This should be unique for the // given version and kind - selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) + selector, err := fields.ParseSelector("metadata.name=" + info.Name) if err != nil { return err } diff --git a/pkg/kube/wait_test.go b/pkg/kube/wait_test.go index d96f2c486..73270b5a3 100644 --- a/pkg/kube/wait_test.go +++ b/pkg/kube/wait_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package kube import ( - "fmt" + "errors" "net/http" "strings" "testing" @@ -39,7 +39,7 @@ import ( func TestSelectorsForObject(t *testing.T) { tests := []struct { name string - object interface{} + object any expectError bool errorContains string expectedLabels map[string]string @@ -299,7 +299,7 @@ func TestLegacyWaiter_waitForPodSuccess(t *testing.T) { done, err := lw.waitForPodSuccess(tt.obj, "foo") if tt.wantErr { if err == nil { - t.Errorf("expected error, got none") + t.Error("expected error, got none") } else if !strings.Contains(err.Error(), tt.errMessage) { t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error()) } @@ -391,7 +391,7 @@ func TestLegacyWaiter_waitForJob(t *testing.T) { done, err := lw.waitForJob(tt.obj, "test-job") if tt.wantErr { if err == nil { - t.Errorf("expected error, got none") + t.Error("expected error, got none") } else if !strings.Contains(err.Error(), tt.errMessage) { t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error()) } @@ -451,7 +451,7 @@ func TestLegacyWaiter_isRetryableError(t *testing.T) { }, { name: "non-status error", - err: fmt.Errorf("some generic error"), + err: errors.New("some generic error"), wantRetry: true, }, } diff --git a/pkg/kubeenv/roundtripper.go b/pkg/kubeenv/roundtripper.go new file mode 100644 index 000000000..e00f93984 --- /dev/null +++ b/pkg/kubeenv/roundtripper.go @@ -0,0 +1,84 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package kubeenv holds small, cycle-free Kubernetes client helpers shared by +// higher-level packages (for example pkg/cli and pkg/kube). +package kubeenv + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "strings" +) + +// RetryingRoundTripper retries transient Kubernetes API server errors on a +// wrapped [http.RoundTripper]. +type RetryingRoundTripper struct { + Wrapped http.RoundTripper +} + +func (rt *RetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.roundTrip(req, 1, nil) +} + +func (rt *RetryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { + if retry < 0 { + return prevResp, nil + } + resp, rtErr := rt.Wrapped.RoundTrip(req) + if rtErr != nil { + return resp, rtErr + } + if resp.StatusCode < 500 { + return resp, rtErr + } + if resp.Header.Get("content-type") != "application/json" { + return resp, rtErr + } + b, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return resp, err + } + + var ke kubernetesError + r := bytes.NewReader(b) + err = json.NewDecoder(r).Decode(&ke) + r.Seek(0, io.SeekStart) + resp.Body = io.NopCloser(r) + if err != nil { + return resp, err + } + if ke.Code < 500 { + return resp, nil + } + // Matches messages like "etcdserver: leader changed" + if strings.HasSuffix(ke.Message, "etcdserver: leader changed") { + return rt.roundTrip(req, retry-1, resp) + } + // Matches messages like "rpc error: code = Unknown desc = raft proposal dropped" + if strings.HasSuffix(ke.Message, "raft proposal dropped") { + return rt.roundTrip(req, retry-1, resp) + } + return resp, nil +} + +type kubernetesError struct { + Message string `json:"message"` + Code int `json:"code"` +} diff --git a/pkg/kube/roundtripper_test.go b/pkg/kubeenv/roundtripper_test.go similarity index 99% rename from pkg/kube/roundtripper_test.go rename to pkg/kubeenv/roundtripper_test.go index 96602c1f4..b921eac82 100644 --- 
a/pkg/kube/roundtripper_test.go +++ b/pkg/kubeenv/roundtripper_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube +package kubeenv import ( "encoding/json" diff --git a/pkg/provenance/sign.go b/pkg/provenance/sign.go index 57af1ad42..45d4fe1a5 100644 --- a/pkg/provenance/sign.go +++ b/pkg/provenance/sign.go @@ -25,9 +25,9 @@ import ( "os" "strings" - "github.com/ProtonMail/go-crypto/openpgp" //nolint - "github.com/ProtonMail/go-crypto/openpgp/clearsign" //nolint - "github.com/ProtonMail/go-crypto/openpgp/packet" //nolint + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/clearsign" + "github.com/ProtonMail/go-crypto/openpgp/packet" "sigs.k8s.io/yaml" ) @@ -332,7 +332,7 @@ func parseMessageBlock(data []byte) (*SumCollection, error) { // // This is the generic version that can work with any metadata type. // The metadata parameter should be a pointer to a struct that can be unmarshaled from YAML. 
-func ParseMessageBlock(data []byte, metadata interface{}, sums *SumCollection) error { +func ParseMessageBlock(data []byte, metadata any, sums *SumCollection) error { parts := bytes.Split(data, []byte("\n...\n")) if len(parts) < 2 { return errors.New("message block must have at least two parts") diff --git a/pkg/provenance/sign_test.go b/pkg/provenance/sign_test.go index 1985e9eea..8784e6c12 100644 --- a/pkg/provenance/sign_test.go +++ b/pkg/provenance/sign_test.go @@ -17,15 +17,15 @@ package provenance import ( "crypto" - "fmt" + "errors" "io" "os" "path/filepath" "strings" "testing" - pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" //nolint - "github.com/ProtonMail/go-crypto/openpgp/packet" //nolint + pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "sigs.k8s.io/yaml" @@ -134,7 +134,7 @@ func TestParseMessageBlock(t *testing.T) { } if hash, ok := sc.Files["hashtest-1.2.3.tgz"]; !ok { - t.Errorf("hashtest file not found in Files") + t.Error("hashtest file not found in Files") } else if hash != "sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888" { t.Errorf("Unexpected hash: %q", hash) } @@ -330,7 +330,7 @@ func (s failSigner) Public() crypto.PublicKey { } func (s failSigner) Sign(_ io.Reader, _ []byte, _ crypto.SignerOpts) ([]byte, error) { - return nil, fmt.Errorf("always fails") + return nil, errors.New("always fails") } func TestClearSignError(t *testing.T) { diff --git a/pkg/pusher/ocipusher.go b/pkg/pusher/ocipusher.go index f03188391..2a12e09b4 100644 --- a/pkg/pusher/ocipusher.go +++ b/pkg/pusher/ocipusher.go @@ -76,7 +76,7 @@ func (pusher *OCIPusher) push(chartRef, href string) error { } var pushOpts []registry.PushOption - provRef := fmt.Sprintf("%s.prov", chartRef) + provRef := chartRef + ".prov" if _, err := os.Stat(provRef); err == nil { provBytes, err := os.ReadFile(provRef) if err != 
nil { @@ -86,7 +86,7 @@ func (pusher *OCIPusher) push(chartRef, href string) error { } ref := fmt.Sprintf("%s:%s", - path.Join(strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)), meta.Metadata.Name), + path.Join(strings.TrimPrefix(href, registry.OCIScheme+"://"), meta.Metadata.Name), meta.Metadata.Version) // The time the chart was "created" is semantically the time the chart archive file was last written(modified) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 750bb9715..f2bfd13b4 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -202,13 +202,15 @@ func ClientOptCredentialsFile(credentialsFile string) ClientOption { } } -// ClientOptHTTPClient returns a function that sets the httpClient setting on a client options set +// ClientOptHTTPClient returns a function that sets the HTTP client for the registry client. func ClientOptHTTPClient(httpClient *http.Client) ClientOption { return func(client *Client) { client.httpClient = httpClient } } +// ClientOptPlainHTTP returns a function that enables plain HTTP (non-TLS) +// communication for the registry client. func ClientOptPlainHTTP() ClientOption { return func(c *Client) { c.plainHTTP = true @@ -236,7 +238,7 @@ func warnIfHostHasPath(host string) bool { return false } -// Login logs into a registry +// Login authenticates the client with a remote OCI registry using the provided host and options. func (c *Client) Login(host string, options ...LoginOption) error { for _, option := range options { option(&loginOperation{host, c}) @@ -282,7 +284,8 @@ func LoginOptBasicAuth(username string, password string) LoginOption { } } -// LoginOptPlainText returns a function that allows plaintext (HTTP) login +// LoginOptPlainText returns a function that enables plaintext (HTTP) login +// instead of HTTPS for the registry client. 
func LoginOptPlainText(isPlainText bool) LoginOption { return func(o *loginOperation) { o.client.plainHTTP = isPlainText @@ -566,6 +569,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { // Build allowed media types for chart pull allowedMediaTypes := []string{ + ocispec.MediaTypeImageIndex, ocispec.MediaTypeImageManifest, ConfigMediaType, } @@ -881,7 +885,7 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (string, *ur tag = version } else { // Retrieve list of repository tags - tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme))) + tags, err := c.Tags(strings.TrimPrefix(ref, OCIScheme+"://")) if err != nil { return "", nil, err } diff --git a/pkg/registry/client_http_test.go b/pkg/registry/client_http_test.go index a2c3a1833..1c6751559 100644 --- a/pkg/registry/client_http_test.go +++ b/pkg/registry/client_http_test.go @@ -18,7 +18,6 @@ package registry import ( "errors" - "fmt" "os" "testing" @@ -65,7 +64,7 @@ func (suite *HTTPRegistryClientTestSuite) Test_3_Tags() { } func (suite *HTTPRegistryClientTestSuite) Test_4_ManInTheMiddle() { - ref := fmt.Sprintf("%s/testrepo/supposedlysafechart:9.9.9", suite.CompromisedRegistryHost) + ref := suite.CompromisedRegistryHost + "/testrepo/supposedlysafechart:9.9.9" // returns content that does not match the expected digest _, err := suite.RegistryClient.Pull(ref) @@ -73,6 +72,13 @@ func (suite *HTTPRegistryClientTestSuite) Test_4_ManInTheMiddle() { suite.True(errors.Is(err, content.ErrMismatchedDigest)) } +func (suite *HTTPRegistryClientTestSuite) Test_5_ImageIndex() { + ref := suite.FakeRegistryHost + "/testrepo/image-index:0.1.0" + + _, err := suite.RegistryClient.Pull(ref) + suite.Nil(err) +} + func TestHTTPRegistryClientTestSuite(t *testing.T) { suite.Run(t, new(HTTPRegistryClientTestSuite)) } diff --git a/pkg/registry/client_test.go b/pkg/registry/client_test.go index 98a8b2ea3..702dfff69 100644 --- a/pkg/registry/client_test.go +++ 
b/pkg/registry/client_test.go @@ -82,7 +82,7 @@ func TestLogin_ResetsForceAttemptOAuth2_OnSuccess(t *testing.T) { } if c.authorizer == nil || c.authorizer.ForceAttemptOAuth2 { - t.Fatalf("expected ForceAttemptOAuth2 default to be false") + t.Fatal("expected ForceAttemptOAuth2 default to be false") } // Call Login with plain HTTP against our test server @@ -91,7 +91,7 @@ func TestLogin_ResetsForceAttemptOAuth2_OnSuccess(t *testing.T) { } if c.authorizer.ForceAttemptOAuth2 { - t.Errorf("ForceAttemptOAuth2 should be false after successful Login") + t.Error("ForceAttemptOAuth2 should be false after successful Login") } } @@ -117,7 +117,7 @@ func TestLogin_ResetsForceAttemptOAuth2_OnFailure(t *testing.T) { _ = c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p")) if c.authorizer.ForceAttemptOAuth2 { - t.Errorf("ForceAttemptOAuth2 should be false after failed Login") + t.Error("ForceAttemptOAuth2 should be false after failed Login") } } diff --git a/pkg/registry/plugin.go b/pkg/registry/plugin.go index e4b4afa24..959faaa7d 100644 --- a/pkg/registry/plugin.go +++ b/pkg/registry/plugin.go @@ -18,6 +18,7 @@ package registry import ( "encoding/json" + "errors" "fmt" "strings" @@ -190,7 +191,7 @@ func GetPluginName(source string) (string, error) { // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> Repository: "user/plugin-name" repository := ref.Repository if repository == "" { - return "", fmt.Errorf("invalid OCI reference: missing repository") + return "", errors.New("invalid OCI reference: missing repository") } // Get the last part of the repository path as the plugin name diff --git a/pkg/registry/plugin_test.go b/pkg/registry/plugin_test.go index f8525829c..61995ece9 100644 --- a/pkg/registry/plugin_test.go +++ b/pkg/registry/plugin_test.go @@ -75,7 +75,7 @@ func TestGetPluginName(t *testing.T) { if tt.expectErr { if err == nil { - t.Errorf("expected error but got none") + t.Error("expected error but got none") } return } diff --git a/pkg/registry/reference.go 
b/pkg/registry/reference.go index 9a98cf5c9..455792539 100644 --- a/pkg/registry/reference.go +++ b/pkg/registry/reference.go @@ -17,7 +17,6 @@ limitations under the License. package registry import ( - "fmt" "strings" "oras.land/oras-go/v2/registry" @@ -80,5 +79,5 @@ func (r *reference) String() string { // IsOCI determines whether a URL is to be treated as an OCI URL func IsOCI(url string) bool { - return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme)) + return strings.HasPrefix(url, OCIScheme+"://") } diff --git a/pkg/registry/registry_test.go b/pkg/registry/registry_test.go index d4921c50b..b0c08fa3c 100644 --- a/pkg/registry/registry_test.go +++ b/pkg/registry/registry_test.go @@ -35,6 +35,7 @@ import ( "github.com/distribution/distribution/v3/registry" _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "golang.org/x/crypto/bcrypt" @@ -60,6 +61,7 @@ var ( type TestRegistry struct { suite.Suite Out io.Writer + FakeRegistryHost string DockerRegistryHost string CompromisedRegistryHost string WorkspaceDir string @@ -135,7 +137,7 @@ func setup(suite *TestRegistry, tlsEnabled, insecure bool) { config.HTTP.Addr = ln.Addr().String() config.HTTP.DrainTimeout = time.Duration(10) * time.Second - config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]any{}} config.Auth = configuration.Auth{ "htpasswd": configuration.Parameters{ @@ -159,6 +161,7 @@ func setup(suite *TestRegistry, tlsEnabled, insecure bool) { suite.dockerRegistry, err = registry.NewRegistry(context.Background(), config) suite.Nil(err, "no error creating test registry") + suite.FakeRegistryHost = initFakeRegistryTestServer() suite.CompromisedRegistryHost = 
initCompromisedRegistryTestServer() go func() { _ = suite.dockerRegistry.ListenAndServe() @@ -206,7 +209,174 @@ func initCompromisedRegistryTestServer() string { })) u, _ := url.Parse(s.URL) - return fmt.Sprintf("localhost:%s", u.Port()) + return "localhost:" + u.Port() +} + +func initFakeRegistryTestServer() string { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/testrepo/image-index/manifests/0.1.0": + w.Header().Set("Content-Type", ocispec.MediaTypeImageIndex) + w.Write([]byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:2771e37a12b7bcb2902456ecf3f29bf9ee11ec348e66e8eb322d9780ad7fc2df", + "size": 1035, + "platform": { + "architecture": "amd64", + "os": "linux" + }, + "annotations": { + "com.docker.official-images.bashbrew.arch": "amd64", + "org.opencontainers.image.base.name": "scratch", + "org.opencontainers.image.created": "2025-08-13T22:16:57Z", + "org.opencontainers.image.revision": "6930d60e10e81283a57be3ee3a2b5ca328a40304", + "org.opencontainers.image.source": "https://github.com/docker-library/hello-world.git#6930d60e10e81283a57be3ee3a2b5ca328a40304:amd64/hello-world", + "org.opencontainers.image.url": "https://hub.docker.com/_/hello-world", + "org.opencontainers.image.version": "linux" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:6b75187531c5e9b6a85c8946d5d82e4ef3801e051fbff338f382f3edfa60e3d2", + "size": 566, + "platform": { + "architecture": "unknown", + "os": "unknown" + }, + "annotations": { + "com.docker.official-images.bashbrew.arch": "amd64", + "vnd.docker.reference.digest": "sha256:2771e37a12b7bcb2902456ecf3f29bf9ee11ec348e66e8eb322d9780ad7fc2df", + "vnd.docker.reference.type": "attestation-manifest" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": 
"sha256:7fbdc47de56b45d092f8f419e8b6183adf0159d00e05574c01787231b54fe28f", + "size": 815 + } + ] +}`)) + + case "/v2/testrepo/image-index/manifests/sha256:2771e37a12b7bcb2902456ecf3f29bf9ee11ec348e66e8eb322d9780ad7fc2df": + w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest) + w.Write([]byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:1b44b5a3e06a9aae883e7bf25e45c100be0bb81a0e01b32de604f3ac44711634", + "size": 547 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:17eec7bbc9d79fa397ac95c7283ecd04d1fe6978516932a3db110c6206430809", + "size": 2380 + } + ], + "annotations": { + "com.docker.official-images.bashbrew.arch": "amd64", + "org.opencontainers.image.base.name": "scratch", + "org.opencontainers.image.created": "2025-08-08T19:05:17Z", + "org.opencontainers.image.revision": "6930d60e10e81283a57be3ee3a2b5ca328a40304", + "org.opencontainers.image.source": "https://github.com/docker-library/hello-world.git#6930d60e10e81283a57be3ee3a2b5ca328a40304:amd64/hello-world", + "org.opencontainers.image.url": "https://hub.docker.com/_/hello-world", + "org.opencontainers.image.version": "linux" + } +}`)) + + case "/v2/testrepo/image-index/manifests/sha256:6b75187531c5e9b6a85c8946d5d82e4ef3801e051fbff338f382f3edfa60e3d2": + w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest) + w.Write([]byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:ec4b6233950725be4c816667d1eb2782ad59dc65b12f7ac53f1ffa0ad5b95b5b", + "size": 167 + }, + "layers": [ + { + "mediaType": "application/vnd.in-toto+json", + "digest": "sha256:ea52d2000f90ad63267302cba134025ee586b07a63c47aa9467471a395aee6c2", + "size": 4822, + "annotations": { + "in-toto.io/predicate-type": 
"https://slsa.dev/provenance/v0.2" + } + } + ] +}`)) + + case "/v2/testrepo/image-index/manifests/sha256:7fbdc47de56b45d092f8f419e8b6183adf0159d00e05574c01787231b54fe28f": + w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest) + w.Write([]byte(`{ + "schemaVersion": 2, + "config": { + "mediaType": "application/vnd.cncf.helm.config.v1+json", + "digest": "sha256:24de43e4a9f5ed9427479f27dd7bab9d158227abe593302a6f54d1e13a903ac3", + "size": 112 + }, + "layers": [ + { + "mediaType": "application/vnd.cncf.helm.chart.provenance.v1.prov", + "digest": "sha256:b0a02b7412f78ae93324d48df8fcc316d8482e5ad7827b5b238657a29a22f256", + "size": 695 + }, + { + "mediaType": "application/vnd.cncf.helm.chart.content.v1.tar+gzip", + "digest": "sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55", + "size": 973 + } + ], + "annotations": { + "org.opencontainers.image.description": "A Helm chart for Kubernetes", + "org.opencontainers.image.title": "signtest", + "org.opencontainers.image.version": "0.1.0" + } +}`)) + + case "/v2/testrepo/image-index/blobs/sha256:24de43e4a9f5ed9427479f27dd7bab9d158227abe593302a6f54d1e13a903ac3": + w.Header().Set("Content-Type", ConfigMediaType) + w.Write([]byte(`{ + "name":"signtest", + "version":"0.1.0", + "description":"A Helm chart for Kubernetes", + "apiVersion":"v1" +}`)) + + case "/v2/testrepo/image-index/blobs/sha256:b0a02b7412f78ae93324d48df8fcc316d8482e5ad7827b5b238657a29a22f256": + data, err := os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz.prov") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + w.Header().Set("Content-Type", ProvLayerMediaType) + w.Write(data) + + case "/v2/testrepo/image-index/blobs/sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55": + data, err := os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + 
} + + w.Header().Set("Content-Type", ChartLayerMediaType) + w.Write(data) + + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + u, _ := url.Parse(s.URL) + return "localhost:" + u.Port() } func testPush(suite *TestRegistry) { @@ -214,7 +384,7 @@ func testPush(suite *TestRegistry) { testingChartCreationTime := "1977-09-02T22:04:05Z" // Bad bytes - ref := fmt.Sprintf("%s/testrepo/testchart:1.2.3", suite.DockerRegistryHost) + ref := suite.DockerRegistryHost + "/testrepo/testchart:1.2.3" _, err := suite.RegistryClient.Push([]byte("hello"), ref, PushOptCreationTime(testingChartCreationTime)) suite.NotNil(err, "error pushing non-chart bytes") @@ -298,7 +468,7 @@ func testPush(suite *TestRegistry) { func testPull(suite *TestRegistry) { // bad/missing ref - ref := fmt.Sprintf("%s/testrepo/no-existy:1.2.3", suite.DockerRegistryHost) + ref := suite.DockerRegistryHost + "/testrepo/no-existy:1.2.3" _, err := suite.RegistryClient.Pull(ref) suite.NotNil(err, "error on bad/missing ref") diff --git a/pkg/registry/tag_test.go b/pkg/registry/tag_test.go index 09f0f12ea..e3ec47703 100644 --- a/pkg/registry/tag_test.go +++ b/pkg/registry/tag_test.go @@ -71,7 +71,7 @@ func TestGetTagMatchingVersionOrConstraint_InvalidConstraint(t *testing.T) { tags := []string{"1.0.0"} _, err := GetTagMatchingVersionOrConstraint(tags, ">a1") if err == nil { - t.Fatalf("expected error for invalid constraint") + t.Fatal("expected error for invalid constraint") } } @@ -79,7 +79,7 @@ func TestGetTagMatchingVersionOrConstraint_NoMatches(t *testing.T) { tags := []string{"0.1.0", "0.2.0"} _, err := GetTagMatchingVersionOrConstraint(tags, ">=1.0.0") if err == nil { - t.Fatalf("expected error when no tags match") + t.Fatal("expected error when no tags match") } if !strings.Contains(err.Error(), ">=1.0.0") { t.Fatalf("expected error to contain version string, got: %v", err) diff --git a/pkg/release/common.go b/pkg/release/common.go index d33c96646..7d40effdc 100644 --- a/pkg/release/common.go +++ 
b/pkg/release/common.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + v2release "helm.sh/helm/v4/internal/release/v2" "helm.sh/helm/v4/pkg/chart" v1release "helm.sh/helm/v4/pkg/release/v1" ) @@ -35,6 +36,10 @@ func newDefaultAccessor(rel Releaser) (Accessor, error) { return &v1Accessor{&v}, nil case *v1release.Release: return &v1Accessor{v}, nil + case v2release.Release: + return &v2Accessor{&v}, nil + case *v2release.Release: + return &v2Accessor{v}, nil default: return nil, fmt.Errorf("unsupported release type: %T", rel) } @@ -46,6 +51,10 @@ func newDefaultHookAccessor(hook Hook) (HookAccessor, error) { return &v1HookAccessor{&h}, nil case *v1release.Hook: return &v1HookAccessor{h}, nil + case v2release.Hook: + return &v2HookAccessor{&h}, nil + case *v2release.Hook: + return &v2HookAccessor{h}, nil default: return nil, errors.New("unsupported release hook type") } @@ -114,3 +123,67 @@ func (a *v1HookAccessor) Path() string { func (a *v1HookAccessor) Manifest() string { return a.hook.Manifest } + +type v2Accessor struct { + rel *v2release.Release +} + +func (a *v2Accessor) Name() string { + return a.rel.Name +} + +func (a *v2Accessor) Namespace() string { + return a.rel.Namespace +} + +func (a *v2Accessor) Version() int { + return a.rel.Version +} + +func (a *v2Accessor) Hooks() []Hook { + var hooks = make([]Hook, len(a.rel.Hooks)) + for i, h := range a.rel.Hooks { + hooks[i] = h + } + return hooks +} + +func (a *v2Accessor) Manifest() string { + return a.rel.Manifest +} + +func (a *v2Accessor) Notes() string { + return a.rel.Info.Notes +} + +func (a *v2Accessor) Labels() map[string]string { + return a.rel.Labels +} + +func (a *v2Accessor) Chart() chart.Charter { + return a.rel.Chart +} + +func (a *v2Accessor) Status() string { + return a.rel.Info.Status.String() +} + +func (a *v2Accessor) ApplyMethod() string { + return a.rel.ApplyMethod +} + +func (a *v2Accessor) DeployedAt() time.Time { + return a.rel.Info.LastDeployed +} + +type v2HookAccessor struct { + hook 
*v2release.Hook +} + +func (a *v2HookAccessor) Path() string { + return a.hook.Path +} + +func (a *v2HookAccessor) Manifest() string { + return a.hook.Manifest +} diff --git a/pkg/release/common_test.go b/pkg/release/common_test.go index e9f8d364a..bc22971e9 100644 --- a/pkg/release/common_test.go +++ b/pkg/release/common_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" + v2release "helm.sh/helm/v4/internal/release/v2" "helm.sh/helm/v4/pkg/release/common" rspb "helm.sh/helm/v4/pkg/release/v1" ) @@ -63,3 +64,72 @@ func TestNewDefaultAccessor(t *testing.T) { is.Equal(rel.ApplyMethod, accessor.ApplyMethod()) is.Equal(rel.Labels, accessor.Labels()) } + +func TestNewDefaultAccessorV2(t *testing.T) { + // Testing the default implementation for v2 releases (charts/v3) + is := assert.New(t) + + // Create v2 release + info := &v2release.Info{Status: common.StatusDeployed, LastDeployed: time.Now().Add(1000), Notes: "test notes"} + labels := make(map[string]string) + labels["foo"] = "bar" + rel := &v2release.Release{ + Name: "happy-cats-v2", + Version: 3, + Info: info, + Labels: labels, + Namespace: "test-namespace", + ApplyMethod: "ssa", + Manifest: "test manifest content", + Hooks: []*v2release.Hook{ + { + Name: "test-hook", + Kind: "Job", + Path: "templates/hook.yaml", + Manifest: "hook manifest", + }, + }, + } + + // Test accessor creation + accessor, err := newDefaultAccessor(rel) + is.NoError(err) + + // Verify all accessor methods return correct values + is.Equal(rel.Name, accessor.Name()) + is.Equal(rel.Namespace, accessor.Namespace()) + is.Equal(rel.Version, accessor.Version()) + is.Equal(rel.ApplyMethod, accessor.ApplyMethod()) + is.Equal(rel.Labels, accessor.Labels()) + is.Equal(rel.Manifest, accessor.Manifest()) + is.Equal(rel.Info.Notes, accessor.Notes()) + is.Equal(rel.Info.Status.String(), accessor.Status()) + is.Equal(rel.Info.LastDeployed, accessor.DeployedAt()) + + // Verify hooks are accessible + hooks := accessor.Hooks() + 
is.Len(hooks, 1) + + // Test hook accessor + hookAccessor, err := newDefaultHookAccessor(hooks[0]) + is.NoError(err) + is.Equal("templates/hook.yaml", hookAccessor.Path()) + is.Equal("hook manifest", hookAccessor.Manifest()) +} + +func TestNewDefaultAccessorV2ByValue(t *testing.T) { + // Test that passing v2 release by value also works + is := assert.New(t) + + info := &v2release.Info{Status: common.StatusDeployed, LastDeployed: time.Now()} + rel := v2release.Release{ + Name: "test-release", + Version: 1, + Info: info, + Namespace: "default", + } + + accessor, err := newDefaultAccessor(rel) + is.NoError(err) + is.Equal("test-release", accessor.Name()) +} diff --git a/pkg/release/interfaces.go b/pkg/release/interfaces.go index aaa5a756f..c758de944 100644 --- a/pkg/release/interfaces.go +++ b/pkg/release/interfaces.go @@ -22,9 +22,9 @@ import ( "helm.sh/helm/v4/pkg/chart" ) -type Releaser interface{} +type Releaser any -type Hook interface{} +type Hook any type Accessor interface { Name() string diff --git a/pkg/release/v1/hook.go b/pkg/release/v1/hook.go index f0a370c15..5c382cd77 100644 --- a/pkg/release/v1/hook.go +++ b/pkg/release/v1/hook.go @@ -86,7 +86,7 @@ type Hook struct { // Events are the events that this hook fires on. Events []HookEvent `json:"events,omitempty"` // LastRun indicates the date/time this was last run. - LastRun HookExecution `json:"last_run,omitempty"` + LastRun HookExecution `json:"last_run"` // Weight indicates the sort order for execution among similar Hook type Weight int `json:"weight,omitempty"` // DeletePolicies are the policies that indicate when to delete the hook @@ -133,7 +133,7 @@ type hookExecutionJSON struct { // It handles empty string time fields by treating them as zero values. 
func (h *HookExecution) UnmarshalJSON(data []byte) error { // First try to unmarshal into a map to handle empty string time fields - var raw map[string]interface{} + var raw map[string]any if err := json.Unmarshal(data, &raw); err != nil { return err } diff --git a/pkg/release/v1/hook_test.go b/pkg/release/v1/hook_test.go index cea2568bc..f3b8811a6 100644 --- a/pkg/release/v1/hook_test.go +++ b/pkg/release/v1/hook_test.go @@ -220,7 +220,7 @@ func TestHookExecutionEmptyStringRoundTrip(t *testing.T) { data, err := json.Marshal(&exec) require.NoError(t, err) - var result map[string]interface{} + var result map[string]any err = json.Unmarshal(data, &result) require.NoError(t, err) diff --git a/pkg/release/v1/info.go b/pkg/release/v1/info.go index f895fdf6c..e6bfc1b6f 100644 --- a/pkg/release/v1/info.go +++ b/pkg/release/v1/info.go @@ -57,7 +57,7 @@ type infoJSON struct { // It handles empty string time fields by treating them as zero values. func (i *Info) UnmarshalJSON(data []byte) error { // First try to unmarshal into a map to handle empty string time fields - var raw map[string]interface{} + var raw map[string]any if err := json.Unmarshal(data, &raw); err != nil { return err } diff --git a/pkg/release/v1/info_test.go b/pkg/release/v1/info_test.go index 0fff78f76..6cff4db64 100644 --- a/pkg/release/v1/info_test.go +++ b/pkg/release/v1/info_test.go @@ -272,7 +272,7 @@ func TestInfoEmptyStringRoundTrip(t *testing.T) { data, err := json.Marshal(&info) require.NoError(t, err) - var result map[string]interface{} + var result map[string]any err = json.Unmarshal(data, &result) require.NoError(t, err) diff --git a/pkg/release/v1/mock.go b/pkg/release/v1/mock.go index dc135a24a..fc98a4525 100644 --- a/pkg/release/v1/mock.go +++ b/pkg/release/v1/mock.go @@ -17,8 +17,8 @@ limitations under the License. 
package v1 import ( - "fmt" "math/rand" + "strconv" "time" "helm.sh/helm/v4/pkg/chart/common" @@ -57,7 +57,7 @@ func Mock(opts *MockReleaseOptions) *Release { name := opts.Name if name == "" { - name = "testrelease-" + fmt.Sprint(rand.Intn(100)) + name = "testrelease-" + strconv.Itoa(rand.Intn(100)) } version := 1 @@ -123,7 +123,7 @@ func Mock(opts *MockReleaseOptions) *Release { Name: name, Info: info, Chart: ch, - Config: map[string]interface{}{"name": "value"}, + Config: map[string]any{"name": "value"}, Version: version, Namespace: namespace, Hooks: []*Hook{ diff --git a/pkg/release/v1/release.go b/pkg/release/v1/release.go index 454ee6eb7..3bbc0e4ce 100644 --- a/pkg/release/v1/release.go +++ b/pkg/release/v1/release.go @@ -36,7 +36,7 @@ type Release struct { Chart *chart.Chart `json:"chart,omitempty"` // Config is the set of extra Values added to the chart. // These values override the default values inside of the chart. - Config map[string]interface{} `json:"config,omitempty"` + Config map[string]any `json:"config,omitempty"` // Manifest is the string representation of the rendered template. Manifest string `json:"manifest,omitempty"` // Hooks are all of the hooks declared for this release. 
diff --git a/pkg/release/v1/util/kind_sorter.go b/pkg/release/v1/util/kind_sorter.go index bc074340f..01f1f801e 100644 --- a/pkg/release/v1/util/kind_sorter.go +++ b/pkg/release/v1/util/kind_sorter.go @@ -137,7 +137,7 @@ func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.H return h } -func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { +func lessByKind(_ any, _ any, kindA string, kindB string, o KindSortOrder) bool { ordering := make(map[string]int, len(o)) for v, k := range o { ordering[k] = v diff --git a/pkg/release/v1/util/manifest.go b/pkg/release/v1/util/manifest.go index 9a87949f8..fa26f6256 100644 --- a/pkg/release/v1/util/manifest.go +++ b/pkg/release/v1/util/manifest.go @@ -21,6 +21,7 @@ import ( "regexp" "strconv" "strings" + "unicode" ) // SimpleHead defines what the structure of the head of a manifest file @@ -35,7 +36,16 @@ type SimpleHead struct { var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") -// SplitManifests takes a string of manifest and returns a map contains individual manifests +// SplitManifests takes a manifest string and returns a map containing individual manifests. +// +// **Note for Chart API v3**: This function (due to the regex above) has allowed _WRONG_ +// Go templates to be defined inside charts across the years. The generated text from Go +// templates may contain `---apiVersion: v1`, and this function magically splits this back +// to `---\napiVersion: v1`. This has caused issues recently after Helm 4 introduced +// kio.ParseAll to inject annotations when post-renderers are used. In Chart API v3, +// we should kill this regex with fire (or change it) and expose charts doing the wrong +// thing Go template-wise. Helm should say a big _NO_ to charts doing the wrong thing, +// with or without post-renderers. 
func SplitManifests(bigFile string) map[string]string { // Basically, we're quickly splitting a stream of YAML documents into an // array of YAML docs. The file name is just a place holder, but should be @@ -44,15 +54,15 @@ func SplitManifests(bigFile string) map[string]string { tpl := "manifest-%d" res := map[string]string{} // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly. - bigFileTmp := strings.TrimSpace(bigFile) + bigFileTmp := strings.TrimLeftFunc(bigFile, unicode.IsSpace) docs := sep.Split(bigFileTmp, -1) var count int for _, d := range docs { - if d == "" { + if strings.TrimSpace(d) == "" { continue } - d = strings.TrimSpace(d) + d = strings.TrimLeftFunc(d, unicode.IsSpace) res[fmt.Sprintf(tpl, count)] = d count = count + 1 } diff --git a/pkg/release/v1/util/manifest_test.go b/pkg/release/v1/util/manifest_test.go index 754ac1367..516ac42d7 100644 --- a/pkg/release/v1/util/manifest_test.go +++ b/pkg/release/v1/util/manifest_test.go @@ -21,7 +21,15 @@ import ( "testing" ) -const mockManifestFile = ` +func TestSplitManifests(t *testing.T) { + tests := []struct { + name string + input string + expected map[string]string + }{ + { + name: "single doc with leading separator and whitespace", + input: ` --- apiVersion: v1 @@ -35,9 +43,9 @@ spec: - name: nemo-test image: fake-image cmd: fake-command -` - -const expectedManifest = `apiVersion: v1 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 kind: Pod metadata: name: finding-nemo, @@ -47,15 +55,463 @@ spec: containers: - name: nemo-test image: fake-image - cmd: fake-command` + cmd: fake-command +`, + }, + }, + { + name: "empty input", + input: "", + expected: map[string]string{}, + }, + { + name: "whitespace only", + input: " \n\n \n", + expected: map[string]string{}, + }, + { + name: "whitespace-only doc after separator is skipped", + input: "---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm1\n---\n \n", + expected: 
map[string]string{ + "manifest-0": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm1", + }, + }, + { + name: "single doc no separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +`, + }, + }, + { + name: "two docs with proper separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + }, + }, -func TestSplitManifest(t *testing.T) { - manifests := SplitManifests(mockManifestFile) - if len(manifests) != 1 { - t.Errorf("Expected 1 manifest, got %v", len(manifests)) + // Block scalar chomping indicator tests using | (clip), |- (strip), and |+ (keep) + // inputs with 0, 1, and 2 trailing newlines after the block content. + // Note: the emitter may normalize the output chomping indicator when the + // trailing newline count makes another indicator equivalent for the result. + + // | (clip) input — clips trailing newlines to exactly one, though with + // 0 trailing newlines the emitted output may normalize to |-. 
+ { + name: "block scalar clip (|) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + }, + }, + { + name: "block scalar clip (|) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +`, + }, + }, + { + name: "block scalar clip (|) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello + +`, + }, + }, + + // |- (strip) + { + name: "block scalar strip (|-) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello`, + }, + }, + { + name: "block scalar strip (|-) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello +`, + }, + }, + { + name: "block scalar strip (|-) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |- + hello + +`, + }, + }, + + // |+ (keep) + { + name: "block scalar keep (|+) with 0 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + 
hello`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + }, + }, + { + name: "block scalar keep (|+) with 1 trailing newline", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello +`, + }, + }, + { + name: "block scalar keep (|+) with 2 trailing newlines", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + +`, + }, + }, + + // Multi-doc with block scalars: the regex consumes \s*\n before ---, + // so trailing newlines from non-last docs are stripped. + { + name: "multi-doc block scalar clip (|) before separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: | + hello`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + }, + }, + { + name: "multi-doc block scalar keep (|+) with 2 trailing newlines before separator", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test +data: + key: |+ + hello`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: test2 +`, + }, + }, + + // **Note for Chart API v3**: The following tests exercise the lenient + // regex that splits `---apiVersion` back into separate documents. 
+ // In Chart API v3, these inputs should return an _ERROR_ instead. + // See the comment on the SplitManifests function for more details. + { + name: "leading glued separator (---apiVersion)", + input: ` +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +`, + }, + }, + { + name: "mid-content glued separator (---apiVersion)", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +`, + }, + }, + { + name: "multiple glued separators", + input: ` +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2`, + "manifest-2": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + }, + { + name: "mixed glued and proper separators", + input: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2 +---apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + expected: map[string]string{ + "manifest-0": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1`, + "manifest-1": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm2`, + "manifest-2": `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm3 +`, + }, + }, } - expected := map[string]string{"manifest-0": expectedManifest} - if !reflect.DeepEqual(manifests, expected) { - t.Errorf("Expected %v, got %v", expected, manifests) + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + result := SplitManifests(tt.input) + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("SplitManifests() =\n%v\nwant:\n%v", result, tt.expected) + } + }) } } diff --git a/pkg/repo/v1/chartrepo_test.go b/pkg/repo/v1/chartrepo_test.go index 353ab62d6..a0a8dcfc4 100644 --- a/pkg/repo/v1/chartrepo_test.go +++ b/pkg/repo/v1/chartrepo_test.go @@ -85,7 +85,7 @@ func TestIndexCustomSchemeDownload(t *testing.T) { } if len(myCustomGetter.repoUrls) != 1 { - t.Fatalf("Custom Getter.Get should be called once") + t.Fatal("Custom Getter.Get should be called once") } expectedRepoIndexURL := repoURL + "/index.yaml" @@ -126,24 +126,20 @@ func TestConcurrencyDownloadIndex(t *testing.T) { // 2) read index.yaml via LoadIndexFile (read operation). // This checks for race conditions and ensures correct behavior under concurrent read/write access. for range 150 { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { idx, err := repo.DownloadIndexFile() if err != nil { t.Errorf("Failed to download index file to %s: %v", idx, err) } - }() + }) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { _, err := LoadIndexFile(indexFName) if err != nil { t.Errorf("Failed to load index file: %v", err) } - }() + }) } wg.Wait() } @@ -244,7 +240,7 @@ func TestErrorFindChartInRepoURL(t *testing.T) { }) if _, err := FindChartInRepoURL("http://someserver/something", "nginx", g); err == nil { - t.Errorf("Expected error for bad chart URL, but did not get any errors") + t.Error("Expected error for bad chart URL, but did not get any errors") } else if !strings.Contains(err.Error(), `looks like "http://someserver/something" is not a valid chart repository or cannot be reached`) { t.Errorf("Expected error for bad chart URL, but got a different error (%v)", err) } @@ -256,22 +252,22 @@ func TestErrorFindChartInRepoURL(t *testing.T) { defer srv.Close() if _, err = FindChartInRepoURL(srv.URL, "nginx1", g); err == nil { - t.Errorf("Expected error for chart not found, but did not 
get any errors") + t.Error("Expected error for chart not found, but did not get any errors") } else if err.Error() != `chart "nginx1" not found in `+srv.URL+` repository` { t.Errorf("Expected error for chart not found, but got a different error (%v)", err) } if !errors.Is(err, ChartNotFoundError{}) { - t.Errorf("error is not of correct error type structure") + t.Error("error is not of correct error type structure") } if _, err = FindChartInRepoURL(srv.URL, "nginx1", g, WithChartVersion("0.1.0")); err == nil { - t.Errorf("Expected error for chart not found, but did not get any errors") + t.Error("Expected error for chart not found, but did not get any errors") } else if err.Error() != `chart "nginx1" version "0.1.0" not found in `+srv.URL+` repository` { t.Errorf("Expected error for chart not found, but got a different error (%v)", err) } if _, err = FindChartInRepoURL(srv.URL, "chartWithNoURL", g); err == nil { - t.Errorf("Expected error for no chart URLs available, but did not get any errors") + t.Error("Expected error for no chart URLs available, but did not get any errors") } else if err.Error() != `chart "chartWithNoURL" has no downloadable URLs` { t.Errorf("Expected error for chart not found, but got a different error (%v)", err) } diff --git a/pkg/repo/v1/index.go b/pkg/repo/v1/index.go index 7969d64e9..3dbdf7dfc 100644 --- a/pkg/repo/v1/index.go +++ b/pkg/repo/v1/index.go @@ -80,7 +80,7 @@ func (c ChartVersions) Less(a, b int) bool { // IndexFile represents the index file in a chart repository type IndexFile struct { // This is used ONLY for validation against chartmuseum's index files and is discarded after validation. 
- ServerInfo map[string]interface{} `json:"serverInfo,omitempty"` + ServerInfo map[string]any `json:"serverInfo,omitempty"` APIVersion string `json:"apiVersion"` Generated time.Time `json:"generated"` Entries map[string]ChartVersions `json:"entries"` @@ -270,7 +270,7 @@ func (i *IndexFile) Merge(f *IndexFile) { type ChartVersion struct { *chart.Metadata URLs []string `json:"urls"` - Created time.Time `json:"created,omitempty"` + Created time.Time `json:"created"` Removed bool `json:"removed,omitempty"` Digest string `json:"digest,omitempty"` @@ -391,7 +391,7 @@ func loadIndex(data []byte, source string) (*IndexFile, error) { // checking its validity as JSON. If the data is valid JSON, it will use the // `encoding/json` package to unmarshal it. Otherwise, it will use the // `sigs.k8s.io/yaml` package to unmarshal the YAML data. -func jsonOrYamlUnmarshal(b []byte, i interface{}) error { +func jsonOrYamlUnmarshal(b []byte, i any) error { if json.Valid(b) { return json.Unmarshal(b, i) } diff --git a/pkg/repo/v1/index_test.go b/pkg/repo/v1/index_test.go index 517457dc4..550c8e82c 100644 --- a/pkg/repo/v1/index_test.go +++ b/pkg/repo/v1/index_test.go @@ -20,7 +20,7 @@ import ( "bufio" "bytes" "encoding/json" - "fmt" + "errors" "net/http" "os" "path/filepath" @@ -130,12 +130,12 @@ func TestIndexFile(t *testing.T) { cv, err = i.Get("setter", "0.1.9+alpha") if err != nil || cv.Version != "0.1.9+alpha" { - t.Errorf("Expected version: 0.1.9+alpha") + t.Error("Expected version: 0.1.9+alpha") } cv, err = i.Get("setter", "0.1.8") if err != nil || cv.Version != "0.1.8" { - t.Errorf("Expected version: 0.1.8") + t.Error("Expected version: 0.1.8") } } @@ -174,7 +174,7 @@ func TestLoadIndex(t *testing.T) { // TestLoadIndex_Duplicates is a regression to make sure that we don't non-deterministically allow duplicate packages. 
func TestLoadIndex_Duplicates(t *testing.T) { if _, err := loadIndex([]byte(indexWithDuplicates), "indexWithDuplicates"); err == nil { - t.Errorf("Expected an error when duplicate entries are present") + t.Error("Expected an error when duplicate entries are present") } } @@ -186,7 +186,7 @@ func TestLoadIndex_EmptyEntry(t *testing.T) { func TestLoadIndex_Empty(t *testing.T) { if _, err := loadIndex([]byte(""), "indexWithEmpty"); err == nil { - t.Errorf("Expected an error when index.yaml is empty.") + t.Error("Expected an error when index.yaml is empty.") } } @@ -360,7 +360,7 @@ func verifyLocalIndex(t *testing.T, i *IndexFile) { alpine, ok := i.Entries["alpine"] if !ok { - t.Fatalf("'alpine' section not found.") + t.Fatal("'alpine' section not found.") } if l := len(alpine); l != 1 { @@ -369,7 +369,7 @@ func verifyLocalIndex(t *testing.T, i *IndexFile) { nginx, ok := i.Entries["nginx"] if !ok || len(nginx) != 2 { - t.Fatalf("Expected 2 nginx entries") + t.Fatal("Expected 2 nginx entries") } expects := []*ChartVersion{ @@ -595,7 +595,7 @@ func TestAddFileIndexEntriesNil(t *testing.T) { {&chart.Metadata{APIVersion: "v2", Name: " ", Version: "8033-5.apinie+s.r"}, "setter-0.1.9+beta.tgz", "http://example.com/charts", "sha256:1234567890abc"}, } { if err := i.MustAdd(x.md, x.filename, x.baseURL, x.digest); err == nil { - t.Errorf("expected err to be non-nil when entries not initialized") + t.Error("expected err to be non-nil when entries not initialized") } } } @@ -610,7 +610,7 @@ func TestIgnoreSkippableChartValidationError(t *testing.T) { Input: nil, }, "generic_error": { - Input: fmt.Errorf("foo"), + Input: errors.New("foo"), }, "non_skipped_validation_error": { Input: chart.ValidationError("chart.metadata.type must be application or library"), @@ -639,7 +639,7 @@ func TestIgnoreSkippableChartValidationError(t *testing.T) { return } - if tc.Input != result { + if !errors.Is(tc.Input, result) { t.Error("expected the result equal to input") } diff --git 
a/pkg/repo/v1/repo_test.go b/pkg/repo/v1/repo_test.go index bdaa61eda..9b5c54309 100644 --- a/pkg/repo/v1/repo_test.go +++ b/pkg/repo/v1/repo_test.go @@ -114,11 +114,11 @@ func TestRepoFile_Get(t *testing.T) { name := "second" entry := repo.Get(name) - if entry == nil { //nolint:staticcheck + if entry == nil { t.Fatalf("Expected repo entry %q to be found", name) } - if entry.URL != "https://example.com/second" { //nolint:staticcheck + if entry.URL != "https://example.com/second" { t.Errorf("Expected repo URL to be %q but got %q", "https://example.com/second", entry.URL) } @@ -219,9 +219,9 @@ func TestWriteFile(t *testing.T) { func TestRepoNotExists(t *testing.T) { if _, err := LoadFile("/this/path/does/not/exist.yaml"); err == nil { - t.Errorf("expected err to be non-nil when path does not exist") + t.Error("expected err to be non-nil when path does not exist") } else if !strings.Contains(err.Error(), "couldn't load repositories file") { - t.Errorf("expected prompt `couldn't load repositories file`") + t.Error("expected prompt `couldn't load repositories file`") } } diff --git a/pkg/repo/v1/repotest/server.go b/pkg/repo/v1/repotest/server.go index 12b96de5a..f1b5e0744 100644 --- a/pkg/repo/v1/repotest/server.go +++ b/pkg/repo/v1/repotest/server.go @@ -153,6 +153,10 @@ type OCIServerRunConfig struct { type OCIServerOpt func(config *OCIServerRunConfig) +type OCIServerRunResult struct { + PushedChart *ociRegistry.PushResult +} + func WithDependingChart(c *chart.Chart) OCIServerOpt { return func(config *OCIServerRunConfig) { config.DependingChart = c @@ -171,21 +175,21 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) { htpasswdPath := filepath.Join(dir, testHtpasswdFileBasename) err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0o644) if err != nil { - t.Fatalf("error creating test htpasswd file") + t.Fatal("error creating test htpasswd file") } // Registry config config := &configuration.Configuration{} ln, 
err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - t.Fatalf("error finding free port for test registry") + t.Fatalf("error finding free port for test registry: %v", err) } defer ln.Close() port := ln.Addr().(*net.TCPAddr).Port config.HTTP.Addr = ln.Addr().String() config.HTTP.DrainTimeout = time.Duration(10) * time.Second - config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]any{}} config.Auth = configuration.Auth{ "htpasswd": configuration.Parameters{ "realm": "localhost", @@ -210,6 +214,11 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) { } func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) { + t.Helper() + _ = srv.RunWithReturn(t, opts...) +} + +func (srv *OCIServer) RunWithReturn(t *testing.T, opts ...OCIServerOpt) *OCIServerRunResult { t.Helper() cfg := &OCIServerRunConfig{} for _, fn := range opts { @@ -228,7 +237,7 @@ func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) { ociRegistry.ClientOptCredentialsFile(credentialsFile), ) if err != nil { - t.Fatalf("error creating registry client") + t.Fatalf("error creating registry client: %v", err) } err = registryClient.Login( @@ -240,7 +249,7 @@ func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) { t.Fatalf("error logging into registry with good credentials: %v", err) } - ref := fmt.Sprintf("%s/u/ocitestuser/oci-dependent-chart:0.1.0", srv.RegistryURL) + ref := srv.RegistryURL + "/u/ocitestuser/oci-dependent-chart:0.1.0" err = chartutil.ExpandFile(srv.Dir, filepath.Join(srv.Dir, "oci-dependent-chart-0.1.0.tgz")) if err != nil { @@ -284,7 +293,9 @@ func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) { srv.Client = registryClient c := cfg.DependingChart if c == nil { - return + return &OCIServerRunResult{ + PushedChart: result, + } } dependingRef := fmt.Sprintf("%s/u/ocitestuser/%s:%s", @@ -308,6 +319,10 @@ func (srv *OCIServer) Run(t 
*testing.T, opts ...OCIServerOpt) { result.Manifest.Digest, result.Manifest.Size, result.Config.Digest, result.Config.Size, result.Chart.Digest, result.Chart.Size) + + return &OCIServerRunResult{ + PushedChart: result, + } } // Root gets the docroot for the server. diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go index f82ade5e9..00a0832b3 100644 --- a/pkg/storage/driver/cfgmaps.go +++ b/pkg/storage/driver/cfgmaps.go @@ -171,7 +171,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls release.Releaser) error { lbs.init() lbs.fromMap(rac.Labels()) - lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix())) + lbs.set("createdAt", strconv.FormatInt(time.Now().Unix(), 10)) rel, err := releaserToV1Release(rls) if err != nil { @@ -209,7 +209,7 @@ func (cfgmaps *ConfigMaps) Update(key string, rel release.Releaser) error { lbs.init() lbs.fromMap(rls.Labels) - lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix())) + lbs.set("modifiedAt", strconv.FormatInt(time.Now().Unix(), 10)) // create a new configmap object to hold the release obj, err := newConfigMapsObject(key, rls, lbs) diff --git a/pkg/storage/driver/cfgmaps_test.go b/pkg/storage/driver/cfgmaps_test.go index 8beb45547..947ebff71 100644 --- a/pkg/storage/driver/cfgmaps_test.go +++ b/pkg/storage/driver/cfgmaps_test.go @@ -180,7 +180,7 @@ func TestConfigMapQuery(t *testing.T) { } _, err = cfgmaps.Query(map[string]string{"name": "notExist"}) - if err != ErrReleaseNotFound { + if !errors.Is(err, ErrReleaseNotFound) { t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err) } } @@ -252,7 +252,7 @@ func TestConfigMapDelete(t *testing.T) { // perform the delete on a non-existent release _, err := cfgmaps.Delete("nonexistent") - if err != ErrReleaseNotFound { + if !errors.Is(err, ErrReleaseNotFound) { t.Fatalf("Expected ErrReleaseNotFound: got {%v}", err) } diff --git a/pkg/storage/driver/memory_test.go b/pkg/storage/driver/memory_test.go index 329b82b2f..c6401b425 100644 --- 
a/pkg/storage/driver/memory_test.go +++ b/pkg/storage/driver/memory_test.go @@ -280,7 +280,7 @@ func TestMemoryDelete(t *testing.T) { } _, err = ts.Get(tt.key) if err == nil { - t.Errorf("Expected an error when asking for a deleted key") + t.Error("Expected an error when asking for a deleted key") } } diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index a73f3cf05..5e12684df 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -171,7 +171,7 @@ func (secrets *Secrets) Create(key string, rel release.Releaser) error { lbs.init() lbs.fromMap(rls.Labels) - lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix())) + lbs.set("createdAt", strconv.FormatInt(time.Now().Unix(), 10)) // create a new secret to hold the release obj, err := newSecretsObject(key, rls, lbs) @@ -202,7 +202,7 @@ func (secrets *Secrets) Update(key string, rel release.Releaser) error { lbs.init() lbs.fromMap(rls.Labels) - lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix())) + lbs.set("modifiedAt", strconv.FormatInt(time.Now().Unix(), 10)) // create a new secret object to hold the release obj, err := newSecretsObject(key, rls, lbs) diff --git a/pkg/storage/driver/secrets_test.go b/pkg/storage/driver/secrets_test.go index f4aa1176c..a11ec4380 100644 --- a/pkg/storage/driver/secrets_test.go +++ b/pkg/storage/driver/secrets_test.go @@ -165,7 +165,7 @@ func TestSecretQuery(t *testing.T) { } _, err = secrets.Query(map[string]string{"name": "notExist"}) - if err != ErrReleaseNotFound { + if !errors.Is(err, ErrReleaseNotFound) { t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err) } } @@ -237,7 +237,7 @@ func TestSecretDelete(t *testing.T) { // perform the delete on a non-existing release _, err := secrets.Delete("nonexistent") - if err != ErrReleaseNotFound { + if !errors.Is(err, ErrReleaseNotFound) { t.Fatalf("Expected ErrReleaseNotFound, got: {%v}", err) } diff --git a/pkg/storage/driver/sql_test.go b/pkg/storage/driver/sql_test.go 
index f7c29033c..4b4686b66 100644 --- a/pkg/storage/driver/sql_test.go +++ b/pkg/storage/driver/sql_test.go @@ -15,6 +15,7 @@ package driver import ( "database/sql/driver" + "errors" "fmt" "reflect" "regexp" @@ -289,7 +290,7 @@ func TestSqlCreateAlreadyExists(t *testing.T) { mock. ExpectExec(regexp.QuoteMeta(insertQuery)). WithArgs(key, sqlReleaseDefaultType, body, rel.Name, rel.Namespace, int(rel.Version), rel.Info.Status.String(), sqlReleaseDefaultOwner, recentUnixTimestamp()). - WillReturnError(fmt.Errorf("dialect dependent SQL error")) + WillReturnError(errors.New("dialect dependent SQL error")) selectQuery := fmt.Sprintf( regexp.QuoteMeta("SELECT %s FROM %s WHERE %s = $1 AND %s = $2"), @@ -447,7 +448,7 @@ func TestSqlQuery(t *testing.T) { _, err := sqlDriver.Query(labelSetUnknown) if err == nil { t.Errorf("Expected error {%v}, got nil", ErrReleaseNotFound) - } else if err != ErrReleaseNotFound { + } else if !errors.Is(err, ErrReleaseNotFound) { t.Fatalf("failed to query for unknown smug-pigeon release: %v", err) } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index d6c41635b..d5d2ea317 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -339,12 +339,14 @@ func Init(d driver.Driver) *Storage { Driver: d, } + var h slog.Handler // Get logger from driver if it implements the LoggerSetterGetter interface if ls, ok := d.(logging.LoggerSetterGetter); ok { - ls.SetLogger(s.Logger().Handler()) + h = ls.Logger().Handler() } else { // If the driver does not implement the LoggerSetterGetter interface, set the default logger - s.SetLogger(slog.Default().Handler()) + h = slog.Default().Handler() } + s.SetLogger(h) return s } diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index 5b2a3bba5..9b4203224 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -17,8 +17,10 @@ limitations under the License. 
package storage // import "helm.sh/helm/v4/pkg/storage" import ( + "context" "errors" "fmt" + "log/slog" "reflect" "testing" @@ -206,7 +208,7 @@ func TestStorageDeployed(t *testing.T) { switch { case rls == nil: - t.Fatalf("Release is nil") + t.Fatal("Release is nil") case rel.Name != name: t.Fatalf("Expected release name %q, actual %q\n", name, rel.Name) case rel.Version != vers: @@ -249,7 +251,7 @@ func TestStorageDeployedWithCorruption(t *testing.T) { switch { case rls == nil: - t.Fatalf("Release is nil") + t.Fatal("Release is nil") case rel.Name != name: t.Fatalf("Expected release name %q, actual %q\n", name, rel.Name) case rel.Version != vers: @@ -574,8 +576,40 @@ func (test ReleaseTestData) ToRelease() *rspb.Release { } } -func assertErrNil(eh func(args ...interface{}), err error, message string) { +func assertErrNil(eh func(args ...any), err error, message string) { if err != nil { eh(fmt.Sprintf("%s: %q", message, err)) } } + +func TestStorageGetsLoggerFromDriver(t *testing.T) { + d := driver.NewMemory() + l := &mockSLogHandler{} + d.SetLogger(l) + s := Init(d) + _, _ = s.Get("doesnt-matter", 123) + if !l.Called { + t.Fatal("Expected storage to use driver's logger, but it did not") + } +} + +type mockSLogHandler struct { + Called bool +} + +func (m *mockSLogHandler) Enabled(context.Context, slog.Level) bool { + return true +} + +func (m *mockSLogHandler) Handle(context.Context, slog.Record) error { + m.Called = true + return nil +} + +func (m *mockSLogHandler) WithAttrs([]slog.Attr) slog.Handler { + return m +} + +func (m *mockSLogHandler) WithGroup(string) slog.Handler { + return m +} diff --git a/pkg/strvals/literal_parser.go b/pkg/strvals/literal_parser.go index d5d4c25b4..963558113 100644 --- a/pkg/strvals/literal_parser.go +++ b/pkg/strvals/literal_parser.go @@ -26,8 +26,8 @@ import ( // ParseLiteral parses a set line interpreting the value as a literal string. 
// // A set line is of the form name1=value1 -func ParseLiteral(s string) (map[string]interface{}, error) { - vals := map[string]interface{}{} +func ParseLiteral(s string) (map[string]any, error) { + vals := map[string]any{} scanner := bytes.NewBufferString(s) t := newLiteralParser(scanner, vals) err := t.parse() @@ -39,7 +39,7 @@ func ParseLiteral(s string) (map[string]interface{}, error) { // // If the strval string has a key that exists in dest, it overwrites the // dest version. -func ParseLiteralInto(s string, dest map[string]interface{}) error { +func ParseLiteralInto(s string, dest map[string]any) error { scanner := bytes.NewBufferString(s) t := newLiteralParser(scanner, dest) return t.parse() @@ -54,10 +54,10 @@ func ParseLiteralInto(s string, dest map[string]interface{}) error { // where data is the final parsed data from the parses with correct types type literalParser struct { sc *bytes.Buffer - data map[string]interface{} + data map[string]any } -func newLiteralParser(sc *bytes.Buffer, data map[string]interface{}) *literalParser { +func newLiteralParser(sc *bytes.Buffer, data map[string]any) *literalParser { return &literalParser{sc: sc, data: data} } @@ -88,7 +88,7 @@ func runesUntilLiteral(in io.RuneReader, stop map[rune]bool) ([]rune, rune, erro } } -func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (reterr error) { +func (t *literalParser) key(data map[string]any, nestedNameLevel int) (reterr error) { defer func() { if r := recover(); r != nil { reterr = fmt.Errorf("unable to parse key: %s", r) @@ -106,7 +106,7 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r case lastRune == '=': // found end of key: swallow the '=' and get the value value, err := t.val() - if err == nil && err != io.EOF { + if err == nil && !errors.Is(err, io.EOF) { return err } set(data, string(key), string(value)) @@ -120,9 +120,9 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r } 
// first, create or find the target map in the given data - inner := map[string]interface{}{} + inner := map[string]any{} if _, ok := data[string(key)]; ok { - inner = data[string(key)].(map[string]interface{}) + inner = data[string(key)].(map[string]any) } // recurse on sub-tree with remaining data @@ -144,9 +144,9 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r kk := string(key) // find or create target list - list := []interface{}{} + list := []any{} if _, ok := data[kk]; ok { - list = data[kk].([]interface{}) + list = data[kk].([]any) } // now we need to get the value after the ] @@ -169,7 +169,7 @@ func (t *literalParser) keyIndex() (int, error) { return strconv.Atoi(string(v)) } -func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([]interface{}, error) { +func (t *literalParser) listItem(list []any, i, nestedNameLevel int) ([]any, error) { if i < 0 { return list, fmt.Errorf("negative %d index not allowed", i) } @@ -191,14 +191,14 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([] case lastRune == '.': // we have a nested object. Send to t.key - inner := map[string]interface{}{} + inner := map[string]any{} if len(list) > i { var ok bool - inner, ok = list[i].(map[string]interface{}) + inner, ok = list[i].(map[string]any) if !ok { // We have indices out of order. Initialize empty value. - list[i] = map[string]interface{}{} - inner = list[i].(map[string]interface{}) + list[i] = map[string]any{} + inner = list[i].(map[string]any) } } @@ -215,12 +215,12 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([] if err != nil { return list, fmt.Errorf("error parsing index: %w", err) } - var crtList []interface{} + var crtList []any if len(list) > i { // If nested list already exists, take the value of list to next cycle. 
existed := list[i] if existed != nil { - crtList = list[i].([]interface{}) + crtList = list[i].([]any) } } diff --git a/pkg/strvals/literal_parser_test.go b/pkg/strvals/literal_parser_test.go index 6a76458f5..cc3d73d50 100644 --- a/pkg/strvals/literal_parser_test.go +++ b/pkg/strvals/literal_parser_test.go @@ -26,7 +26,7 @@ import ( func TestParseLiteral(t *testing.T) { cases := []struct { str string - expect map[string]interface{} + expect map[string]any err bool }{ { @@ -35,61 +35,61 @@ func TestParseLiteral(t *testing.T) { }, { str: "name=", - expect: map[string]interface{}{"name": ""}, + expect: map[string]any{"name": ""}, }, { str: "name=value", - expect: map[string]interface{}{"name": "value"}, + expect: map[string]any{"name": "value"}, err: false, }, { str: "long_int_string=1234567890", - expect: map[string]interface{}{"long_int_string": "1234567890"}, + expect: map[string]any{"long_int_string": "1234567890"}, err: false, }, { str: "boolean=true", - expect: map[string]interface{}{"boolean": "true"}, + expect: map[string]any{"boolean": "true"}, err: false, }, { str: "is_null=null", - expect: map[string]interface{}{"is_null": "null"}, + expect: map[string]any{"is_null": "null"}, err: false, }, { str: "zero=0", - expect: map[string]interface{}{"zero": "0"}, + expect: map[string]any{"zero": "0"}, err: false, }, { str: "name1=null,name2=value2", - expect: map[string]interface{}{"name1": "null,name2=value2"}, + expect: map[string]any{"name1": "null,name2=value2"}, err: false, }, { str: "name1=value,,,tail", - expect: map[string]interface{}{"name1": "value,,,tail"}, + expect: map[string]any{"name1": "value,,,tail"}, err: false, }, { str: "leading_zeros=00009", - expect: map[string]interface{}{"leading_zeros": "00009"}, + expect: map[string]any{"leading_zeros": "00009"}, err: false, }, { str: "name=one two three", - expect: map[string]interface{}{"name": "one two three"}, + expect: map[string]any{"name": "one two three"}, err: false, }, { str: "outer.inner=value", - 
expect: map[string]interface{}{"outer": map[string]interface{}{"inner": "value"}}, + expect: map[string]any{"outer": map[string]any{"inner": "value"}}, err: false, }, { str: "outer.middle.inner=value", - expect: map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}}, + expect: map[string]any{"outer": map[string]any{"middle": map[string]any{"inner": "value"}}}, err: false, }, { @@ -98,7 +98,7 @@ func TestParseLiteral(t *testing.T) { }, { str: "name1.name2=", - expect: map[string]interface{}{"name1": map[string]interface{}{"name2": ""}}, + expect: map[string]any{"name1": map[string]any{"name2": ""}}, err: false, }, { @@ -111,20 +111,20 @@ func TestParseLiteral(t *testing.T) { }, { str: "name1={value1,value2}", - expect: map[string]interface{}{"name1": "{value1,value2}"}, + expect: map[string]any{"name1": "{value1,value2}"}, }, // List support { str: "list[0]=foo", - expect: map[string]interface{}{"list": []string{"foo"}}, + expect: map[string]any{"list": []string{"foo"}}, err: false, }, { str: "list[0].foo=bar", - expect: map[string]interface{}{ - "list": []interface{}{ - map[string]interface{}{"foo": "bar"}, + expect: map[string]any{ + "list": []any{ + map[string]any{"foo": "bar"}, }, }, err: false, @@ -135,7 +135,7 @@ func TestParseLiteral(t *testing.T) { }, { str: "list[3]=bar", - expect: map[string]interface{}{"list": []interface{}{nil, nil, nil, "bar"}}, + expect: map[string]any{"list": []any{nil, nil, nil, "bar"}}, err: false, }, { @@ -144,133 +144,133 @@ func TestParseLiteral(t *testing.T) { }, { str: "noval[0]", - expect: map[string]interface{}{"noval": []interface{}{}}, + expect: map[string]any{"noval": []any{}}, err: false, }, { str: "noval[0]=", - expect: map[string]interface{}{"noval": []interface{}{""}}, + expect: map[string]any{"noval": []any{""}}, err: false, }, { str: "nested[0][0]=1", - expect: map[string]interface{}{"nested": []interface{}{[]interface{}{"1"}}}, + expect: map[string]any{"nested": 
[]any{[]any{"1"}}}, err: false, }, { str: "nested[1][1]=1", - expect: map[string]interface{}{"nested": []interface{}{nil, []interface{}{nil, "1"}}}, + expect: map[string]any{"nested": []any{nil, []any{nil, "1"}}}, err: false, }, { str: "name1.name2[0].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{{"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{{"foo": "bar"}}, }, }, }, { str: "name1.name2[1].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{nil, {"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{nil, {"foo": "bar"}}, }, }, }, { str: "name1.name2[1].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{nil, {"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{nil, {"foo": "bar"}}, }, }, }, { str: "]={}].", - expect: map[string]interface{}{"]": "{}]."}, + expect: map[string]any{"]": "{}]."}, err: false, }, // issue test cases: , = $ ( ) { } . 
\ \\ { str: "name=val,val", - expect: map[string]interface{}{"name": "val,val"}, + expect: map[string]any{"name": "val,val"}, err: false, }, { str: "name=val.val", - expect: map[string]interface{}{"name": "val.val"}, + expect: map[string]any{"name": "val.val"}, err: false, }, { str: "name=val=val", - expect: map[string]interface{}{"name": "val=val"}, + expect: map[string]any{"name": "val=val"}, err: false, }, { str: "name=val$val", - expect: map[string]interface{}{"name": "val$val"}, + expect: map[string]any{"name": "val$val"}, err: false, }, { str: "name=(value", - expect: map[string]interface{}{"name": "(value"}, + expect: map[string]any{"name": "(value"}, err: false, }, { str: "name=value)", - expect: map[string]interface{}{"name": "value)"}, + expect: map[string]any{"name": "value)"}, err: false, }, { str: "name=(value)", - expect: map[string]interface{}{"name": "(value)"}, + expect: map[string]any{"name": "(value)"}, err: false, }, { str: "name={value", - expect: map[string]interface{}{"name": "{value"}, + expect: map[string]any{"name": "{value"}, err: false, }, { str: "name=value}", - expect: map[string]interface{}{"name": "value}"}, + expect: map[string]any{"name": "value}"}, err: false, }, { str: "name={value}", - expect: map[string]interface{}{"name": "{value}"}, + expect: map[string]any{"name": "{value}"}, err: false, }, { str: "name={value1,value2}", - expect: map[string]interface{}{"name": "{value1,value2}"}, + expect: map[string]any{"name": "{value1,value2}"}, err: false, }, { str: `name=val\val`, - expect: map[string]interface{}{"name": `val\val`}, + expect: map[string]any{"name": `val\val`}, err: false, }, { str: `name=val\\val`, - expect: map[string]interface{}{"name": `val\\val`}, + expect: map[string]any{"name": `val\\val`}, err: false, }, { str: `name=val\\\val`, - expect: map[string]interface{}{"name": `val\\\val`}, + expect: map[string]any{"name": `val\\\val`}, err: false, }, { str: `name={val,.?*v\0a!l)some`, - expect: 
map[string]interface{}{"name": `{val,.?*v\0a!l)some`}, + expect: map[string]any{"name": `{val,.?*v\0a!l)some`}, err: false, }, { str: `name=em%GT)tqUDqz,i-\h+Mbqs-!:.m\\rE=mkbM#rR}@{-k@`, - expect: map[string]interface{}{"name": `em%GT)tqUDqz,i-\h+Mbqs-!:.m\\rE=mkbM#rR}@{-k@`}, + expect: map[string]any{"name": `em%GT)tqUDqz,i-\h+Mbqs-!:.m\\rE=mkbM#rR}@{-k@`}, }, } @@ -307,20 +307,20 @@ func TestParseLiteralInto(t *testing.T) { tests := []struct { input string input2 string - got map[string]interface{} - expect map[string]interface{} + got map[string]any + expect map[string]any err bool }{ { input: "outer.inner1=value1,outer.inner3=value3,outer.inner4=4", - got: map[string]interface{}{ - "outer": map[string]interface{}{ + got: map[string]any{ + "outer": map[string]any{ "inner1": "overwrite", "inner2": "value2", }, }, - expect: map[string]interface{}{ - "outer": map[string]interface{}{ + expect: map[string]any{ + "outer": map[string]any{ "inner1": "value1,outer.inner3=value3,outer.inner4=4", "inner2": "value2", }}, @@ -329,9 +329,9 @@ func TestParseLiteralInto(t *testing.T) { { input: "listOuter[0][0].type=listValue", input2: "listOuter[0][0].status=alive", - got: map[string]interface{}{}, - expect: map[string]interface{}{ - "listOuter": [][]interface{}{{map[string]string{ + got: map[string]any{}, + expect: map[string]any{ + "listOuter": [][]any{{map[string]string{ "type": "listValue", "status": "alive", }}}, @@ -341,9 +341,9 @@ func TestParseLiteralInto(t *testing.T) { { input: "listOuter[0][0].type=listValue", input2: "listOuter[1][0].status=alive", - got: map[string]interface{}{}, - expect: map[string]interface{}{ - "listOuter": [][]interface{}{ + got: map[string]any{}, + expect: map[string]any{ + "listOuter": [][]any{ { map[string]string{"type": "listValue"}, }, @@ -357,17 +357,17 @@ func TestParseLiteralInto(t *testing.T) { { input: "listOuter[0][1][0].type=listValue", input2: "listOuter[0][0][1].status=alive", - got: map[string]interface{}{ - "listOuter": 
[]interface{}{ - []interface{}{ - []interface{}{ + got: map[string]any{ + "listOuter": []any{ + []any{ + []any{ map[string]string{"exited": "old"}, }, }, }, }, - expect: map[string]interface{}{ - "listOuter": [][][]interface{}{ + expect: map[string]any{ + "listOuter": [][][]any{ { { map[string]string{"exited": "old"}, @@ -429,13 +429,13 @@ func TestParseLiteralNestedLevels(t *testing.T) { tests := []struct { str string - expect map[string]interface{} + expect map[string]any err bool errStr string }{ { "outer.middle.inner=value", - map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}}, + map[string]any{"outer": map[string]any{"middle": map[string]any{"inner": "value"}}}, false, "", }, diff --git a/pkg/strvals/parser.go b/pkg/strvals/parser.go index 8eb761dce..cecaa2453 100644 --- a/pkg/strvals/parser.go +++ b/pkg/strvals/parser.go @@ -52,8 +52,8 @@ func ToYAML(s string) (string, error) { // Parse parses a set line. // // A set line is of the form name1=value1,name2=value2 -func Parse(s string) (map[string]interface{}, error) { - vals := map[string]interface{}{} +func Parse(s string) (map[string]any, error) { + vals := map[string]any{} scanner := bytes.NewBufferString(s) t := newParser(scanner, vals, false) err := t.parse() @@ -63,8 +63,8 @@ func Parse(s string) (map[string]interface{}, error) { // ParseString parses a set line and forces a string value. // // A set line is of the form name1=value1,name2=value2 -func ParseString(s string) (map[string]interface{}, error) { - vals := map[string]interface{}{} +func ParseString(s string) (map[string]any, error) { + vals := map[string]any{} scanner := bytes.NewBufferString(s) t := newParser(scanner, vals, true) err := t.parse() @@ -75,7 +75,7 @@ func ParseString(s string) (map[string]interface{}, error) { // // If the strval string has a key that exists in dest, it overwrites the // dest version. 
-func ParseInto(s string, dest map[string]interface{}) error { +func ParseInto(s string, dest map[string]any) error { scanner := bytes.NewBufferString(s) t := newParser(scanner, dest, false) return t.parse() @@ -87,8 +87,8 @@ func ParseInto(s string, dest map[string]interface{}) error { // // When the files at path1 and path2 contained "val1" and "val2" respectively, the set line is consumed as // name1=val1,name2=val2 -func ParseFile(s string, reader RunesValueReader) (map[string]interface{}, error) { - vals := map[string]interface{}{} +func ParseFile(s string, reader RunesValueReader) (map[string]any, error) { + vals := map[string]any{} scanner := bytes.NewBufferString(s) t := newFileParser(scanner, vals, reader) err := t.parse() @@ -98,7 +98,7 @@ func ParseFile(s string, reader RunesValueReader) (map[string]interface{}, error // ParseIntoString parses a strvals line and merges the result into dest. // // This method always returns a string as the value. -func ParseIntoString(s string, dest map[string]interface{}) error { +func ParseIntoString(s string, dest map[string]any) error { scanner := bytes.NewBufferString(s) t := newParser(scanner, dest, true) return t.parse() @@ -109,7 +109,7 @@ func ParseIntoString(s string, dest map[string]interface{}) error { // An empty val is treated as null. // // If a key exists in dest, the new value overwrites the dest version. -func ParseJSON(s string, dest map[string]interface{}) error { +func ParseJSON(s string, dest map[string]any) error { scanner := bytes.NewBufferString(s) t := newJSONParser(scanner, dest) return t.parse() @@ -118,7 +118,7 @@ func ParseJSON(s string, dest map[string]interface{}) error { // ParseIntoFile parses a filevals line and merges the result into dest. // // This method always returns a string as the value. 
-func ParseIntoFile(s string, dest map[string]interface{}, reader RunesValueReader) error { +func ParseIntoFile(s string, dest map[string]any, reader RunesValueReader) error { scanner := bytes.NewBufferString(s) t := newFileParser(scanner, dest, reader) return t.parse() @@ -126,7 +126,7 @@ func ParseIntoFile(s string, dest map[string]interface{}, reader RunesValueReade // RunesValueReader is a function that takes the given value (a slice of runes) // and returns the parsed value -type RunesValueReader func([]rune) (interface{}, error) +type RunesValueReader func([]rune) (any, error) // parser is a simple parser that takes a strvals line and parses it into a // map representation. @@ -135,23 +135,23 @@ type RunesValueReader func([]rune) (interface{}, error) // where data is the final parsed data from the parses with correct types type parser struct { sc *bytes.Buffer - data map[string]interface{} + data map[string]any reader RunesValueReader isjsonval bool } -func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser { - stringConverter := func(rs []rune) (interface{}, error) { +func newParser(sc *bytes.Buffer, data map[string]any, stringBool bool) *parser { + stringConverter := func(rs []rune) (any, error) { return typedVal(rs, stringBool), nil } return &parser{sc: sc, data: data, reader: stringConverter} } -func newJSONParser(sc *bytes.Buffer, data map[string]interface{}) *parser { +func newJSONParser(sc *bytes.Buffer, data map[string]any) *parser { return &parser{sc: sc, data: data, reader: nil, isjsonval: true} } -func newFileParser(sc *bytes.Buffer, data map[string]interface{}, reader RunesValueReader) *parser { +func newFileParser(sc *bytes.Buffer, data map[string]any, reader RunesValueReader) *parser { return &parser{sc: sc, data: data, reader: reader} } @@ -176,7 +176,7 @@ func runeSet(r []rune) map[rune]bool { return s } -func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr error) { +func (t *parser) 
key(data map[string]any, nestedNameLevel int) (reterr error) { defer func() { if r := recover(); r != nil { reterr = fmt.Errorf("unable to parse key: %s", r) @@ -200,9 +200,9 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e } kk := string(k) // Find or create target list - list := []interface{}{} + list := []any{} if _, ok := data[kk]; ok { - list = data[kk].([]interface{}) + list = data[kk].([]any) } // Now we need to get the value after the ]. @@ -224,7 +224,7 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e // Since Decode has its own buffer that consumes more characters (from underlying t.sc) than the ones actually decoded, // we invoke Decode on a separate reader built with a copy of what is left in t.sc. After Decode is executed, we // discard in t.sc the chars of the decoded json value (the number of those characters is returned by InputOffset). - var jsonval interface{} + var jsonval any dec := json.NewDecoder(strings.NewReader(t.sc.String())) if err = dec.Decode(&jsonval); err != nil { return err @@ -270,9 +270,9 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e } // First, create or find the target map. - inner := map[string]interface{}{} + inner := map[string]any{} if _, ok := data[string(k)]; ok { - inner = data[string(k)].(map[string]interface{}) + inner = data[string(k)].(map[string]any) } // Recurse @@ -288,7 +288,7 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e } } -func set(data map[string]interface{}, key string, val interface{}) { +func set(data map[string]any, key string, val any) { // If key is empty, don't set it. 
if len(key) == 0 { return @@ -296,7 +296,7 @@ func set(data map[string]interface{}, key string, val interface{}) { data[key] = val } -func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) { +func setIndex(list []any, index int, val any) (l2 []any, err error) { // There are possible index values that are out of range on a target system // causing a panic. This will catch the panic and return an error instead. // The value of the index that causes a panic varies from system to system. @@ -313,7 +313,7 @@ func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex) } if len(list) <= index { - newlist := make([]interface{}, index+1) + newlist := make([]any, index+1) copy(newlist, list) list = newlist } @@ -333,7 +333,7 @@ func (t *parser) keyIndex() (int, error) { } -func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interface{}, error) { +func (t *parser) listItem(list []any, i, nestedNameLevel int) ([]any, error) { if i < 0 { return list, fmt.Errorf("negative %d index not allowed", i) } @@ -357,7 +357,7 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa // Since Decode has its own buffer that consumes more characters (from underlying t.sc) than the ones actually decoded, // we invoke Decode on a separate reader built with a copy of what is left in t.sc. After Decode is executed, we // discard in t.sc the chars of the decoded json value (the number of those characters is returned by InputOffset). 
- var jsonval interface{} + var jsonval any dec := json.NewDecoder(strings.NewReader(t.sc.String())) if err = dec.Decode(&jsonval); err != nil { return list, err @@ -397,12 +397,12 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa if err != nil { return list, fmt.Errorf("error parsing index: %w", err) } - var crtList []interface{} + var crtList []any if len(list) > i { // If nested list already exists, take the value of list to next cycle. existed := list[i] if existed != nil { - crtList = list[i].([]interface{}) + crtList = list[i].([]any) } } // Now we need to get the value after the ]. @@ -413,14 +413,14 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa return setIndex(list, i, list2) case last == '.': // We have a nested object. Send to t.key - inner := map[string]interface{}{} + inner := map[string]any{} if len(list) > i { var ok bool - inner, ok = list[i].(map[string]interface{}) + inner, ok = list[i].(map[string]any) if !ok { // We have indices out of order. Initialize empty value. 
- list[i] = map[string]interface{}{} - inner = list[i].(map[string]interface{}) + list[i] = map[string]any{} + inner = list[i].(map[string]any) } } @@ -463,18 +463,18 @@ func (t *parser) val() ([]rune, error) { return v, err } -func (t *parser) valList() ([]interface{}, error) { +func (t *parser) valList() ([]any, error) { r, _, e := t.sc.ReadRune() if e != nil { - return []interface{}{}, e + return []any{}, e } if r != '{' { t.sc.UnreadRune() - return []interface{}{}, ErrNotList + return []any{}, ErrNotList } - list := []interface{}{} + list := []any{} stop := runeSet([]rune{',', '}'}) for { switch rs, last, err := runesUntil(t.sc, stop); { @@ -526,7 +526,7 @@ func inMap(k rune, m map[rune]bool) bool { return ok } -func typedVal(v []rune, st bool) interface{} { +func typedVal(v []rune, st bool) any { val := string(v) if st { diff --git a/pkg/strvals/parser_test.go b/pkg/strvals/parser_test.go index 73403fc52..e3f8b9d8e 100644 --- a/pkg/strvals/parser_test.go +++ b/pkg/strvals/parser_test.go @@ -26,48 +26,48 @@ import ( func TestSetIndex(t *testing.T) { tests := []struct { name string - initial []interface{} - expect []interface{} + initial []any + expect []any add int val int err bool }{ { name: "short", - initial: []interface{}{0, 1}, - expect: []interface{}{0, 1, 2}, + initial: []any{0, 1}, + expect: []any{0, 1, 2}, add: 2, val: 2, err: false, }, { name: "equal", - initial: []interface{}{0, 1}, - expect: []interface{}{0, 2}, + initial: []any{0, 1}, + expect: []any{0, 2}, add: 1, val: 2, err: false, }, { name: "long", - initial: []interface{}{0, 1, 2, 3, 4, 5}, - expect: []interface{}{0, 1, 2, 4, 4, 5}, + initial: []any{0, 1, 2, 3, 4, 5}, + expect: []any{0, 1, 2, 4, 4, 5}, add: 3, val: 4, err: false, }, { name: "negative", - initial: []interface{}{0, 1, 2, 3, 4, 5}, - expect: []interface{}{0, 1, 2, 3, 4, 5}, + initial: []any{0, 1, 2, 3, 4, 5}, + expect: []any{0, 1, 2, 3, 4, 5}, add: -1, val: 4, err: true, }, { name: "large", - initial: []interface{}{0, 1, 2, 3, 
4, 5}, - expect: []interface{}{0, 1, 2, 3, 4, 5}, + initial: []any{0, 1, 2, 3, 4, 5}, + expect: []any{0, 1, 2, 3, 4, 5}, add: MaxIndex + 1, val: 4, err: true, @@ -104,53 +104,53 @@ func TestSetIndex(t *testing.T) { func TestParseSet(t *testing.T) { testsString := []struct { str string - expect map[string]interface{} + expect map[string]any err bool }{ { str: "long_int_string=1234567890", - expect: map[string]interface{}{"long_int_string": "1234567890"}, + expect: map[string]any{"long_int_string": "1234567890"}, err: false, }, { str: "boolean=true", - expect: map[string]interface{}{"boolean": "true"}, + expect: map[string]any{"boolean": "true"}, err: false, }, { str: "is_null=null", - expect: map[string]interface{}{"is_null": "null"}, + expect: map[string]any{"is_null": "null"}, err: false, }, { str: "zero=0", - expect: map[string]interface{}{"zero": "0"}, + expect: map[string]any{"zero": "0"}, err: false, }, } tests := []struct { str string - expect map[string]interface{} + expect map[string]any err bool }{ { "name1=null,f=false,t=true", - map[string]interface{}{"name1": nil, "f": false, "t": true}, + map[string]any{"name1": nil, "f": false, "t": true}, false, }, { "name1=value1", - map[string]interface{}{"name1": "value1"}, + map[string]any{"name1": "value1"}, false, }, { "name1=value1,name2=value2", - map[string]interface{}{"name1": "value1", "name2": "value2"}, + map[string]any{"name1": "value1", "name2": "value2"}, false, }, { "name1=value1,name2=value2,", - map[string]interface{}{"name1": "value1", "name2": "value2"}, + map[string]any{"name1": "value1", "name2": "value2"}, false, }, { @@ -159,27 +159,27 @@ func TestParseSet(t *testing.T) { }, { str: "name1=,name2=value2", - expect: map[string]interface{}{"name1": "", "name2": "value2"}, + expect: map[string]any{"name1": "", "name2": "value2"}, }, { str: "leading_zeros=00009", - expect: map[string]interface{}{"leading_zeros": "00009"}, + expect: map[string]any{"leading_zeros": "00009"}, }, { str: "zero_int=0", 
- expect: map[string]interface{}{"zero_int": 0}, + expect: map[string]any{"zero_int": 0}, }, { str: "long_int=1234567890", - expect: map[string]interface{}{"long_int": 1234567890}, + expect: map[string]any{"long_int": 1234567890}, }, { str: "boolean=true", - expect: map[string]interface{}{"boolean": true}, + expect: map[string]any{"boolean": true}, }, { str: "is_null=null", - expect: map[string]interface{}{"is_null": nil}, + expect: map[string]any{"is_null": nil}, err: false, }, { @@ -200,40 +200,40 @@ func TestParseSet(t *testing.T) { }, { "name1=one\\,two,name2=three\\,four", - map[string]interface{}{"name1": "one,two", "name2": "three,four"}, + map[string]any{"name1": "one,two", "name2": "three,four"}, false, }, { "name1=one\\=two,name2=three\\=four", - map[string]interface{}{"name1": "one=two", "name2": "three=four"}, + map[string]any{"name1": "one=two", "name2": "three=four"}, false, }, { "name1=one two three,name2=three two one", - map[string]interface{}{"name1": "one two three", "name2": "three two one"}, + map[string]any{"name1": "one two three", "name2": "three two one"}, false, }, { "outer.inner=value", - map[string]interface{}{"outer": map[string]interface{}{"inner": "value"}}, + map[string]any{"outer": map[string]any{"inner": "value"}}, false, }, { "outer.middle.inner=value", - map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}}, + map[string]any{"outer": map[string]any{"middle": map[string]any{"inner": "value"}}}, false, }, { "outer.inner1=value,outer.inner2=value2", - map[string]interface{}{"outer": map[string]interface{}{"inner1": "value", "inner2": "value2"}}, + map[string]any{"outer": map[string]any{"inner1": "value", "inner2": "value2"}}, false, }, { "outer.inner1=value,outer.middle.inner=value", - map[string]interface{}{ - "outer": map[string]interface{}{ + map[string]any{ + "outer": map[string]any{ "inner1": "value", - "middle": map[string]interface{}{ + "middle": map[string]any{ "inner": 
"value", }, }, @@ -250,7 +250,7 @@ func TestParseSet(t *testing.T) { }, { str: "name1.name2=", - expect: map[string]interface{}{"name1": map[string]interface{}{"name2": ""}}, + expect: map[string]any{"name1": map[string]any{"name2": ""}}, }, { str: "name1.=name2", @@ -262,12 +262,12 @@ func TestParseSet(t *testing.T) { }, { "name1={value1,value2}", - map[string]interface{}{"name1": []string{"value1", "value2"}}, + map[string]any{"name1": []string{"value1", "value2"}}, false, }, { "name1={value1,value2},name2={value1,value2}", - map[string]interface{}{ + map[string]any{ "name1": []string{"value1", "value2"}, "name2": []string{"value1", "value2"}, }, @@ -275,12 +275,12 @@ func TestParseSet(t *testing.T) { }, { "name1={1021,902}", - map[string]interface{}{"name1": []int{1021, 902}}, + map[string]any{"name1": []int{1021, 902}}, false, }, { "name1.name2={value1,value2}", - map[string]interface{}{"name1": map[string]interface{}{"name2": []string{"value1", "value2"}}}, + map[string]any{"name1": map[string]any{"name2": []string{"value1", "value2"}}}, false, }, { @@ -290,21 +290,21 @@ func TestParseSet(t *testing.T) { // List support { str: "list[0]=foo", - expect: map[string]interface{}{"list": []string{"foo"}}, + expect: map[string]any{"list": []string{"foo"}}, }, { str: "list[0].foo=bar", - expect: map[string]interface{}{ - "list": []interface{}{ - map[string]interface{}{"foo": "bar"}, + expect: map[string]any{ + "list": []any{ + map[string]any{"foo": "bar"}, }, }, }, { str: "list[0].foo=bar,list[0].hello=world", - expect: map[string]interface{}{ - "list": []interface{}{ - map[string]interface{}{"foo": "bar", "hello": "world"}, + expect: map[string]any{ + "list": []any{ + map[string]any{"foo": "bar", "hello": "world"}, }, }, }, @@ -314,15 +314,15 @@ func TestParseSet(t *testing.T) { }, { str: "list[0]=foo,list[1]=bar", - expect: map[string]interface{}{"list": []string{"foo", "bar"}}, + expect: map[string]any{"list": []string{"foo", "bar"}}, }, { str: 
"list[0]=foo,list[1]=bar,", - expect: map[string]interface{}{"list": []string{"foo", "bar"}}, + expect: map[string]any{"list": []string{"foo", "bar"}}, }, { str: "list[0]=foo,list[3]=bar", - expect: map[string]interface{}{"list": []interface{}{"foo", nil, nil, "bar"}}, + expect: map[string]any{"list": []any{"foo", nil, nil, "bar"}}, }, { str: "list[0]=foo,list[-20]=bar", @@ -334,41 +334,41 @@ func TestParseSet(t *testing.T) { }, { str: "noval[0]", - expect: map[string]interface{}{"noval": []interface{}{}}, + expect: map[string]any{"noval": []any{}}, }, { str: "noval[0]=", - expect: map[string]interface{}{"noval": []interface{}{""}}, + expect: map[string]any{"noval": []any{""}}, }, { str: "nested[0][0]=1", - expect: map[string]interface{}{"nested": []interface{}{[]interface{}{1}}}, + expect: map[string]any{"nested": []any{[]any{1}}}, }, { str: "nested[1][1]=1", - expect: map[string]interface{}{"nested": []interface{}{nil, []interface{}{nil, 1}}}, + expect: map[string]any{"nested": []any{nil, []any{nil, 1}}}, }, { str: "name1.name2[0].foo=bar,name1.name2[1].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{{"foo": "bar"}, {"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{{"foo": "bar"}, {"foo": "bar"}}, }, }, }, { str: "name1.name2[1].foo=bar,name1.name2[0].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{{"foo": "bar"}, {"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{{"foo": "bar"}, {"foo": "bar"}}, }, }, }, { str: "name1.name2[1].foo=bar", - expect: map[string]interface{}{ - "name1": map[string]interface{}{ - "name2": []map[string]interface{}{nil, {"foo": "bar"}}, + expect: map[string]any{ + "name1": map[string]any{ + "name2": []map[string]any{nil, {"foo": "bar"}}, }, }, }, @@ -434,20 +434,20 @@ func TestParseInto(t *testing.T) { tests := 
[]struct { input string input2 string - got map[string]interface{} - expect map[string]interface{} + got map[string]any + expect map[string]any err bool }{ { input: "outer.inner1=value1,outer.inner3=value3,outer.inner4=4", - got: map[string]interface{}{ - "outer": map[string]interface{}{ + got: map[string]any{ + "outer": map[string]any{ "inner1": "overwrite", "inner2": "value2", }, }, - expect: map[string]interface{}{ - "outer": map[string]interface{}{ + expect: map[string]any{ + "outer": map[string]any{ "inner1": "value1", "inner2": "value2", "inner3": "value3", @@ -458,9 +458,9 @@ func TestParseInto(t *testing.T) { { input: "listOuter[0][0].type=listValue", input2: "listOuter[0][0].status=alive", - got: map[string]interface{}{}, - expect: map[string]interface{}{ - "listOuter": [][]interface{}{{map[string]string{ + got: map[string]any{}, + expect: map[string]any{ + "listOuter": [][]any{{map[string]string{ "type": "listValue", "status": "alive", }}}, @@ -470,9 +470,9 @@ func TestParseInto(t *testing.T) { { input: "listOuter[0][0].type=listValue", input2: "listOuter[1][0].status=alive", - got: map[string]interface{}{}, - expect: map[string]interface{}{ - "listOuter": [][]interface{}{ + got: map[string]any{}, + expect: map[string]any{ + "listOuter": [][]any{ { map[string]string{"type": "listValue"}, }, @@ -486,17 +486,17 @@ func TestParseInto(t *testing.T) { { input: "listOuter[0][1][0].type=listValue", input2: "listOuter[0][0][1].status=alive", - got: map[string]interface{}{ - "listOuter": []interface{}{ - []interface{}{ - []interface{}{ + got: map[string]any{ + "listOuter": []any{ + []any{ + []any{ map[string]string{"exited": "old"}, }, }, }, }, - expect: map[string]interface{}{ - "listOuter": [][][]interface{}{ + expect: map[string]any{ + "listOuter": [][][]any{ { { map[string]string{"exited": "old"}, @@ -544,15 +544,15 @@ func TestParseInto(t *testing.T) { } func TestParseIntoString(t *testing.T) { - got := map[string]interface{}{ - "outer": 
map[string]interface{}{ + got := map[string]any{ + "outer": map[string]any{ "inner1": "overwrite", "inner2": "value2", }, } input := "outer.inner1=1,outer.inner3=3" - expect := map[string]interface{}{ - "outer": map[string]interface{}{ + expect := map[string]any{ + "outer": map[string]any{ "inner1": "1", "inner2": "value2", "inner3": "3", @@ -580,20 +580,20 @@ func TestParseIntoString(t *testing.T) { func TestParseJSON(t *testing.T) { tests := []struct { input string - got map[string]interface{} - expect map[string]interface{} + got map[string]any + expect map[string]any err bool }{ { // set json scalars values, and replace one existing key input: "outer.inner1=\"1\",outer.inner3=3,outer.inner4=true,outer.inner5=\"true\"", - got: map[string]interface{}{ - "outer": map[string]interface{}{ + got: map[string]any{ + "outer": map[string]any{ "inner1": "overwrite", "inner2": "value2", }, }, - expect: map[string]interface{}{ - "outer": map[string]interface{}{ + expect: map[string]any{ + "outer": map[string]any{ "inner1": "1", "inner2": "value2", "inner3": 3, @@ -605,43 +605,43 @@ func TestParseJSON(t *testing.T) { }, { // set json objects and arrays, and replace one existing key input: "outer.inner1={\"a\":\"1\",\"b\":2,\"c\":[1,2,3]},outer.inner3=[\"new value 1\",\"new value 2\"],outer.inner4={\"aa\":\"1\",\"bb\":2,\"cc\":[1,2,3]},outer.inner5=[{\"A\":\"1\",\"B\":2,\"C\":[1,2,3]}]", - got: map[string]interface{}{ - "outer": map[string]interface{}{ - "inner1": map[string]interface{}{ + got: map[string]any{ + "outer": map[string]any{ + "inner1": map[string]any{ "x": "overwrite", }, "inner2": "value2", - "inner3": []interface{}{ + "inner3": []any{ "overwrite", }, }, }, - expect: map[string]interface{}{ - "outer": map[string]interface{}{ - "inner1": map[string]interface{}{"a": "1", "b": 2, "c": []interface{}{1, 2, 3}}, + expect: map[string]any{ + "outer": map[string]any{ + "inner1": map[string]any{"a": "1", "b": 2, "c": []any{1, 2, 3}}, "inner2": "value2", - "inner3": 
[]interface{}{"new value 1", "new value 2"}, - "inner4": map[string]interface{}{"aa": "1", "bb": 2, "cc": []interface{}{1, 2, 3}}, - "inner5": []interface{}{map[string]interface{}{"A": "1", "B": 2, "C": []interface{}{1, 2, 3}}}, + "inner3": []any{"new value 1", "new value 2"}, + "inner4": map[string]any{"aa": "1", "bb": 2, "cc": []any{1, 2, 3}}, + "inner5": []any{map[string]any{"A": "1", "B": 2, "C": []any{1, 2, 3}}}, }, }, err: false, }, { // null assignment, and no value assigned (equivalent to null) input: "outer.inner1=,outer.inner3={\"aa\":\"1\",\"bb\":2,\"cc\":[1,2,3]},outer.inner3.cc[1]=null", - got: map[string]interface{}{ - "outer": map[string]interface{}{ - "inner1": map[string]interface{}{ + got: map[string]any{ + "outer": map[string]any{ + "inner1": map[string]any{ "x": "overwrite", }, "inner2": "value2", }, }, - expect: map[string]interface{}{ - "outer": map[string]interface{}{ + expect: map[string]any{ + "outer": map[string]any{ "inner1": nil, "inner2": "value2", - "inner3": map[string]interface{}{"aa": "1", "bb": 2, "cc": []interface{}{1, nil, 3}}, + "inner3": map[string]any{"aa": "1", "bb": 2, "cc": []any{1, nil, 3}}, }, }, err: false, @@ -680,10 +680,10 @@ func TestParseJSON(t *testing.T) { func TestParseFile(t *testing.T) { input := "name1=path1" - expect := map[string]interface{}{ + expect := map[string]any{ "name1": "value1", } - rs2v := func(rs []rune) (interface{}, error) { + rs2v := func(rs []rune) (any, error) { v := string(rs) if v != "path1" { t.Errorf("%s: runesToVal: Expected value path1, got %s", input, v) @@ -712,12 +712,12 @@ func TestParseFile(t *testing.T) { } func TestParseIntoFile(t *testing.T) { - got := map[string]interface{}{} + got := map[string]any{} input := "name1=path1" - expect := map[string]interface{}{ + expect := map[string]any{ "name1": "value1", } - rs2v := func(rs []rune) (interface{}, error) { + rs2v := func(rs []rune) (any, error) { v := string(rs) if v != "path1" { t.Errorf("%s: runesToVal: Expected value path1, 
got %s", input, v) @@ -768,13 +768,13 @@ func TestParseSetNestedLevels(t *testing.T) { } tests := []struct { str string - expect map[string]interface{} + expect map[string]any err bool errStr string }{ { "outer.middle.inner=value", - map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}}, + map[string]any{"outer": map[string]any{"middle": map[string]any{"inner": "value"}}}, false, "", }, diff --git a/scripts/release-notes.sh b/scripts/release-notes.sh index 48328cb38..763c07c03 100755 --- a/scripts/release-notes.sh +++ b/scripts/release-notes.sh @@ -20,10 +20,10 @@ PREVIOUS_RELEASE=${PREVIOUS_RELEASE:-$1} ## Ensure Correct Usage if [[ -z "${PREVIOUS_RELEASE}" || -z "${RELEASE}" ]]; then echo Usage: - echo ./scripts/release-notes.sh v3.0.0 v3.1.0 + echo ./scripts/release-notes.sh v4.0.0 v4.1.0 echo or - echo PREVIOUS_RELEASE=v3.0.0 - echo RELEASE=v3.1.0 + echo PREVIOUS_RELEASE=v4.0.0 + echo RELEASE=v4.1.0 echo ./scripts/release-notes.sh exit 1 fi @@ -94,7 +94,7 @@ Download Helm ${RELEASE}. The common platform binaries are here: - [Windows amd64](https://get.helm.sh/helm-${RELEASE}-windows-amd64.zip) ([checksum](https://get.helm.sh/helm-${RELEASE}-windows-amd64.zip.sha256sum) / $(cat _dist/helm-${RELEASE}-windows-amd64.zip.sha256)) - [Windows arm64](https://get.helm.sh/helm-${RELEASE}-windows-arm64.zip) ([checksum](https://get.helm.sh/helm-${RELEASE}-windows-arm64.zip.sha256sum) / $(cat _dist/helm-${RELEASE}-windows-arm64.zip.sha256)) -The [Quickstart Guide](https://helm.sh/docs/intro/quickstart/) will get you going from there. For **upgrade instructions** or detailed installation notes, check the [install guide](https://helm.sh/docs/intro/install/). You can also use a [script to install](https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3) on any system with \`bash\`. +The [Quickstart Guide](https://helm.sh/docs/intro/quickstart/) will get you going from there. 
For **upgrade instructions** or detailed installation notes, check the [install guide](https://helm.sh/docs/intro/install/). You can also use a [script to install](https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-4) on any system with \`bash\`. ## What's Next diff --git a/scripts/sync-repo.sh b/scripts/sync-repo.sh deleted file mode 100755 index 453102072..000000000 --- a/scripts/sync-repo.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bash 'Strict Mode' -# http://redsymbol.net/articles/unofficial-bash-strict-mode -set -euo pipefail -IFS=$'\n\t' - -# Helper Functions ------------------------------------------------------------- - -# Display error message and exit -error_exit() { - echo "error: ${1:-"unknown error"}" 1>&2 - exit 1 -} - -# Checks if a command exists. Returns 1 or 0 -command_exists() { - hash "${1}" 2>/dev/null -} - -# Program Functions ------------------------------------------------------------ - -verify_prereqs() { - echo "Verifying Prerequisites...." - if command_exists gsutil; then - echo "Thumbs up! Looks like you have gsutil. Let's continue." - else - error_exit "Couldn't find gsutil. Bailing out." 
- fi -} - -confirm() { - case $response in - [yY][eE][sS]|[yY]) - true - ;; - *) - false - ;; - esac -} - -# Main ------------------------------------------------------------------------- - -main() { - if [ "$#" -ne 2 ]; then - error_exit "Illegal number of parameters. You must pass in local directory path and a GCS bucket name" - fi - - echo "Getting ready to sync your local directory ($1) to a remote repository at gs://$2" - - verify_prereqs - - # dry run of the command - gsutil rsync -d -n $1 gs://$2 - - read -p "Are you sure you would like to continue with these changes? [y/N]} " confirm - if [[ $confirm =~ [yY](es)* ]]; then - gsutil rsync -d $1 gs://$2 - else - error_exit "Discontinuing sync process." - fi - - echo "Your remote chart repository now matches the contents of the $1 directory!" - -} - -main "${@:-}"