diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml
new file mode 100644
index 000000000..4309d800b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yaml
@@ -0,0 +1,69 @@
+name: Bug Report
+description: Report a bug encountered in Helm
+labels: kind/bug
+body:
+ - type: textarea
+ id: problem
+ attributes:
+ label: What happened?
+ description: |
+ Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
+ validations:
+ required: true
+
+ - type: textarea
+ id: expected
+ attributes:
+ label: What did you expect to happen?
+ validations:
+ required: true
+
+ - type: textarea
+ id: repro
+ attributes:
+ label: How can we reproduce it (as minimally and precisely as possible)?
+ description: |
+ Please list steps someone can follow to trigger the issue.
+
+ For example:
+ 1. Run `helm install mychart ./path-to-chart -f values.yaml --debug`
+ 2. Observe the following error: ...
+
+ You can include:
+ - a sample `values.yaml` block
+ - a link to a chart
+ - specific `helm` commands used
+
+ This helps others reproduce and debug your issue more effectively.
+ validations:
+ required: true
+
+ - type: textarea
+ id: helmVersion
+ attributes:
+ label: Helm version
+ value: |
+
+ ```console
+ $ helm version
+ # paste output here
+ ```
+
+ validations:
+ required: true
+
+ - type: textarea
+ id: kubeVersion
+ attributes:
+ label: Kubernetes version
+ value: |
+
+
+ ```console
+ $ kubectl version
+ # paste output here
+ ```
+
+
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/documentation.yaml b/.github/ISSUE_TEMPLATE/documentation.yaml
new file mode 100644
index 000000000..bb1b7537c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.yaml
@@ -0,0 +1,27 @@
+name: Documentation
+description: Report any mistakes or missing information from the documentation or the examples
+labels: kind/documentation
+body:
+ - type: markdown
+ attributes:
+ value: |
+ ⚠️ **Note**: Most documentation lives in [helm/helm-www](https://github.com/helm/helm-www).
+ If your issue is about Helm website documentation or examples, please [open an issue there](https://github.com/helm/helm-www/issues/new/choose).
+
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would you like to be added?
+ description: |
+ Link to the issue (please include a link to the specific documentation or example).
+ Link to the issue raised in [Helm Documentation Improvement Proposal](https://github.com/helm/helm-www)
+ validations:
+ required: true
+
+ - type: textarea
+ id: rationale
+ attributes:
+ label: Why is this needed?
+ validations:
+ required: true
+
diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml
new file mode 100644
index 000000000..45b9c3f94
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature.yaml
@@ -0,0 +1,21 @@
+name: Enhancement/feature
+description: Provide supporting details for a feature in development
+labels: kind/feature
+body:
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would you like to be added?
+ description: |
+ Feature requests are unlikely to make progress as issues.
+ Initial discussion and ideas can happen on an issue.
+ But significant changes or features must be proposed as a [Helm Improvement Proposal](https://github.com/helm/community/blob/main/hips/hip-0001.md) (HIP)
+ validations:
+ required: true
+
+ - type: textarea
+ id: rationale
+ attributes:
+ label: Why is this needed?
+ validations:
+ required: true
diff --git a/.github/env b/.github/env
new file mode 100644
index 000000000..4384ba074
--- /dev/null
+++ b/.github/env
@@ -0,0 +1,2 @@
+GOLANG_VERSION=1.24
+GOLANGCI_LINT_VERSION=v2.1.0
diff --git a/.github/issue_template.md b/.github/issue_template.md
deleted file mode 100644
index 48f48e5b6..000000000
--- a/.github/issue_template.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-Output of `helm version`:
-
-Output of `kubectl version`:
-
-Cloud Provider/Platform (AKS, GKE, Minikube etc.):
-
-
diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml
index 2ccea3d0e..dbd885350 100644
--- a/.github/workflows/build-test.yml
+++ b/.github/workflows/build-test.yml
@@ -18,14 +18,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout source code
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # pin@5.5.0
with:
- go-version: '1.23'
+ go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true
- name: Test source headers are present
run: make test-source-headers
+ - name: Check if go modules need to be tidied
+ run: go mod tidy -diff
- name: Run unit tests
run: make test-coverage
- name: Test build
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 9a6aeb582..c1a2bff20 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -43,7 +43,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 5971ada24..0d5b4e969 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -13,14 +13,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2
-
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # pin@5.5.0
with:
- go-version: '1.23'
+ go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true
- name: golangci-lint
- uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 #pin@6.5.0
+ uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 #pin@8.0.0
with:
- version: v1.62
+ version: ${{ env.GOLANGCI_LINT_VERSION }}
diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml
index f8572f2d6..84d260a8f 100644
--- a/.github/workflows/govulncheck.yml
+++ b/.github/workflows/govulncheck.yml
@@ -13,10 +13,14 @@ jobs:
name: govulncheck
runs-on: ubuntu-latest
steps:
+ - name: Checkout
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # pin@5.5.0
with:
- go-version: '1.23'
+ go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true
- name: govulncheck
uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # pin@1.0.4
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c5e7c6840..21c527442 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -20,18 +20,19 @@ jobs:
runs-on: ubuntu-latest-16-cores
steps:
- name: Checkout source code
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
with:
fetch-depth: 0
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+
- name: Setup Go
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # pin@5.5.0
with:
- go-version: '1.23'
-
+ go-version: '${{ env.GOLANG_VERSION }}'
- name: Run unit tests
run: make test-coverage
-
- name: Build Helm Binaries
run: |
set -eu -o pipefail
@@ -78,12 +79,15 @@ jobs:
if: github.ref == 'refs/heads/main'
steps:
- name: Checkout source code
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0
+
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
- name: Setup Go
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # pin@5.3.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # pin@5.5.0
with:
- go-version: '1.23'
+ go-version: '${{ env.GOLANG_VERSION }}'
check-latest: true
- name: Run unit tests
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index f89fcd98c..6a44c8afb 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -28,12 +28,12 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
+ uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
with:
results_file: results.sarif
results_format: sarif
@@ -55,7 +55,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
- uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: SARIF file
path: results.sarif
diff --git a/.github/workflows/stale-issue-bot.yaml b/.github/workflows/stale.yaml
similarity index 60%
rename from .github/workflows/stale-issue-bot.yaml
rename to .github/workflows/stale.yaml
index 613d2900c..965410793 100644
--- a/.github/workflows/stale-issue-bot.yaml
+++ b/.github/workflows/stale.yaml
@@ -2,18 +2,17 @@ name: "Close stale issues"
on:
schedule:
- cron: "0 0 * * *"
-permissions:
- contents: read
jobs:
stale:
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+ - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue has been marked as stale because it has been open for 90 days with no activity. This thread will be automatically closed in 30 days if no further activity occurs.'
+ stale-pr-message: 'This pull request has been marked as stale because it has been open for 90 days with no activity. This pull request will be automatically closed in 30 days if no further activity occurs.'
exempt-issue-labels: 'keep open,v4.x,in progress'
days-before-stale: 90
days-before-close: 30
- operations-per-run: 100
+ operations-per-run: 200
diff --git a/.gitignore b/.gitignore
index 75698e993..0fd2c6bda 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,7 +2,7 @@
*.swp
.DS_Store
.coverage/
-.idea/
+.idea
.vimrc
.vscode/
.devcontainer/
@@ -12,4 +12,5 @@ bin/
vendor/
# Ignores charts pulled for dependency build tests
cmd/helm/testdata/testcharts/issue-7233/charts/*
+pkg/cmd/testdata/testcharts/issue-7233/charts/*
.pre-commit-config.yaml
diff --git a/.golangci.yml b/.golangci.yml
index ff0dad5f6..3df31b997 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,45 +1,75 @@
-run:
- timeout: 10m
+formatters:
+ enable:
+ - gofmt
+ - goimports
+
+ exclusions:
+ generated: lax
+
+ settings:
+ gofmt:
+ simplify: true
+
+ goimports:
+ local-prefixes:
+ - helm.sh/helm/v4
linters:
- disable-all: true
+ default: none
+
enable:
+ - depguard
- dupl
- - gofmt
- - goimports
- - gosimple
+ - gomodguard
- govet
- ineffassign
- misspell
- nakedret
- revive
- - unused
- staticcheck
+ - thelper
+ - unused
+ - usestdlibvars
+ - usetesting
+
+ exclusions:
+
+ generated: lax
+
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+
+ rules:
+ - linters:
+ - revive
+ text: 'var-naming: avoid meaningless package names'
+
+ warn-unused: true
+
+ settings:
+ depguard:
+ rules:
+ Main:
+ deny:
+ - pkg: github.com/hashicorp/go-multierror
+ desc: "use errors instead"
+ - pkg: github.com/pkg/errors
+ desc: "use errors instead"
+
+ dupl:
+ threshold: 400
+
+ gomodguard:
+ blocked:
+ modules:
+ - github.com/evanphx/json-patch:
+ recommendations:
+ - github.com/evanphx/json-patch/v5
+
+run:
+ timeout: 10m
-linters-settings:
- gofmt:
- simplify: true
- goimports:
- local-prefixes: helm.sh/helm/v4
- dupl:
- threshold: 400
-issues:
- exclude-rules:
- # Helm, and the Go source code itself, sometimes uses these names outside their built-in
- # functions. As the Go source code has re-used these names it's ok for Helm to do the same.
- # Linting will look for redefinition of built-in id's but we opt-in to the ones we choose to use.
- - linters:
- - revive
- text: "redefines-builtin-id: redefinition of the built-in function append"
- - linters:
- - revive
- text: "redefines-builtin-id: redefinition of the built-in function clear"
- - linters:
- - revive
- text: "redefines-builtin-id: redefinition of the built-in function max"
- - linters:
- - revive
- text: "redefines-builtin-id: redefinition of the built-in function min"
- - linters:
- - revive
- text: "redefines-builtin-id: redefinition of the built-in function new"
+version: "2"
diff --git a/Makefile b/Makefile
index 21144cf5a..e3e6cb538 100644
--- a/Makefile
+++ b/Makefile
@@ -13,15 +13,15 @@ GOX = $(GOBIN)/gox
GOIMPORTS = $(GOBIN)/goimports
ARCH = $(shell go env GOARCH)
-ACCEPTANCE_DIR:=../acceptance-testing
+ACCEPTANCE_DIR := ../acceptance-testing
# To specify the subset of acceptance tests to run. '.' means all tests
-ACCEPTANCE_RUN_TESTS=.
+ACCEPTANCE_RUN_TESTS = .
# go option
PKG := ./...
TAGS :=
TESTS := .
-TESTFLAGS :=
+TESTFLAGS := -shuffle=on -count=1
LDFLAGS := -w -s
GOFLAGS :=
CGO_ENABLED ?= 0
@@ -63,10 +63,12 @@ K8S_MODULES_VER=$(subst ., ,$(subst v,,$(shell go list -f '{{.Version}}' -m k8s.
K8S_MODULES_MAJOR_VER=$(shell echo $$(($(firstword $(K8S_MODULES_VER)) + 1)))
K8S_MODULES_MINOR_VER=$(word 2,$(K8S_MODULES_VER))
-LDFLAGS += -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/util.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
-LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/util.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common/util.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER)
+LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common/util.k8sVersionMinor=$(K8S_MODULES_MINOR_VER)
.PHONY: all
all: build
@@ -75,7 +77,7 @@ all: build
# build
.PHONY: build
-build: $(BINDIR)/$(BINNAME)
+build: $(BINDIR)/$(BINNAME) tidy
$(BINDIR)/$(BINNAME): $(SRC)
CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -trimpath -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)'/$(BINNAME) ./cmd/helm
@@ -112,14 +114,16 @@ test-unit:
# based on older versions, this is run separately. When run without the ldflags in the unit test (above) or coverage
# test, it still passes with a false-positive result as the resources shouldn’t be deprecated in the older Kubernetes
# version if it only starts failing with the latest.
- go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./pkg/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
+ go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./pkg/chart/v2/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
+ go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./internal/chart/v3/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
+# To run the coverage for a specific package use: make test-coverage PKG=./pkg/action
.PHONY: test-coverage
test-coverage:
@echo
- @echo "==> Running unit tests with coverage <=="
- @ ./scripts/coverage.sh
+ @echo "==> Running unit tests with coverage: $(PKG) <=="
+ @ ./scripts/coverage.sh $(PKG)
.PHONY: test-style
test-style:
@@ -145,10 +149,6 @@ test-acceptance: build build-cross
test-acceptance-completion: ACCEPTANCE_RUN_TESTS = shells.robot
test-acceptance-completion: test-acceptance
-.PHONY: coverage
-coverage:
- @scripts/coverage.sh
-
.PHONY: format
format: $(GOIMPORTS)
go list -f '{{.Dir}}' ./... | xargs $(GOIMPORTS) -w -local helm.sh/helm
@@ -156,7 +156,7 @@ format: $(GOIMPORTS)
# Generate golden files used in unit tests
.PHONY: gen-test-golden
gen-test-golden:
-gen-test-golden: PKG = ./cmd/helm ./pkg/action
+gen-test-golden: PKG = ./pkg/cmd ./pkg/action
gen-test-golden: TESTFLAGS = -update
gen-test-golden: test-unit
@@ -227,22 +227,23 @@ clean:
.PHONY: release-notes
release-notes:
- @if [ ! -d "./_dist" ]; then \
- echo "please run 'make fetch-dist' first" && \
- exit 1; \
- fi
- @if [ -z "${PREVIOUS_RELEASE}" ]; then \
- echo "please set PREVIOUS_RELEASE environment variable" \
- && exit 1; \
- fi
-
- @./scripts/release-notes.sh ${PREVIOUS_RELEASE} ${VERSION}
-
-
+ @if [ ! -d "./_dist" ]; then \
+ echo "please run 'make fetch-dist' first" && \
+ exit 1; \
+ fi
+ @if [ -z "${PREVIOUS_RELEASE}" ]; then \
+ echo "please set PREVIOUS_RELEASE environment variable" && \
+ exit 1; \
+ fi
+ @./scripts/release-notes.sh ${PREVIOUS_RELEASE} ${VERSION}
.PHONY: info
info:
- @echo "Version: ${VERSION}"
- @echo "Git Tag: ${GIT_TAG}"
- @echo "Git Commit: ${GIT_COMMIT}"
- @echo "Git Tree State: ${GIT_DIRTY}"
+ @echo "Version: ${VERSION}"
+ @echo "Git Tag: ${GIT_TAG}"
+ @echo "Git Commit: ${GIT_COMMIT}"
+ @echo "Git Tree State: ${GIT_DIRTY}"
+
+.PHONY: tidy
+tidy:
+ go mod tidy
diff --git a/OWNERS b/OWNERS
index de3e4e6a6..761cf76a3 100644
--- a/OWNERS
+++ b/OWNERS
@@ -9,6 +9,7 @@ maintainers:
- technosophos
triage:
- banjoh
+ - TerryHowe
- yxxhero
- zonggen
- z4ce
diff --git a/README.md b/README.md
index 5f4d71d4c..66fdab041 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@
[](https://pkg.go.dev/helm.sh/helm/v4)
[](https://bestpractices.coreinfrastructure.org/projects/3131)
[](https://scorecard.dev/viewer/?uri=github.com/helm/helm)
+[](https://insights.linuxfoundation.org/project/helm)
Helm is a tool for managing Charts. Charts are packages of pre-configured Kubernetes resources.
@@ -56,7 +57,7 @@ including installing pre-releases.
## Docs
-Get started with the [Quick Start guide](https://helm.sh/docs/intro/quickstart/) or plunge into the [complete documentation](https://helm.sh/docs)
+Get started with the [Quick Start guide](https://helm.sh/docs/intro/quickstart/) or plunge into the [complete documentation](https://helm.sh/docs).
## Roadmap
diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go
index da6a5c54e..66d342500 100644
--- a/cmd/helm/helm.go
+++ b/cmd/helm/helm.go
@@ -17,7 +17,7 @@ limitations under the License.
package main // import "helm.sh/helm/v4/cmd/helm"
import (
- "log"
+ "log/slog"
"os"
// Import to initialize client auth plugins.
@@ -27,10 +27,6 @@ import (
"helm.sh/helm/v4/pkg/kube"
)
-func init() {
- log.SetFlags(log.Lshortfile)
-}
-
func main() {
// Setting the name of the app for managedFields in the Kubernetes client.
// It is set here to the full name of "helm" so that renaming of helm to
@@ -38,19 +34,16 @@ func main() {
// manager as picked up by the automated name detection.
kube.ManagedFieldsManager = "helm"
- cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:])
+ cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:], helmcmd.SetupLogging)
if err != nil {
- helmcmd.Warning("%+v", err)
+ slog.Warn("command failed", slog.Any("error", err))
os.Exit(1)
}
if err := cmd.Execute(); err != nil {
- helmcmd.Debug("%+v", err)
- switch e := err.(type) {
- case helmcmd.PluginError:
- os.Exit(e.Code)
- default:
- os.Exit(1)
+ if cerr, ok := err.(helmcmd.CommandError); ok {
+ os.Exit(cerr.ExitCode)
}
+ os.Exit(1)
}
}
diff --git a/cmd/helm/helm_test.go b/cmd/helm/helm_test.go
index 5431daad0..0458e8037 100644
--- a/cmd/helm/helm_test.go
+++ b/cmd/helm/helm_test.go
@@ -22,11 +22,13 @@ import (
"os/exec"
"runtime"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
-func TestPluginExitCode(t *testing.T) {
+func TestCliPluginExitCode(t *testing.T) {
if os.Getenv("RUN_MAIN_FOR_TESTING") == "1" {
- os.Args = []string{"helm", "exitwith", "2"}
+ os.Args = []string{"helm", "exitwith", "43"}
// We DO call helm's main() here. So this looks like a normal `helm` process.
main()
@@ -43,7 +45,7 @@ func TestPluginExitCode(t *testing.T) {
// So that the second run is able to run main() and this first run can verify the exit status returned by that.
//
// This technique originates from https://talks.golang.org/2014/testing.slide#23.
- cmd := exec.Command(os.Args[0], "-test.run=TestPluginExitCode")
+ cmd := exec.Command(os.Args[0], "-test.run=TestCliPluginExitCode")
cmd.Env = append(
os.Environ(),
"RUN_MAIN_FOR_TESTING=1",
@@ -57,23 +59,21 @@ func TestPluginExitCode(t *testing.T) {
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
- exiterr, ok := err.(*exec.ExitError)
+ exiterr, ok := err.(*exec.ExitError)
if !ok {
- t.Fatalf("Unexpected error returned by os.Exit: %T", err)
+ t.Fatalf("Unexpected error type returned by os.Exit: %T", err)
}
- if stdout.String() != "" {
- t.Errorf("Expected no write to stdout: Got %q", stdout.String())
- }
+ assert.Empty(t, stdout.String())
expectedStderr := "Error: plugin \"exitwith\" exited with error\n"
if stderr.String() != expectedStderr {
t.Errorf("Expected %q written to stderr: Got %q", expectedStderr, stderr.String())
}
- if exiterr.ExitCode() != 2 {
- t.Errorf("Expected exit code 2: Got %d", exiterr.ExitCode())
+ if exiterr.ExitCode() != 43 {
+ t.Errorf("Expected exit code 43: Got %d", exiterr.ExitCode())
}
}
}
diff --git a/go.mod b/go.mod
index cefaac3c7..77e761de2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,52 +1,56 @@
module helm.sh/helm/v4
-go 1.23.0
+go 1.24.0
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
- github.com/BurntSushi/toml v1.4.0
+ github.com/BurntSushi/toml v1.5.0
github.com/DATA-DOG/go-sqlmock v1.5.2
- github.com/Masterminds/semver/v3 v3.3.0
+ github.com/Masterminds/semver/v3 v3.4.0
github.com/Masterminds/sprig/v3 v3.3.0
github.com/Masterminds/squirrel v1.5.4
github.com/Masterminds/vcs v1.13.3
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
- github.com/containerd/containerd v1.7.26
github.com/cyphar/filepath-securejoin v0.4.1
- github.com/distribution/distribution/v3 v3.0.0-rc.3
- github.com/evanphx/json-patch v5.9.11+incompatible
+ github.com/distribution/distribution/v3 v3.0.0
+ github.com/evanphx/json-patch/v5 v5.9.11
+ github.com/extism/go-sdk v1.7.1
+ github.com/fatih/color v1.18.0
+ github.com/fluxcd/cli-utils v0.36.0-flux.14
github.com/foxcpp/go-mockdns v1.1.0
github.com/gobwas/glob v0.2.3
github.com/gofrs/flock v0.12.1
github.com/gosuri/uitable v0.0.4
- github.com/hashicorp/go-multierror v1.1.1
github.com/jmoiron/sqlx v1.4.0
github.com/lib/pq v1.10.9
github.com/mattn/go-shellwords v1.0.12
github.com/mitchellh/copystructure v1.2.0
github.com/moby/term v0.5.2
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
- github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
- github.com/pkg/errors v0.9.1
- github.com/rubenv/sql-migrate v1.7.1
- github.com/spf13/cobra v1.9.1
- github.com/spf13/pflag v1.0.6
- github.com/stretchr/testify v1.10.0
- github.com/xeipuuv/gojsonschema v1.2.0
- golang.org/x/crypto v0.36.0
- golang.org/x/term v0.30.0
- golang.org/x/text v0.23.0
- gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.32.2
- k8s.io/apiextensions-apiserver v0.32.2
- k8s.io/apimachinery v0.32.2
- k8s.io/apiserver v0.32.2
- k8s.io/cli-runtime v0.32.2
- k8s.io/client-go v0.32.2
+ github.com/rubenv/sql-migrate v1.8.0
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
+ github.com/spf13/cobra v1.10.1
+ github.com/spf13/pflag v1.0.10
+ github.com/stretchr/testify v1.11.1
+ github.com/tetratelabs/wazero v1.9.0
+ go.yaml.in/yaml/v3 v3.0.4
+ golang.org/x/crypto v0.42.0
+ golang.org/x/term v0.35.0
+ golang.org/x/text v0.29.0
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/api v0.34.1
+ k8s.io/apiextensions-apiserver v0.34.1
+ k8s.io/apimachinery v0.34.1
+ k8s.io/apiserver v0.34.1
+ k8s.io/cli-runtime v0.34.1
+ k8s.io/client-go v0.34.1
k8s.io/klog/v2 v2.130.1
- k8s.io/kubectl v0.32.2
- oras.land/oras-go/v2 v2.5.0
- sigs.k8s.io/yaml v1.4.0
+ k8s.io/kubectl v0.34.1
+ oras.land/oras-go/v2 v2.6.0
+ sigs.k8s.io/controller-runtime v0.22.1
+ sigs.k8s.io/kustomize/kyaml v0.20.1
+ sigs.k8s.io/yaml v1.6.0
)
require (
@@ -60,9 +64,6 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
- github.com/containerd/errdefs v0.3.0 // indirect
- github.com/containerd/log v0.1.0 // indirect
- github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -71,114 +72,113 @@ require (
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
- github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+ github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
- github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fxamacker/cbor/v2 v2.7.0 // indirect
- github.com/go-errors/errors v1.4.2 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/go-errors/errors v1.5.1 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.21.0 // indirect
- github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/btree v1.0.1 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/gorilla/mux v1.8.1 // indirect
- github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
- github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/miekg/dns v1.1.57 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
- github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/onsi/gomega v1.37.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.20.5 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.60.1 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/procfs v0.17.0 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
- github.com/redis/go-redis/v9 v9.1.0 // indirect
+ github.com/redis/go-redis/v9 v9.7.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.7.0 // indirect
+ github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect
github.com/x448/float16 v0.8.4 // indirect
- github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
- github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
- go.opentelemetry.io/otel v1.32.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
go.opentelemetry.io/otel/log v0.8.0 // indirect
- go.opentelemetry.io/otel/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/sdk v1.32.0 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/trace v1.32.0 // indirect
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
- golang.org/x/mod v0.21.0 // indirect
- golang.org/x/net v0.33.0 // indirect
- golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sync v0.12.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/time v0.7.0 // indirect
- golang.org/x/tools v0.26.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
- google.golang.org/grpc v1.68.0 // indirect
- google.golang.org/protobuf v1.35.2 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ golang.org/x/mod v0.27.0 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sync v0.17.0 // indirect
+ golang.org/x/sys v0.36.0 // indirect
+ golang.org/x/time v0.12.0 // indirect
+ golang.org/x/tools v0.36.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/grpc v1.72.1 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/component-base v0.32.2 // indirect
- k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
- k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
- sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
- sigs.k8s.io/kustomize/api v0.18.0 // indirect
- sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+ k8s.io/component-base v0.34.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ sigs.k8s.io/kustomize/api v0.20.1 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
diff --git a/go.sum b/go.sum
index d947675fd..9fa40e4d4 100644
--- a/go.sum
+++ b/go.sum
@@ -6,16 +6,16 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -37,10 +37,11 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
-github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0=
-github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
-github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -48,19 +49,10 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
-github.com/containerd/containerd v1.7.26 h1:3cs8K2RHlMQaPifLqgRyI4VBkoldNdEw62cb7qQga7k=
-github.com/containerd/containerd v1.7.26/go.mod h1:m4JU0E+h0ebbo9yXD7Hyt+sWnc8tChm7MudCjj4jRvQ=
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
-github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
-github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
-github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
-github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
@@ -71,52 +63,60 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg=
-github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
+github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
+github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
-github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a h1:UwSIFv5g5lIvbGgtf3tVwC7Ky9rmMFBp0RMs+6f6YqE=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C8DzXehI4zAbrdlbtOByKX6pfivJTBiV9Jjqv56Yd9Q=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/extism/go-sdk v1.7.1 h1:lWJos6uY+tRFdlIHR+SJjwFDApY7OypS/2nMhiVQ9Sw=
+github.com/extism/go-sdk v1.7.1/go.mod h1:IT+Xdg5AZM9hVtpFUA+uZCJMge/hbvshl8bwzLtFyKA=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fluxcd/cli-utils v0.36.0-flux.14 h1:I//AMVUXTc+M04UtIXArMXQZCazGMwfemodV1j/yG8c=
+github.com/fluxcd/cli-utils v0.36.0-flux.14/go.mod h1:uDo7BYOfbdmk/asnHuI0IQPl6u0FCgcN54AHDu3Y5As=
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
-github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
+github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -135,46 +135,38 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca h1:T54Ema1DU8ngI+aef9ZhAhNGQhcRTrWxVeG07F+c/Rw=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
@@ -189,15 +181,12 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -210,16 +199,13 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
@@ -244,8 +230,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -253,18 +240,16 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -276,34 +261,36 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U=
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
-github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
-github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
+github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
+github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4=
-github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
+github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
+github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
@@ -313,47 +300,42 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
-github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk=
+github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
+github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
-go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -362,10 +344,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
@@ -378,20 +360,30 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
-go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
-go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
-go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
-go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
-go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
-go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
-go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
-go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -400,16 +392,16 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
+golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -423,10 +415,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -437,31 +429,29 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -469,8 +459,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
+golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -478,10 +468,10 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -490,20 +480,20 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
-google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
-google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
-google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
-google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -518,37 +508,41 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw=
-k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y=
-k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
-k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
-k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
-k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw=
-k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM=
-k8s.io/cli-runtime v0.32.2 h1:aKQR4foh9qeyckKRkNXUccP9moxzffyndZAvr+IXMks=
-k8s.io/cli-runtime v0.32.2/go.mod h1:a/JpeMztz3xDa7GCyyShcwe55p8pbcCVQxvqZnIwXN8=
-k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
-k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
-k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU=
-k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0=
+k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
+k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
+k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
+k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
+k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
+k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
+k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M=
+k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
+k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
-k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us=
-k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
-oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
-sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
-sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
-sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI=
+k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
+oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
+sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
+sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
+sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
+sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
+sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/internal/chart/v3/chart.go b/internal/chart/v3/chart.go
new file mode 100644
index 000000000..2edc6c339
--- /dev/null
+++ b/internal/chart/v3/chart.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// APIVersionV3 is the API version number for version 3.
+const APIVersionV3 = "v3"
+
+// aliasNameFormat defines the characters that are legal in an alias name.
+var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+// Chart is a helm package that contains metadata, a default config, zero or more
+// optionally parameterizable templates, and zero or more charts (dependencies).
+type Chart struct {
+ // Raw contains the raw contents of the files originally contained in the chart archive.
+ //
+ // This should not be used except in special cases like `helm show values`,
+ // where we want to display the raw values, comments and all.
+ Raw []*common.File `json:"-"`
+ // Metadata is the contents of the Chartfile.
+ Metadata *Metadata `json:"metadata"`
+ // Lock is the contents of Chart.lock.
+ Lock *Lock `json:"lock"`
+ // Templates for this chart.
+ Templates []*common.File `json:"templates"`
+ // Values are default config for this chart.
+ Values map[string]interface{} `json:"values"`
+ // Schema is an optional JSON schema for imposing structure on Values
+ Schema []byte `json:"schema"`
+ // Files are miscellaneous files in a chart archive,
+ // e.g. README, LICENSE, etc.
+ Files []*common.File `json:"files"`
+
+ parent *Chart
+ dependencies []*Chart
+}
+
+type CRD struct {
+ // Name is the File.Name for the crd file
+ Name string
+ // Filename is the File obj Name including (sub-)chart.ChartFullPath
+ Filename string
+ // File is the File obj for the crd
+ File *common.File
+}
+
+// SetDependencies replaces the chart dependencies.
+func (ch *Chart) SetDependencies(charts ...*Chart) {
+ ch.dependencies = nil
+ ch.AddDependency(charts...)
+}
+
+// Name returns the name of the chart.
+func (ch *Chart) Name() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.Name
+}
+
+// AddDependency determines if the chart is a subchart.
+func (ch *Chart) AddDependency(charts ...*Chart) {
+ for i, x := range charts {
+ charts[i].parent = ch
+ ch.dependencies = append(ch.dependencies, x)
+ }
+}
+
+// Root finds the root chart.
+func (ch *Chart) Root() *Chart {
+ if ch.IsRoot() {
+ return ch
+ }
+ return ch.Parent().Root()
+}
+
+// Dependencies are the charts that this chart depends on.
+func (ch *Chart) Dependencies() []*Chart { return ch.dependencies }
+
+// IsRoot determines if the chart is the root chart.
+func (ch *Chart) IsRoot() bool { return ch.parent == nil }
+
+// Parent returns a subchart's parent chart.
+func (ch *Chart) Parent() *Chart { return ch.parent }
+
+// ChartPath returns the full path to this chart in dot notation.
+func (ch *Chart) ChartPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartPath() + "." + ch.Name()
+ }
+ return ch.Name()
+}
+
+// ChartFullPath returns the full path to this chart.
+// Note that the path may not correspond to the path where the file can be found on the file system if the path
+// points to an aliased subchart.
+func (ch *Chart) ChartFullPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartFullPath() + "/charts/" + ch.Name()
+ }
+ return ch.Name()
+}
+
+// Validate validates the metadata.
+func (ch *Chart) Validate() error {
+ return ch.Metadata.Validate()
+}
+
+// AppVersion returns the appversion of the chart.
+func (ch *Chart) AppVersion() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.AppVersion
+}
+
+// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart.
+// Deprecated: use CRDObjects()
+func (ch *Chart) CRDs() []*common.File {
+ files := []*common.File{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ files = append(files, f)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ files = append(files, dep.CRDs()...)
+ }
+ return files
+}
+
+// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts
+func (ch *Chart) CRDObjects() []CRD {
+ crds := []CRD{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f}
+ crds = append(crds, mycrd)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ crds = append(crds, dep.CRDObjects()...)
+ }
+ return crds
+}
+
+func hasManifestExtension(fname string) bool {
+ ext := filepath.Ext(fname)
+ return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json")
+}
diff --git a/internal/chart/v3/chart_test.go b/internal/chart/v3/chart_test.go
new file mode 100644
index 000000000..b1820ac0a
--- /dev/null
+++ b/internal/chart/v3/chart_test.go
@@ -0,0 +1,213 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestCRDs(t *testing.T) {
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDs()
+ is.Equal(2, len(crds))
+ is.Equal("crds/foo.yaml", crds[0].Name)
+ is.Equal("crds/foo/bar/baz.yaml", crds[1].Name)
+}
+
+func TestSaveChartNoRawData(t *testing.T) {
+ chrt := Chart{
+ Raw: []*common.File{
+ {
+ Name: "fhqwhgads.yaml",
+ Data: []byte("Everybody to the Limit"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ data, err := json.Marshal(chrt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res := &Chart{}
+ if err := json.Unmarshal(data, res); err != nil {
+ t.Fatal(err)
+ }
+
+ is.Equal([]*common.File(nil), res.Raw)
+}
+
+func TestMetadata(t *testing.T) {
+ chrt := Chart{
+ Metadata: &Metadata{
+ Name: "foo.yaml",
+ AppVersion: "1.0.0",
+ APIVersion: "v3",
+ Version: "1.0.0",
+ Type: "application",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.yaml", chrt.Name())
+ is.Equal("1.0.0", chrt.AppVersion())
+ is.Equal(nil, chrt.Validate())
+}
+
+func TestIsRoot(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal(false, chrt1.IsRoot())
+ is.Equal(true, chrt2.IsRoot())
+}
+
+func TestChartPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.", chrt1.ChartPath())
+ is.Equal("foo", chrt2.ChartPath())
+}
+
+func TestChartFullPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo/charts/", chrt1.ChartFullPath())
+ is.Equal("foo", chrt2.ChartFullPath())
+}
+
+func TestCRDObjects(t *testing.T) {
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ expected := []CRD{
+ {
+ Name: "crds/foo.yaml",
+ Filename: "crds/foo.yaml",
+ File: &common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ },
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ Filename: "crds/foo/bar/baz.yaml",
+ File: &common.File{
+ Name: "crds/foo/bar/baz.yaml",
+ Data: []byte("hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDObjects()
+ is.Equal(expected, crds)
+}
diff --git a/internal/chart/v3/dependency.go b/internal/chart/v3/dependency.go
new file mode 100644
index 000000000..2d956b548
--- /dev/null
+++ b/internal/chart/v3/dependency.go
@@ -0,0 +1,82 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import "time"
+
+// Dependency describes a chart upon which another chart depends.
+//
+// Dependencies can be used to express developer intent, or to capture the state
+// of a chart.
+type Dependency struct {
+ // Name is the name of the dependency.
+ //
+ // This must mach the name in the dependency's Chart.yaml.
+ Name string `json:"name" yaml:"name"`
+ // Version is the version (range) of this chart.
+ //
+ // A lock file will always produce a single version, while a dependency
+ // may contain a semantic version range.
+ Version string `json:"version,omitempty" yaml:"version,omitempty"`
+ // The URL to the repository.
+ //
+ // Appending `index.yaml` to this string should result in a URL that can be
+ // used to fetch the repository index.
+ Repository string `json:"repository" yaml:"repository"`
+ // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
+ Condition string `json:"condition,omitempty" yaml:"condition,omitempty"`
+ // Tags can be used to group charts for enabling/disabling together
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ // Enabled bool determines if chart should be loaded
+ Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
+ // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
+ // string or pair of child/parent sublist items.
+ ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"`
+ // Alias usable alias to be used for the chart
+ Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
+}
+
+// Validate checks for common problems with the dependency datastructure in
+// the chart. This check must be done at load time before the dependency's charts are
+// loaded.
+func (d *Dependency) Validate() error {
+ if d == nil {
+ return ValidationError("dependencies must not contain empty or null nodes")
+ }
+ d.Name = sanitizeString(d.Name)
+ d.Version = sanitizeString(d.Version)
+ d.Repository = sanitizeString(d.Repository)
+ d.Condition = sanitizeString(d.Condition)
+ for i := range d.Tags {
+ d.Tags[i] = sanitizeString(d.Tags[i])
+ }
+ if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
+ return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
+ }
+ return nil
+}
+
+// Lock is a lock file for dependencies.
+//
+// It represents the state that the dependencies should be in.
+type Lock struct {
+ // Generated is the date the lock file was last generated.
+ Generated time.Time `json:"generated"`
+ // Digest is a hash of the dependencies in Chart.yaml.
+ Digest string `json:"digest"`
+ // Dependencies is the list of dependencies that this lock file has locked.
+ Dependencies []*Dependency `json:"dependencies"`
+}
diff --git a/internal/chart/v3/dependency_test.go b/internal/chart/v3/dependency_test.go
new file mode 100644
index 000000000..fcea19aea
--- /dev/null
+++ b/internal/chart/v3/dependency_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "testing"
+)
+
+func TestValidateDependency(t *testing.T) {
+ dep := &Dependency{
+ Name: "example",
+ }
+ for value, shouldFail := range map[string]bool{
+ "abcdefghijklmenopQRSTUVWXYZ-0123456780_": false,
+ "-okay": false,
+ "_okay": false,
+ "- bad": true,
+ " bad": true,
+ "bad\nvalue": true,
+ "bad ": true,
+ "bad$": true,
+ } {
+ dep.Alias = value
+ res := dep.Validate()
+ if res != nil && !shouldFail {
+ t.Errorf("Failed on case %q", dep.Alias)
+ } else if res == nil && shouldFail {
+ t.Errorf("Expected failure for %q", dep.Alias)
+ }
+ }
+}
diff --git a/internal/chart/v3/doc.go b/internal/chart/v3/doc.go
new file mode 100644
index 000000000..e003833a0
--- /dev/null
+++ b/internal/chart/v3/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package v3 provides chart handling for apiVersion v3 charts
+
+This package and its sub-packages provide handling for apiVersion v3 charts.
+*/
+package v3
diff --git a/internal/chart/v3/errors.go b/internal/chart/v3/errors.go
new file mode 100644
index 000000000..059e43f07
--- /dev/null
+++ b/internal/chart/v3/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import "fmt"
+
+// ValidationError represents a data validation error.
+type ValidationError string
+
+func (v ValidationError) Error() string {
+ return "validation: " + string(v)
+}
+
+// ValidationErrorf takes a message and formatting options and creates a ValidationError
+func ValidationErrorf(msg string, args ...interface{}) ValidationError {
+ return ValidationError(fmt.Sprintf(msg, args...))
+}
diff --git a/internal/chart/v3/fuzz_test.go b/internal/chart/v3/fuzz_test.go
new file mode 100644
index 000000000..982c26489
--- /dev/null
+++ b/internal/chart/v3/fuzz_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "testing"
+
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+
+func FuzzMetadataValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ fdp := fuzz.NewConsumer(data)
+ // Add random values to the metadata
+ md := &Metadata{}
+ err := fdp.GenerateStruct(md)
+ if err != nil {
+ t.Skip()
+ }
+ md.Validate()
+ })
+}
+
+func FuzzDependencyValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ f := fuzz.NewConsumer(data)
+ // Add random values to the dependenci
+ d := &Dependency{}
+ err := f.GenerateStruct(d)
+ if err != nil {
+ t.Skip()
+ }
+ d.Validate()
+ })
+}
diff --git a/internal/chart/v3/lint/lint.go b/internal/chart/v3/lint/lint.go
new file mode 100644
index 000000000..0cd949065
--- /dev/null
+++ b/internal/chart/v3/lint/lint.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint // import "helm.sh/helm/v4/internal/chart/v3/lint"
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+type linterOptions struct {
+ KubeVersion *common.KubeVersion
+ SkipSchemaValidation bool
+}
+
+type LinterOption func(lo *linterOptions)
+
+func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption {
+ return func(lo *linterOptions) {
+ lo.KubeVersion = kubeVersion
+ }
+}
+
+func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption {
+ return func(lo *linterOptions) {
+ lo.SkipSchemaValidation = skipSchemaValidation
+ }
+}
+
+func RunAll(baseDir string, values map[string]interface{}, namespace string, options ...LinterOption) support.Linter {
+
+ chartDir, _ := filepath.Abs(baseDir)
+
+ lo := linterOptions{}
+ for _, option := range options {
+ option(&lo)
+ }
+
+ result := support.Linter{
+ ChartDir: chartDir,
+ }
+
+ rules.Chartfile(&result)
+ rules.ValuesWithOverrides(&result, values, lo.SkipSchemaValidation)
+ rules.TemplatesWithSkipSchemaValidation(&result, values, namespace, lo.KubeVersion, lo.SkipSchemaValidation)
+ rules.Dependencies(&result)
+ rules.Crds(&result)
+
+ return result
+}
diff --git a/internal/chart/v3/lint/lint_test.go b/internal/chart/v3/lint/lint_test.go
new file mode 100644
index 000000000..6f5912ae7
--- /dev/null
+++ b/internal/chart/v3/lint/lint_test.go
@@ -0,0 +1,246 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+var values map[string]interface{}
+
+const namespace = "testNamespace"
+
+const badChartDir = "rules/testdata/badchartfile"
+const badValuesFileDir = "rules/testdata/badvaluesfile"
+const badYamlFileDir = "rules/testdata/albatross"
+const badCrdFileDir = "rules/testdata/badcrdfile"
+const goodChartDir = "rules/testdata/goodone"
+const subChartValuesDir = "rules/testdata/withsubchart"
+const malformedTemplate = "rules/testdata/malformed-template"
+const invalidChartFileDir = "rules/testdata/invalidchartfile"
+
+func TestBadChartV3(t *testing.T) {
+ m := RunAll(badChartDir, values, namespace).Messages
+ if len(m) != 8 {
+ t.Errorf("Number of errors %v", len(m))
+ t.Errorf("All didn't fail with expected errors, got %#v", m)
+ }
+ // There should be one INFO, one WARNING, and 2 ERROR messages, check for them
+ var i, w, e, e2, e3, e4, e5, e6 bool
+ for _, msg := range m {
+ if msg.Severity == support.InfoSev {
+ if strings.Contains(msg.Err.Error(), "icon is recommended") {
+ i = true
+ }
+ }
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "does not exist") {
+ w = true
+ }
+ }
+ if msg.Severity == support.ErrorSev {
+ if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ e = true
+ }
+ if strings.Contains(msg.Err.Error(), "name is required") {
+ e2 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "apiVersion is required. The value must be \"v3\"") {
+ e3 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "chart type is not valid in apiVersion") {
+ e4 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
+ e5 = true
+ }
+ // This comes from the dependency check, which loads dependency info from the Chart.yaml
+ if strings.Contains(msg.Err.Error(), "unable to load chart") {
+ e6 = true
+ }
+ }
+ }
+ if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w {
+ t.Errorf("Didn't find all the expected errors, got %#v", m)
+ }
+}
+
+func TestInvalidYaml(t *testing.T) {
+ m := RunAll(badYamlFileDir, values, namespace).Messages
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("All didn't have the error for deliberateSyntaxError")
+ }
+}
+
+func TestInvalidChartYamlV3(t *testing.T) {
+ m := RunAll(invalidChartFileDir, values, namespace).Messages
+ t.Log(m)
+ if len(m) != 3 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") {
+ t.Errorf("All didn't have the error for duplicate YAML keys")
+ }
+}
+
+func TestBadValuesV3(t *testing.T) {
+ m := RunAll(badValuesFileDir, values, namespace).Messages
+ if len(m) < 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "unable to parse YAML") {
+ t.Errorf("All didn't have the error for invalid key format: %s", m[0].Err)
+ }
+}
+
+func TestBadCrdFileV3(t *testing.T) {
+ m := RunAll(badCrdFileDir, values, namespace).Messages
+ assert.Lenf(t, m, 2, "All didn't fail with expected errors, got %#v", m)
+ assert.ErrorContains(t, m[0].Err, "apiVersion is not in 'apiextensions.k8s.io'")
+ assert.ErrorContains(t, m[1].Err, "object kind is not 'CustomResourceDefinition'")
+}
+
+func TestGoodChart(t *testing.T) {
+ m := RunAll(goodChartDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// TestHelmCreateChart tests that a `helm create` always passes a `helm lint` test.
+//
+// See https://github.com/helm/helm/issues/7923
+func TestHelmCreateChart(t *testing.T) {
+ dir := t.TempDir()
+
+ createdChart, err := chartutil.Create("testhelmcreatepasseslint", dir)
+ if err != nil {
+ t.Error(err)
+ // Fatal is bad because of the defer.
+ return
+ }
+
+ // Note: we test with strict=true here, even though others have
+ // strict = false.
+ m := RunAll(createdChart, values, namespace, WithSkipSchemaValidation(true)).Messages
+ if ll := len(m); ll != 1 {
+ t.Errorf("All should have had exactly 1 error. Got %d", ll)
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg.Error())
+ }
+ } else if msg := m[0].Err.Error(); !strings.Contains(msg, "icon is recommended") {
+ t.Errorf("Unexpected lint error: %s", msg)
+ }
+}
+
+// TestHelmCreateChart_CheckDeprecatedWarnings checks if any default template created by `helm create` throws
+// deprecated warnings in the linter check against the current Kubernetes version (provided using ldflags).
+//
+// See https://github.com/helm/helm/issues/11495
+//
+// Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent
+// of the `--set` flag.
+//
+// Note: This test requires the following ldflags to be set per the current Kubernetes version to avoid false-positive
+// results.
+// 1. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor=
+// 2. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor=
+// or directly use '$(LDFLAGS)' in Makefile.
+//
+// When run without ldflags, the test passes giving a false-positive result. This is because the variables
+// `k8sVersionMajor` and `k8sVersionMinor` by default are set to an older version of Kubernetes, with which, there
+// might not be the deprecation warning.
+func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) {
+ createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Add values to enable hpa, and ingress which are disabled by default.
+ // This is the equivalent of:
+ // helm lint checkdeprecatedwarnings --set 'autoscaling.enabled=true,ingress.enabled=true'
+ updatedValues := map[string]interface{}{
+ "autoscaling": map[string]interface{}{
+ "enabled": true,
+ },
+ "ingress": map[string]interface{}{
+ "enabled": true,
+ },
+ }
+
+ linterRunDetails := RunAll(createdChart, updatedValues, namespace, WithSkipSchemaValidation(true))
+ for _, msg := range linterRunDetails.Messages {
+ if strings.HasPrefix(msg.Error(), "[WARNING]") &&
+ strings.Contains(msg.Error(), "deprecated") {
+ // When there is a deprecation warning for an object created
+ // by `helm create` for the current Kubernetes version, fail.
+ t.Errorf("Unexpected deprecation warning for %q: %s", msg.Path, msg.Error())
+ }
+ }
+}
+
+// lint ignores import-values
+// See https://github.com/helm/helm/issues/9658
+func TestSubChartValuesChart(t *testing.T) {
+ m := RunAll(subChartValuesDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// lint stuck with malformed template object
+// See https://github.com/helm/helm/issues/11391
+func TestMalformedTemplate(t *testing.T) {
+ c := time.After(3 * time.Second)
+ ch := make(chan int, 1)
+ var m []support.Message
+ go func() {
+ m = RunAll(malformedTemplate, values, namespace).Messages
+ ch <- 1
+ }()
+ select {
+ case <-c:
+ t.Fatalf("lint malformed template timeout")
+ case <-ch:
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "invalid character '{'") {
+ t.Errorf("All didn't have the error for invalid character '{'")
+ }
+ }
+}
diff --git a/internal/chart/v3/lint/rules/chartfile.go b/internal/chart/v3/lint/rules/chartfile.go
new file mode 100644
index 000000000..fc246ba80
--- /dev/null
+++ b/internal/chart/v3/lint/rules/chartfile.go
@@ -0,0 +1,225 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/asaskevich/govalidator"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+// Chartfile runs a set of linter rules related to Chart.yaml file
+func Chartfile(linter *support.Linter) {
+ chartFileName := "Chart.yaml"
+ chartPath := filepath.Join(linter.ChartDir, chartFileName)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath))
+
+ chartFile, err := chartutil.LoadChartfile(chartPath)
+ validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err))
+
+ // Guard clause. Following linter rules require a parsable ChartFile
+ if !validChartFile {
+ return
+ }
+
+ _, err = chartutil.StrictLoadChartfile(chartPath)
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartYamlStrictFormat(err))
+
+ // type check for Chart.yaml . ignoring error as any parse
+ // errors would already be caught in the above load function
+ chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile))
+
+ // Chart metadata
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile))
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile))
+ linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile))
+}
+
+func validateChartVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "version")
+}
+
+func validateChartAppVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "appVersion")
+}
+
+func isStringValue(data map[string]interface{}, key string) error {
+ value, ok := data[key]
+ if !ok {
+ return nil
+ }
+ valueType := fmt.Sprintf("%T", value)
+ if valueType != "string" {
+ return fmt.Errorf("%s should be of type string but it's of type %s", key, valueType)
+ }
+ return nil
+}
+
+func validateChartYamlNotDirectory(chartPath string) error {
+ fi, err := os.Stat(chartPath)
+
+ if err == nil && fi.IsDir() {
+ return errors.New("should be a file, not a directory")
+ }
+ return nil
+}
+
+func validateChartYamlFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("unable to parse YAML\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartYamlStrictFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("failed to strictly parse chart metadata file\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartName(cf *chart.Metadata) error {
+ if cf.Name == "" {
+ return errors.New("name is required")
+ }
+ name := filepath.Base(cf.Name)
+ if name != cf.Name {
+ return fmt.Errorf("chart name %q is invalid", cf.Name)
+ }
+ return nil
+}
+
+func validateChartAPIVersion(cf *chart.Metadata) error {
+ if cf.APIVersion == "" {
+ return errors.New("apiVersion is required. The value must be \"v3\"")
+ }
+
+ if cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("apiVersion '%s' is not valid. The value must be \"v3\"", cf.APIVersion)
+ }
+
+ return nil
+}
+
+func validateChartVersion(cf *chart.Metadata) error {
+ if cf.Version == "" {
+ return errors.New("version is required")
+ }
+
+ version, err := semver.StrictNewVersion(cf.Version)
+ if err != nil {
+ return fmt.Errorf("version '%s' is not a valid SemVerV2", cf.Version)
+ }
+
+ c, err := semver.NewConstraint(">0.0.0-0")
+ if err != nil {
+ return err
+ }
+ valid, msg := c.Validate(version)
+
+ if !valid && len(msg) > 0 {
+ return fmt.Errorf("version %v", msg[0])
+ }
+
+ return nil
+}
+
+func validateChartMaintainer(cf *chart.Metadata) error {
+ for _, maintainer := range cf.Maintainers {
+ if maintainer == nil {
+ return errors.New("a maintainer entry is empty")
+ }
+ if maintainer.Name == "" {
+ return errors.New("each maintainer requires a name")
+ } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) {
+ return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
+ } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) {
+ return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
+ }
+ }
+ return nil
+}
+
+func validateChartSources(cf *chart.Metadata) error {
+ for _, source := range cf.Sources {
+ if source == "" || !govalidator.IsRequestURL(source) {
+ return fmt.Errorf("invalid source URL '%s'", source)
+ }
+ }
+ return nil
+}
+
+func validateChartIconPresence(cf *chart.Metadata) error {
+ if cf.Icon == "" {
+ return errors.New("icon is recommended")
+ }
+ return nil
+}
+
+func validateChartIconURL(cf *chart.Metadata) error {
+ if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) {
+ return fmt.Errorf("invalid icon URL '%s'", cf.Icon)
+ }
+ return nil
+}
+
+func validateChartDependencies(cf *chart.Metadata) error {
+ if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3)
+ }
+ return nil
+}
+
+func validateChartType(cf *chart.Metadata) error {
+ if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3)
+ }
+ return nil
+}
+
+// loadChartFileForTypeCheck loads the Chart.yaml
+// in a generic form of a map[string]interface{}, so that the type
+// of the values can be checked
+func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := make(map[string]interface{})
+ err = yaml.Unmarshal(b, &y)
+ return y, err
+}
diff --git a/pkg/lint/rules/chartfile_test.go b/internal/chart/v3/lint/rules/chartfile_test.go
similarity index 83%
rename from pkg/lint/rules/chartfile_test.go
rename to internal/chart/v3/lint/rules/chartfile_test.go
index 061d90e33..57893e151 100644
--- a/pkg/lint/rules/chartfile_test.go
+++ b/internal/chart/v3/lint/rules/chartfile_test.go
@@ -17,16 +17,15 @@ limitations under the License.
package rules
import (
+ "errors"
"os"
"path/filepath"
"strings"
"testing"
- "github.com/pkg/errors"
-
- chart "helm.sh/helm/v4/pkg/chart/v2"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
)
const (
@@ -85,9 +84,11 @@ func TestValidateChartVersion(t *testing.T) {
ErrorMsg string
}{
{"", "version is required"},
- {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"},
- {"waps", "'waps' is not a valid SemVer"},
- {"-3", "'-3' is not a valid SemVer"},
+ {"1.2.3.4", "version '1.2.3.4' is not a valid SemVerV2"},
+ {"waps", "'waps' is not a valid SemVerV2"},
+ {"-3", "'-3' is not a valid SemVerV2"},
+ {"1.1", "'1.1' is not a valid SemVerV2"},
+ {"1", "'1' is not a valid SemVerV2"},
}
var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"}
@@ -143,6 +144,16 @@ func TestValidateChartMaintainer(t *testing.T) {
t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error())
}
}
+
+ // Testing for an empty maintainer
+ badChart.Maintainers = []*chart.Maintainer{nil}
+ err := validateChartMaintainer(badChart)
+ if err == nil {
+ t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected")
+ }
+ if err.Error() != "a maintainer entry is empty" {
+ t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error())
+ }
}
func TestValidateChartSources(t *testing.T) {
@@ -166,10 +177,30 @@ func TestValidateChartSources(t *testing.T) {
}
func TestValidateChartIconPresence(t *testing.T) {
- err := validateChartIconPresence(badChart)
- if err == nil {
- t.Errorf("validateChartIconPresence to return a linter error, got no error")
- }
+ t.Run("Icon absent", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err == nil {
+ t.Errorf("validateChartIconPresence to return a linter error, got no error")
+ } else if !strings.Contains(err.Error(), "icon is recommended") {
+ t.Errorf("expected %q, got %q", "icon is recommended", err.Error())
+ }
+ })
+ t.Run("Icon present", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "http://example.org/icon.png",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err != nil {
+ t.Errorf("Unexpected error: %q", err.Error())
+ }
+ })
}
func TestValidateChartIconURL(t *testing.T) {
@@ -192,7 +223,7 @@ func TestValidateChartIconURL(t *testing.T) {
}
}
-func TestChartfile(t *testing.T) {
+func TestV3Chartfile(t *testing.T) {
t.Run("Chart.yaml basic validity issues", func(t *testing.T) {
linter := support.Linter{ChartDir: badChartDir}
Chartfile(&linter)
@@ -208,7 +239,7 @@ func TestChartfile(t *testing.T) {
t.Errorf("Unexpected message 0: %s", msgs[0].Err)
}
- if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. The value must be either \"v1\" or \"v2\"") {
+ if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. The value must be \"v3\"") {
t.Errorf("Unexpected message 1: %s", msgs[1].Err)
}
@@ -219,14 +250,6 @@ func TestChartfile(t *testing.T) {
if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") {
t.Errorf("Unexpected message 3: %s", msgs[3].Err)
}
-
- if !strings.Contains(msgs[4].Err.Error(), "chart type is not valid in apiVersion") {
- t.Errorf("Unexpected message 4: %s", msgs[4].Err)
- }
-
- if !strings.Contains(msgs[5].Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
- t.Errorf("Unexpected message 5: %s", msgs[5].Err)
- }
})
t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) {
diff --git a/internal/chart/v3/lint/rules/crds.go b/internal/chart/v3/lint/rules/crds.go
new file mode 100644
index 000000000..6bafb52eb
--- /dev/null
+++ b/internal/chart/v3/lint/rules/crds.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+// Crds lints the CRDs in the Linter.
+func Crds(linter *support.Linter) {
+ fpath := "crds/"
+ crdsPath := filepath.Join(linter.ChartDir, fpath)
+
+ // crds directory is optional
+ if _, err := os.Stat(crdsPath); errors.Is(err, fs.ErrNotExist) {
+ return
+ }
+
+ crdsDirValid := linter.RunLinterRule(support.ErrorSev, fpath, validateCrdsDir(crdsPath))
+ if !crdsDirValid {
+ return
+ }
+
+ // Load chart and parse CRDs
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ /* Iterate over all the CRDs to check:
+ 1. It is a YAML file and not a template
+ 2. The API version is apiextensions.k8s.io
+ 3. The kind is CustomResourceDefinition
+ */
+ for _, crd := range chart.CRDObjects() {
+ fileName := crd.Name
+ fpath = fileName
+
+ decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(crd.File.Data), 4096)
+ for {
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if err == io.EOF {
+ break
+ }
+
+ // If YAML parsing fails here, it will always fail in the next block as well, so we should return here.
+ // This also confirms the YAML is not a template, since templates can't be decoded into a K8sYamlStruct.
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdAPIVersion(yamlStruct))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdKind(yamlStruct))
+ }
+ }
+}
+
+// Validation functions
+func validateCrdsDir(crdsPath string) error {
+ fi, err := os.Stat(crdsPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateCrdAPIVersion(obj *k8sYamlStruct) error {
+ if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") {
+ return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'")
+ }
+ return nil
+}
+
+func validateCrdKind(obj *k8sYamlStruct) error {
+ if obj.Kind != "CustomResourceDefinition" {
+ return fmt.Errorf("object kind is not 'CustomResourceDefinition'")
+ }
+ return nil
+}
diff --git a/pkg/postrender/postrender.go b/internal/chart/v3/lint/rules/crds_test.go
similarity index 50%
rename from pkg/postrender/postrender.go
rename to internal/chart/v3/lint/rules/crds_test.go
index 3af384290..d93e3d978 100644
--- a/pkg/postrender/postrender.go
+++ b/internal/chart/v3/lint/rules/crds_test.go
@@ -14,16 +14,23 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package postrender contains an interface that can be implemented for custom
-// post-renderers and an exec implementation that can be used for arbitrary
-// binaries and scripts
-package postrender
-
-import "bytes"
-
-type PostRenderer interface {
- // Run expects a single buffer filled with Helm rendered manifests. It
- // expects the modified results to be returned on a separate buffer or an
- // error if there was an issue or failure while running the post render step
- Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+package rules
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+)
+
+const invalidCrdsDir = "./testdata/invalidcrdsdir"
+
+func TestInvalidCrdsDir(t *testing.T) {
+ linter := support.Linter{ChartDir: invalidCrdsDir}
+ Crds(&linter)
+ res := linter.Messages
+
+ assert.Len(t, res, 1)
+ assert.ErrorContains(t, res[0].Err, "not a directory")
}
diff --git a/internal/chart/v3/lint/rules/dependencies.go b/internal/chart/v3/lint/rules/dependencies.go
new file mode 100644
index 000000000..f45153728
--- /dev/null
+++ b/internal/chart/v3/lint/rules/dependencies.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import (
+ "fmt"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+// Dependencies runs lints against a chart's dependencies
+//
+// See https://github.com/helm/helm/issues/7910
+func Dependencies(linter *support.Linter) {
+ c, err := loader.LoadDir(linter.ChartDir)
+ if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c))
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c))
+ linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c))
+}
+
+func validateChartFormat(chartError error) error {
+ if chartError != nil {
+ return fmt.Errorf("unable to load chart\n\t%w", chartError)
+ }
+ return nil
+}
+
+func validateDependencyInChartsDir(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Dependencies() {
+ dependencies[dep.Metadata.Name] = struct{}{}
+ }
+ for _, dep := range c.Metadata.Dependencies {
+ if _, ok := dependencies[dep.Name]; !ok {
+ missing = append(missing, dep.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependencyInMetadata(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Metadata.Dependencies {
+ dependencies[dep.Name] = struct{}{}
+ }
+ for _, dep := range c.Dependencies() {
+ if _, ok := dependencies[dep.Metadata.Name]; !ok {
+ missing = append(missing, dep.Metadata.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependenciesUnique(c *chart.Chart) (err error) {
+ dependencies := map[string]*chart.Dependency{}
+ shadowing := []string{}
+
+ for _, dep := range c.Metadata.Dependencies {
+ key := dep.Name
+ if dep.Alias != "" {
+ key = dep.Alias
+ }
+ if dependencies[key] != nil {
+ shadowing = append(shadowing, key)
+ }
+ dependencies[key] = dep
+ }
+ if len(shadowing) > 0 {
+ err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ","))
+ }
+ return err
+}
diff --git a/internal/chart/v3/lint/rules/dependencies_test.go b/internal/chart/v3/lint/rules/dependencies_test.go
new file mode 100644
index 000000000..b80e4b8a9
--- /dev/null
+++ b/internal/chart/v3/lint/rules/dependencies_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package rules
+
+import (
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+func chartWithBadDependencies() chart.Chart {
+ badChartDeps := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "sub2",
+ },
+ {
+ Name: "sub3",
+ },
+ },
+ },
+ }
+
+ badChartDeps.SetDependencies(
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub1",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub2",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ )
+ return badChartDeps
+}
+
+func TestValidateDependencyInChartsDir(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInChartsDir(&c); err == nil {
+ t.Error("chart should have been flagged for missing deps in chart directory")
+ }
+}
+
+func TestValidateDependencyInMetadata(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInMetadata(&c); err == nil {
+ t.Errorf("chart should have been flagged for missing deps in chart metadata")
+ }
+}
+
+func TestValidateDependenciesUnique(t *testing.T) {
+ tests := []struct {
+ chart chart.Chart
+ }{
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ },
+ {
+ Name: "foo",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "bar",
+ },
+ {
+ Name: "bar",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "baz",
+ },
+ {
+ Name: "bar",
+ Alias: "baz",
+ },
+ },
+ },
+ }},
+ }
+
+ for _, tt := range tests {
+ if err := validateDependenciesUnique(&tt.chart); err == nil {
+ t.Errorf("chart should have been flagged for dependency shadowing")
+ }
+ }
+}
+
+func TestDependencies(t *testing.T) {
+ tmp := t.TempDir()
+
+ c := chartWithBadDependencies()
+ err := chartutil.SaveDir(&c, tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ linter := support.Linter{ChartDir: filepath.Join(tmp, c.Metadata.Name)}
+
+ Dependencies(&linter)
+ if l := len(linter.Messages); l != 2 {
+ t.Errorf("expected 2 linter errors for bad chart dependencies. Got %d.", l)
+ for i, msg := range linter.Messages {
+ t.Logf("Message: %d, Error: %#v", i, msg)
+ }
+ }
+}
diff --git a/pkg/lint/rules/deprecations.go b/internal/chart/v3/lint/rules/deprecations.go
similarity index 90%
rename from pkg/lint/rules/deprecations.go
rename to internal/chart/v3/lint/rules/deprecations.go
index bd4a4436a..6f86bdbbd 100644
--- a/pkg/lint/rules/deprecations.go
+++ b/internal/chart/v3/lint/rules/deprecations.go
@@ -14,18 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package rules // import "helm.sh/helm/v4/pkg/lint/rules"
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
import (
"fmt"
"strconv"
+ "helm.sh/helm/v4/pkg/chart/common"
+
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/endpoints/deprecation"
kscheme "k8s.io/client-go/kubernetes/scheme"
-
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
)
var (
@@ -47,7 +47,7 @@ func (e deprecatedAPIError) Error() string {
return msg
}
-func validateNoDeprecations(resource *K8sYamlStruct, kubeVersion *chartutil.KubeVersion) error {
+func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error {
// if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
if resource.APIVersion == "" {
return nil
@@ -92,7 +92,7 @@ func validateNoDeprecations(resource *K8sYamlStruct, kubeVersion *chartutil.Kube
}
}
-func resourceToRuntimeObject(resource *K8sYamlStruct) (runtime.Object, error) {
+func resourceToRuntimeObject(resource *k8sYamlStruct) (runtime.Object, error) {
scheme := runtime.NewScheme()
kscheme.AddToScheme(scheme)
diff --git a/internal/chart/v3/lint/rules/deprecations_test.go b/internal/chart/v3/lint/rules/deprecations_test.go
new file mode 100644
index 000000000..35e541e5c
--- /dev/null
+++ b/internal/chart/v3/lint/rules/deprecations_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import "testing"
+
+func TestValidateNoDeprecations(t *testing.T) {
+ deprecated := &k8sYamlStruct{
+ APIVersion: "extensions/v1beta1",
+ Kind: "Deployment",
+ }
+ err := validateNoDeprecations(deprecated, nil)
+ if err == nil {
+ t.Fatal("Expected deprecated extension to be flagged")
+ }
+ depErr := err.(deprecatedAPIError)
+ if depErr.Message == "" {
+ t.Fatalf("Expected error message to be non-blank: %v", err)
+ }
+
+ if err := validateNoDeprecations(&k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "Pod",
+ }, nil); err != nil {
+ t.Errorf("Expected a v1 Pod to not be deprecated")
+ }
+}
diff --git a/internal/chart/v3/lint/rules/template.go b/internal/chart/v3/lint/rules/template.go
new file mode 100644
index 000000000..d4c62839f
--- /dev/null
+++ b/internal/chart/v3/lint/rules/template.go
@@ -0,0 +1,348 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/validation"
+ apipath "k8s.io/apimachinery/pkg/api/validation/path"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/engine"
+)
+
+// Templates lints the templates in the Linter.
+func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) {
+ TemplatesWithKubeVersion(linter, values, namespace, nil)
+}
+
+// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version.
+func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) {
+ TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false)
+}
+
+// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not.
+func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) {
+ fpath := "templates/"
+ templatesPath := filepath.Join(linter.ChartDir, fpath)
+
+ // Templates directory is optional for now
+ templatesDirExists := linter.RunLinterRule(support.WarningSev, fpath, templatesDirExists(templatesPath))
+ if !templatesDirExists {
+ return
+ }
+
+ validTemplatesDir := linter.RunLinterRule(support.ErrorSev, fpath, validateTemplatesDir(templatesPath))
+ if !validTemplatesDir {
+ return
+ }
+
+ // Load chart and parse templates
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ options := common.ReleaseOptions{
+ Name: "test-release",
+ Namespace: namespace,
+ }
+
+ caps := common.DefaultCapabilities.Copy()
+ if kubeVersion != nil {
+ caps.KubeVersion = *kubeVersion
+ }
+
+ // lint ignores import-values
+ // See https://github.com/helm/helm/issues/9658
+ if err := chartutil.ProcessDependencies(chart, values); err != nil {
+ return
+ }
+
+ cvals, err := util.CoalesceValues(chart, values)
+ if err != nil {
+ return
+ }
+
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
+ if err != nil {
+ linter.RunLinterRule(support.ErrorSev, fpath, err)
+ return
+ }
+ var e engine.Engine
+ e.LintMode = true
+ renderedContentMap, err := e.Render(chart, valuesToRender)
+
+ renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !renderOk {
+ return
+ }
+
+ /* Iterate over all the templates to check:
+ - It is a .yaml file
+ - All the values in the template file is defined
+ - {{}} include | quote
+ - Generated content is a valid Yaml file
+ - Metadata.Namespace is not set
+ */
+ for _, template := range chart.Templates {
+ fileName := template.Name
+ fpath = fileName
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
+
+ // We only apply the following lint rules to yaml files
+ if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" {
+ continue
+ }
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+ // Check that all the templates have a matching value
+ // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+ // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+ renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+ if strings.TrimSpace(renderedContent) != "" {
+ linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent))
+
+ decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+ // Lint all resources if the file contains multiple documents separated by ---
+ for {
+ // Even though k8sYamlStruct only defines a few fields, an error in any other
+ // key will be raised as well
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if err == io.EOF {
+ break
+ }
+
+ // If YAML linting fails here, it will always fail in the next block as well, so we should return here.
+ // fix https://github.com/helm/helm/issues/11391
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+ if yamlStruct != nil {
+ // NOTE: set to warnings to allow users to support out-of-date kubernetes
+ // Refs https://github.com/helm/helm/issues/8596
+ linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct))
+ linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion))
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent))
+ }
+ }
+ }
+ }
+}
+
+// validateTopIndentLevel checks that the content does not start with an indent level > 0.
+//
+// This error can occur when a template accidentally inserts space. It can cause
+// unpredictable errors depending on whether the text is normalized before being passed
+// into the YAML parser. So we trap it here.
+//
+// See https://github.com/helm/helm/issues/8467
+func validateTopIndentLevel(content string) error {
+ // Read lines until we get to a non-empty one
+ scanner := bufio.NewScanner(bytes.NewBufferString(content))
+ for scanner.Scan() {
+ line := scanner.Text()
+ // If line is empty, skip
+ if strings.TrimSpace(line) == "" {
+ continue
+ }
+ // If it starts with one or more spaces, this is an error
+ if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+ return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line)
+ }
+ // Any other condition passes.
+ return nil
+ }
+ return scanner.Err()
+}
+
+// Validation functions
+func templatesDirExists(templatesPath string) error {
+ _, err := os.Stat(templatesPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("directory does not exist")
+ }
+ return nil
+}
+
+func validateTemplatesDir(templatesPath string) error {
+ fi, err := os.Stat(templatesPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateAllowedExtension(fileName string) error {
+ ext := filepath.Ext(fileName)
+ validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"}
+
+ if slices.Contains(validExtensions, ext) {
+ return nil
+ }
+
+ return fmt.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
+}
+
+func validateYamlContent(err error) error {
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+ return nil
+}
+
+// validateMetadataName uses the correct validation function for the object
+// Kind, or if not set, defaults to the standard definition of a subdomain in
+// DNS (RFC 1123), used by most resources.
+func validateMetadataName(obj *k8sYamlStruct) error {
+ fn := validateMetadataNameFunc(obj)
+ allErrs := field.ErrorList{}
+ for _, msg := range fn(obj.Metadata.Name, false) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
+ }
+ if len(allErrs) > 0 {
+ return fmt.Errorf("object name does not conform to Kubernetes naming requirements: %q: %w", obj.Metadata.Name, allErrs.ToAggregate())
+ }
+ return nil
+}
+
+// validateMetadataNameFunc will return a name validation function for the
+// object kind, if defined below.
+//
+// Rules should match those set in the various api validations:
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39
+// ...
+//
+// Implementing here to avoid importing k/k.
+//
+// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
+// kinds that don't have special requirements, so is the most likely to work if
+// new kinds are added.
+func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc {
+ switch strings.ToLower(obj.Kind) {
+ case "pod", "node", "secret", "endpoints", "resourcequota", // core
+ "controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
+ "autoscaler", // autoscaler
+ "cronjob", "job", // batch
+ "lease", // coordination
+ "endpointslice", // discovery
+ "networkpolicy", "ingress", // networking
+ "podsecuritypolicy", // policy
+ "priorityclass", // scheduling
+ "podpreset", // settings
+ "storageclass", "volumeattachment", "csinode": // storage
+ return validation.NameIsDNSSubdomain
+ case "service":
+ return validation.NameIsDNS1035Label
+ case "namespace":
+ return validation.ValidateNamespaceName
+ case "serviceaccount":
+ return validation.ValidateServiceAccountName
+ case "certificatesigningrequest":
+ // No validation.
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
+ return func(_ string, _ bool) []string { return nil }
+ case "role", "clusterrole", "rolebinding", "clusterrolebinding":
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
+ return func(name string, _ bool) []string {
+ return apipath.IsValidPathSegmentName(name)
+ }
+ default:
+ return validation.NameIsDNSSubdomain
+ }
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *k8sYamlStruct, manifest string) error {
+ switch yamlStruct.Kind {
+ case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+ // verify that matchLabels or matchExpressions is present
+ if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") {
+ return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+ }
+ }
+ return nil
+}
+
+func validateListAnnotations(yamlStruct *k8sYamlStruct, manifest string) error {
+ if yamlStruct.Kind == "List" {
+ m := struct {
+ Items []struct {
+ Metadata struct {
+ Annotations map[string]string
+ }
+ }
+ }{}
+
+ if err := yaml.Unmarshal([]byte(manifest), &m); err != nil {
+ return validateYamlContent(err)
+ }
+
+ for _, i := range m.Items {
+ if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
+ return errors.New("annotation 'helm.sh/resource-policy' within List objects are ignored")
+ }
+ }
+ }
+ return nil
+}
+
+// k8sYamlStruct stubs a Kubernetes YAML file.
+type k8sYamlStruct struct {
+ APIVersion string `json:"apiVersion"`
+ Kind string
+ Metadata k8sYamlMetadata
+}
+
+type k8sYamlMetadata struct {
+ Namespace string
+ Name string
+}
diff --git a/internal/chart/v3/lint/rules/template_test.go b/internal/chart/v3/lint/rules/template_test.go
new file mode 100644
index 000000000..40bcfa26b
--- /dev/null
+++ b/internal/chart/v3/lint/rules/template_test.go
@@ -0,0 +1,441 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+const templateTestBasedir = "./testdata/albatross"
+
+func TestValidateAllowedExtension(t *testing.T) {
+ var failTest = []string{"/foo", "/test.toml"}
+ for _, test := range failTest {
+ err := validateAllowedExtension(test)
+ if err == nil || !strings.Contains(err.Error(), "Valid extensions are .yaml, .yml, .tpl, or .txt") {
+ t.Errorf("validateAllowedExtension('%s') to return \"Valid extensions are .yaml, .yml, .tpl, or .txt\", got no error", test)
+ }
+ }
+ var successTest = []string{"/foo.yaml", "foo.yaml", "foo.tpl", "/foo/bar/baz.yaml", "NOTES.txt"}
+ for _, test := range successTest {
+ err := validateAllowedExtension(test)
+ if err != nil {
+ t.Errorf("validateAllowedExtension('%s') to return no error but got \"%s\"", test, err.Error())
+ }
+ }
+}
+
+var values = map[string]interface{}{"nameOverride": "", "httpPort": 80}
+
+const namespace = "testNamespace"
+const strict = false
+
+func TestTemplateParsing(t *testing.T) {
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected one error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("Unexpected error: %s", res[0])
+ }
+}
+
+var wrongTemplatePath = filepath.Join(templateTestBasedir, "templates", "fail.yaml")
+var ignoredTemplatePath = filepath.Join(templateTestBasedir, "fail.yaml.ignored")
+
+// Test a template with all the existing features:
+// namespaces, partial templates
+func TestTemplateIntegrationHappyPath(t *testing.T) {
+ // Rename file so it gets ignored by the linter
+ os.Rename(wrongTemplatePath, ignoredTemplatePath)
+ defer os.Rename(ignoredTemplatePath, wrongTemplatePath)
+
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 0 {
+ t.Fatalf("Expected no error, got %d, %v", len(res), res)
+ }
+}
+
+func TestMultiTemplateFail(t *testing.T) {
+ linter := support.Linter{ChartDir: "./testdata/multi-template-fail"}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected 1 error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "object name does not conform to Kubernetes naming requirements") {
+ t.Errorf("Unexpected error: %s", res[0].Err)
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ tests := []struct {
+ obj *k8sYamlStruct
+ wantErr bool
+ }{
+ // Most kinds use IsDNS1123Subdomain.
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true},
+
+ // Service uses IsDNS1035Label.
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+
+ // Namespace uses IsDNS1123Label.
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false},
+
+ // CertificateSigningRequest has no validation.
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, false},
+
+ // RBAC uses path validation.
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+
+ // Unknown Kind
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+
+ // No kind
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%s/%s", tt.obj.Kind, tt.obj.Metadata.Name), func(t *testing.T) {
+ if err := validateMetadataName(tt.obj); (err != nil) != tt.wantErr {
+ t.Errorf("validateMetadataName() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestDeprecatedAPIFails(t *testing.T) {
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "failapi",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/baddeployment.yaml",
+ Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"),
+ },
+ {
+ Name: "templates/goodsecret.yaml",
+ Data: []byte("apiVersion: v1\nkind: Secret\nmetadata:\n name: goodsecret"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(&linter, values, namespace, strict)
+ if l := len(linter.Messages); l != 1 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 1 lint error, got %d", l)
+ }
+
+ err := linter.Messages[0].Err.(deprecatedAPIError)
+ if err.Deprecated != "apps/v1beta1 Deployment" {
+ t.Errorf("Surprised to learn that %q is deprecated", err.Deprecated)
+ }
+}
+
+const manifest = `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myval1: {{default "val" .Values.mymap.key1 }}
+ myval2: {{default "val" .Values.mymap.key2 }}
+`
+
+// TestStrictTemplateParsingMapError is a regression test.
+//
+// The template engine should not produce an error when a map in values.yaml does
+// not contain all possible keys.
+//
+// See https://github.com/helm/helm/issues/7483
+func TestStrictTemplateParsingMapError(t *testing.T) {
+
+ ch := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "regression7483",
+ APIVersion: "v2",
+ Version: "0.1.0",
+ },
+ Values: map[string]interface{}{
+ "mymap": map[string]string{
+ "key1": "val1",
+ },
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/configmap.yaml",
+ Data: []byte(manifest),
+ },
+ },
+ }
+ dir := t.TempDir()
+ if err := chartutil.SaveDir(&ch, dir); err != nil {
+ t.Fatal(err)
+ }
+ linter := &support.Linter{
+ ChartDir: filepath.Join(dir, ch.Metadata.Name),
+ }
+ Templates(linter, ch.Values, namespace, strict)
+ if len(linter.Messages) != 0 {
+ t.Errorf("expected zero messages, got %d", len(linter.Messages))
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %q", i, msg)
+ }
+ }
+}
+
+func TestValidateMatchSelector(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ Metadata: k8sYamlMetadata{
+ Name: "mydeployment",
+ },
+ }
+ manifest := `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchExpressions:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err == nil {
+ t.Error("expected Deployment with no selector to fail")
+ }
+}
+
+func TestValidateTopIndentLevel(t *testing.T) {
+ for doc, shouldFail := range map[string]bool{
+ // Should not fail
+ "\n\n\n\t\n \t\n": false,
+ "apiVersion:foo\n bar:baz": false,
+ "\n\n\napiVersion:foo\n\n\n": false,
+ // Should fail
+ " apiVersion:foo": true,
+ "\n\n apiVersion:foo\n\n": true,
+ } {
+ if err := validateTopIndentLevel(doc); (err == nil) == shouldFail {
+ t.Errorf("Expected %t for %q", shouldFail, doc)
+ }
+ }
+
+}
+
+// TestEmptyWithCommentsManifests checks the lint is not failing against empty manifests that contains only comments
+// See https://github.com/helm/helm/issues/8621
+func TestEmptyWithCommentsManifests(t *testing.T) {
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "emptymanifests",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/empty-with-comments.yaml",
+ Data: []byte("#@formatter:off\n"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(&linter, values, namespace, strict)
+ if l := len(linter.Messages); l > 0 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 0 lint errors, got %d", l)
+ }
+}
+func TestValidateListAnnotations(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "List",
+ Metadata: k8sYamlMetadata{
+ Name: "list",
+ },
+ }
+ manifest := `
+apiVersion: v1
+kind: List
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+`
+
+ if err := validateListAnnotations(md, manifest); err == nil {
+ t.Fatal("expected list with nested keep annotations to fail")
+ }
+
+ manifest = `
+apiVersion: v1
+kind: List
+metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+`
+
+ if err := validateListAnnotations(md, manifest); err != nil {
+ t.Fatalf("List objects keep annotations should pass. got: %s", err)
+ }
+}
diff --git a/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml b/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..5e1ed515c
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: albatross
+description: testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/pkg/lint/rules/testdata/albatross/templates/_helpers.tpl b/internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl
similarity index 100%
rename from pkg/lint/rules/testdata/albatross/templates/_helpers.tpl
rename to internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl
diff --git a/pkg/lint/rules/testdata/albatross/templates/fail.yaml b/internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/albatross/templates/fail.yaml
rename to internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml
diff --git a/pkg/lint/rules/testdata/albatross/templates/svc.yaml b/internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/albatross/templates/svc.yaml
rename to internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml
diff --git a/pkg/lint/rules/testdata/albatross/values.yaml b/internal/chart/v3/lint/rules/testdata/albatross/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/albatross/values.yaml
rename to internal/chart/v3/lint/rules/testdata/albatross/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml
new file mode 100644
index 000000000..8a598473b
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml
@@ -0,0 +1,15 @@
+name: "some-chart"
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 72445e2
+home: ""
+type: application
+appVersion: 72225e2
+icon: "https://some-url.com/icon.jpeg"
+dependencies:
+ - name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/pkg/lint/rules/testdata/badchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badchartfile/Chart.yaml
rename to internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml
diff --git a/pkg/lint/rules/testdata/badchartfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badchartfile/values.yaml
rename to internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml
new file mode 100644
index 000000000..41f452354
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: "../badchartname"
+type: application
diff --git a/pkg/lint/rules/testdata/badchartname/values.yaml b/internal/chart/v3/lint/rules/testdata/badchartname/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badchartname/values.yaml
rename to internal/chart/v3/lint/rules/testdata/badchartname/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml
new file mode 100644
index 000000000..3bf007393
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: badcrdfile
+type: application
+icon: http://riverrun.io
diff --git a/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
new file mode 100644
index 000000000..468916053
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
@@ -0,0 +1,2 @@
+apiVersion: bad.k8s.io/v1beta1
+kind: CustomResourceDefinition
diff --git a/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
new file mode 100644
index 000000000..523b97f85
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
@@ -0,0 +1,2 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: NotACustomResourceDefinition
diff --git a/pkg/lint/rules/testdata/withsubchart/values.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/values.yaml
rename to internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep
diff --git a/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml
new file mode 100644
index 000000000..2fffc7715
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badcrdfile.
diff --git a/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml
new file mode 100644
index 000000000..aace27e21
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+name: badvaluesfile
+description: A Helm chart for Kubernetes
+version: 0.0.1
+home: ""
+icon: http://riverrun.io
diff --git a/pkg/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
rename to internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
diff --git a/pkg/lint/rules/testdata/badvaluesfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badvaluesfile/values.yaml
rename to internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml b/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml
new file mode 100644
index 000000000..bf8f5e309
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: goodone
+description: good testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml b/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml
new file mode 100644
index 000000000..1d7350f1d
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: tests.test.io
+spec:
+ group: test.io
+ names:
+ kind: Test
+ listKind: TestList
+ plural: tests
+ singular: test
+ scope: Namespaced
+ versions:
+ - name : v1alpha2
+ served: true
+ storage: true
+ - name : v1alpha1
+ served: true
+ storage: false
diff --git a/pkg/lint/rules/testdata/goodone/templates/goodone.yaml b/internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/goodone/templates/goodone.yaml
rename to internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml
diff --git a/pkg/lint/rules/testdata/goodone/values.yaml b/internal/chart/v3/lint/rules/testdata/goodone/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/goodone/values.yaml
rename to internal/chart/v3/lint/rules/testdata/goodone/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml
new file mode 100644
index 000000000..0fd58d1d4
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml
@@ -0,0 +1,6 @@
+name: some-chart
+apiVersion: v2
+apiVersion: v1
+description: A Helm chart for Kubernetes
+version: 1.3.0
+icon: http://example.com
diff --git a/internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml b/internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml
new file mode 100644
index 000000000..0f6d1ee98
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: invalidcrdsdir
+type: application
+icon: http://riverrun.io
diff --git a/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml
new file mode 100644
index 000000000..6b1611a64
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml
@@ -0,0 +1 @@
+# Default values for invalidcrdsdir.
diff --git a/pkg/lint/rules/testdata/malformed-template/.helmignore b/internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore
similarity index 100%
rename from pkg/lint/rules/testdata/malformed-template/.helmignore
rename to internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore
diff --git a/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml
new file mode 100644
index 000000000..d46b98cb5
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml
@@ -0,0 +1,25 @@
+apiVersion: v3
+name: test
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+icon: https://riverrun.io
\ No newline at end of file
diff --git a/pkg/lint/rules/testdata/malformed-template/templates/bad.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/malformed-template/templates/bad.yaml
rename to internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml
diff --git a/pkg/lint/rules/testdata/malformed-template/values.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/malformed-template/values.yaml
rename to internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml b/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml
new file mode 100644
index 000000000..bfb580bea
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: multi-template-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/pkg/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
rename to internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml
new file mode 100644
index 000000000..2a29c33fa
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: v3-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/pkg/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/templates/_helpers.tpl
rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl
diff --git a/pkg/lint/rules/testdata/v3-fail/templates/deployment.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/templates/deployment.yaml
rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml
diff --git a/pkg/lint/rules/testdata/v3-fail/templates/ingress.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/templates/ingress.yaml
rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml
diff --git a/pkg/lint/rules/testdata/v3-fail/templates/service.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/templates/service.yaml
rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml
diff --git a/pkg/lint/rules/testdata/v3-fail/values.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/values.yaml
rename to internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml
new file mode 100644
index 000000000..fa15eabaf
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v3
+name: withsubchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
+icon: http://riverrun.io
+
+dependencies:
+ - name: subchart
+ version: 0.1.16
+ repository: "file://../subchart"
+ import-values:
+ - child: subchart
+ parent: subchart
+
diff --git a/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
new file mode 100644
index 000000000..35b13e70d
--- /dev/null
+++ b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+name: subchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
rename to internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
rename to internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
diff --git a/pkg/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/templates/mainchart.yaml
rename to internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml
diff --git a/internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/lint/rules/values.go b/internal/chart/v3/lint/rules/values.go
new file mode 100644
index 000000000..0af9765dd
--- /dev/null
+++ b/internal/chart/v3/lint/rules/values.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+)
+
+// ValuesWithOverrides tests the values.yaml file.
+//
+// If a schema is present in the chart, values are tested against that. Otherwise,
+// they are only tested for well-formedness.
+//
+// If additional values are supplied, they are coalesced into the values in values.yaml.
+func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) {
+ file := "values.yaml"
+ vf := filepath.Join(linter.ChartDir, file)
+ fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
+
+ if !fileExists {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides, skipSchemaValidation))
+}
+
+func validateValuesFileExistence(valuesPath string) error {
+ _, err := os.Stat(valuesPath)
+ if err != nil {
+ return fmt.Errorf("file does not exist")
+ }
+ return nil
+}
+
+func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error {
+ values, err := common.ReadValuesFile(valuesPath)
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+
+ // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
+ // level values against the top-level expectations. Subchart values are not linted.
+ // We could change that. For now, though, we retain that strategy, and thus can
+ // coalesce tables (like reuse-values does) instead of doing the full chart
+ // CoalesceValues
+ coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+ coalescedValues = util.CoalesceTables(coalescedValues, values)
+
+ ext := filepath.Ext(valuesPath)
+ schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
+ schema, err := os.ReadFile(schemaPath)
+ if len(schema) == 0 {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if !skipSchemaValidation {
+ return util.ValidateAgainstSingleSchema(coalescedValues, schema)
+ }
+
+ return nil
+}
diff --git a/pkg/lint/rules/values_test.go b/internal/chart/v3/lint/rules/values_test.go
similarity index 80%
rename from pkg/lint/rules/values_test.go
rename to internal/chart/v3/lint/rules/values_test.go
index 8a2556a60..288b77436 100644
--- a/pkg/lint/rules/values_test.go
+++ b/internal/chart/v3/lint/rules/values_test.go
@@ -67,7 +67,7 @@ func TestValidateValuesFileWellFormed(t *testing.T) {
`
tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml))
valfile := filepath.Join(tmpdir, "values.yaml")
- if err := validateValuesFile(valfile, map[string]interface{}{}); err == nil {
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil {
t.Fatal("expected values file to fail parsing")
}
}
@@ -78,7 +78,7 @@ func TestValidateValuesFileSchema(t *testing.T) {
createTestingSchema(t, tmpdir)
valfile := filepath.Join(tmpdir, "values.yaml")
- if err := validateValuesFile(valfile, map[string]interface{}{}); err != nil {
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil {
t.Fatalf("Failed validation with %s", err)
}
}
@@ -91,12 +91,26 @@ func TestValidateValuesFileSchemaFailure(t *testing.T) {
valfile := filepath.Join(tmpdir, "values.yaml")
- err := validateValuesFile(valfile, map[string]interface{}{})
+ err := validateValuesFile(valfile, map[string]interface{}{}, false)
if err == nil {
t.Fatal("expected values file to fail parsing")
}
- assert.Contains(t, err.Error(), "Expected: string, given: integer", "integer should be caught by schema")
+ assert.Contains(t, err.Error(), "- at '/username': got number, want string")
+}
+
+func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T) {
+ // 1234 is an int, not a string. This should fail normally but pass with skipSchemaValidation.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, true)
+ if err != nil {
+ t.Fatal("expected values file to pass parsing because of skipSchemaValidation")
+ }
}
func TestValidateValuesFileSchemaOverrides(t *testing.T) {
@@ -108,7 +122,7 @@ func TestValidateValuesFileSchemaOverrides(t *testing.T) {
createTestingSchema(t, tmpdir)
valfile := filepath.Join(tmpdir, "values.yaml")
- if err := validateValuesFile(valfile, overrides); err != nil {
+ if err := validateValuesFile(valfile, overrides, false); err != nil {
t.Fatalf("Failed validation with %s", err)
}
}
@@ -129,7 +143,7 @@ func TestValidateValuesFile(t *testing.T) {
name: "value not overridden",
yaml: "username: admin\npassword:",
overrides: map[string]interface{}{"username": "anotherUser"},
- errorMessage: "Expected: string, given: null",
+ errorMessage: "- at '/password': got null, want string",
},
{
name: "value overridden",
@@ -145,7 +159,7 @@ func TestValidateValuesFile(t *testing.T) {
valfile := filepath.Join(tmpdir, "values.yaml")
- err := validateValuesFile(valfile, tt.overrides)
+ err := validateValuesFile(valfile, tt.overrides, false)
switch {
case err != nil && tt.errorMessage == "":
diff --git a/internal/chart/v3/lint/support/doc.go b/internal/chart/v3/lint/support/doc.go
new file mode 100644
index 000000000..2d54a9b7d
--- /dev/null
+++ b/internal/chart/v3/lint/support/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package support contains tools for linting charts.
+
+Linting is the process of testing charts for errors or warnings regarding
+formatting, compilation, or standards compliance.
+*/
+package support // import "helm.sh/helm/v4/internal/chart/v3/lint/support"
diff --git a/pkg/lint/support/message.go b/internal/chart/v3/lint/support/message.go
similarity index 100%
rename from pkg/lint/support/message.go
rename to internal/chart/v3/lint/support/message.go
diff --git a/pkg/lint/support/message_test.go b/internal/chart/v3/lint/support/message_test.go
similarity index 98%
rename from pkg/lint/support/message_test.go
rename to internal/chart/v3/lint/support/message_test.go
index 9e12a638b..ce5b5e42e 100644
--- a/pkg/lint/support/message_test.go
+++ b/internal/chart/v3/lint/support/message_test.go
@@ -17,12 +17,10 @@ limitations under the License.
package support
import (
+ "errors"
"testing"
-
- "github.com/pkg/errors"
)
-var linter = Linter{}
var errLint = errors.New("lint failed")
func TestRunLinterRule(t *testing.T) {
@@ -46,6 +44,7 @@ func TestRunLinterRule(t *testing.T) {
{-1, errLint, 4, false, ErrorSev},
}
+ linter := Linter{}
for _, test := range tests {
isValid := linter.RunLinterRule(test.Severity, "chart", test.LintError)
if len(linter.Messages) != test.ExpectedMessages {
diff --git a/internal/chart/v3/loader/archive.go b/internal/chart/v3/loader/archive.go
new file mode 100644
index 000000000..311959d56
--- /dev/null
+++ b/internal/chart/v3/loader/archive.go
@@ -0,0 +1,234 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+)
+
+// MaxDecompressedChartSize is the maximum size of a chart archive that will be
+// decompressed. This is the decompressed size of all the files.
+// The default value is 100 MiB.
+var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB
+
+// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load.
+// The size of the file is the decompressed version of it when it is stored in an archive.
+var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB
+
+var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)
+
+// FileLoader loads a chart from a file
+type FileLoader string
+
+// Load loads a chart
+func (l FileLoader) Load() (*chart.Chart, error) {
+ return LoadFile(string(l))
+}
+
+// LoadFile loads from an archive file.
+func LoadFile(name string) (*chart.Chart, error) {
+ if fi, err := os.Stat(name); err != nil {
+ return nil, err
+ } else if fi.IsDir() {
+ return nil, errors.New("cannot load a directory")
+ }
+
+ raw, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ err = ensureArchive(name, raw)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := LoadArchive(raw)
+ if err != nil {
+ if err == gzip.ErrHeader {
+ return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err)
+ }
+ }
+ return c, err
+}
+
+// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.
+//
+// Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence
+// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error
+// if we didn't check for this.
+func ensureArchive(name string, raw *os.File) error {
+ defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed.
+
+ // Check the file format to give us a chance to provide the user with more actionable feedback.
+ buffer := make([]byte, 512)
+ _, err := raw.Read(buffer)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("file '%s' cannot be read: %s", name, err)
+ }
+
+ // Helm may identify achieve of the application/x-gzip as application/vnd.ms-fontobject.
+ // Fix for: https://github.com/helm/helm/issues/12261
+ if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" && !isGZipApplication(buffer) {
+ // TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide
+ // variety of content (Makefile, .zshrc) as valid YAML without errors.
+
+ // Wrong content type. Let's check if it's yaml and give an extra hint?
+ if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
+ return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name)
+ }
+ return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType)
+ }
+ return nil
+}
+
+// isGZipApplication checks whether the archive is of the application/x-gzip type.
+func isGZipApplication(data []byte) bool {
+ sig := []byte("\x1F\x8B\x08")
+ return bytes.HasPrefix(data, sig)
+}
+
+// LoadArchiveFiles reads in files out of an archive into memory. This function
+// performs important path security checks and should always be used before
+// expanding a tarball
+func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
+ unzipped, err := gzip.NewReader(in)
+ if err != nil {
+ return nil, err
+ }
+ defer unzipped.Close()
+
+ files := []*BufferedFile{}
+ tr := tar.NewReader(unzipped)
+ remainingSize := MaxDecompressedChartSize
+ for {
+ b := bytes.NewBuffer(nil)
+ hd, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if hd.FileInfo().IsDir() {
+ // Use this instead of hd.Typeflag because we don't have to do any
+ // inference chasing.
+ continue
+ }
+
+ switch hd.Typeflag {
+ // We don't want to process these extension header files.
+ case tar.TypeXGlobalHeader, tar.TypeXHeader:
+ continue
+ }
+
+ // Archive could contain \ if generated on Windows
+ delimiter := "/"
+ if strings.ContainsRune(hd.Name, '\\') {
+ delimiter = "\\"
+ }
+
+ parts := strings.Split(hd.Name, delimiter)
+ n := strings.Join(parts[1:], delimiter)
+
+ // Normalize the path to the / delimiter
+ n = strings.ReplaceAll(n, delimiter, "/")
+
+ if path.IsAbs(n) {
+ return nil, errors.New("chart illegally contains absolute paths")
+ }
+
+ n = path.Clean(n)
+ if n == "." {
+ // In this case, the original path was relative when it should have been absolute.
+ return nil, fmt.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
+ }
+ if strings.HasPrefix(n, "..") {
+ return nil, errors.New("chart illegally references parent directory")
+ }
+
+ // In some particularly arcane acts of path creativity, it is possible to intermix
+ // UNIX and Windows style paths in such a way that you produce a result of the form
+ // c:/foo even after all the built-in absolute path checks. So we explicitly check
+ // for this condition.
+ if drivePathPattern.MatchString(n) {
+ return nil, errors.New("chart contains illegally named files")
+ }
+
+ if parts[0] == "Chart.yaml" {
+ return nil, errors.New("chart yaml not in base directory")
+ }
+
+ if hd.Size > remainingSize {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
+ if hd.Size > MaxDecompressedFileSize {
+ return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize)
+ }
+
+ limitedReader := io.LimitReader(tr, remainingSize)
+
+ bytesWritten, err := io.Copy(b, limitedReader)
+ if err != nil {
+ return nil, err
+ }
+
+ remainingSize -= bytesWritten
+ // When the bytesWritten are less than the file size it means the limit reader ended
+ // copying early. Here we report that error. This is important if the last file extracted
+ // is the one that goes over the limit. It assumes the Size stored in the tar header
+ // is correct, something many applications do.
+ if bytesWritten < hd.Size || remainingSize <= 0 {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
+ data := bytes.TrimPrefix(b.Bytes(), utf8bom)
+
+ files = append(files, &BufferedFile{Name: n, Data: data})
+ b.Reset()
+ }
+
+ if len(files) == 0 {
+ return nil, errors.New("no files in chart archive")
+ }
+ return files, nil
+}
+
+// LoadArchive loads from a reader containing a compressed tar archive.
+func LoadArchive(in io.Reader) (*chart.Chart, error) {
+ files, err := LoadArchiveFiles(in)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/internal/chart/v3/loader/archive_test.go b/internal/chart/v3/loader/archive_test.go
new file mode 100644
index 000000000..d16c47563
--- /dev/null
+++ b/internal/chart/v3/loader/archive_test.go
@@ -0,0 +1,92 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "testing"
+)
+
+func TestLoadArchiveFiles(t *testing.T) {
+ tcs := []struct {
+ name string
+ generate func(w *tar.Writer)
+ check func(t *testing.T, files []*BufferedFile, err error)
+ }{
+ {
+ name: "empty input should return no files",
+ generate: func(_ *tar.Writer) {},
+ check: func(t *testing.T, _ []*BufferedFile, err error) {
+ t.Helper()
+ if err.Error() != "no files in chart archive" {
+ t.Fatalf(`expected "no files in chart archive", got [%#v]`, err)
+ }
+ },
+ },
+ {
+ name: "should ignore files with XGlobalHeader type",
+ generate: func(w *tar.Writer) {
+ // simulate the presence of a `pax_global_header` file like you would get when
+ // processing a GitHub release archive.
+ err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeXGlobalHeader,
+ Name: "pax_global_header",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // we need to have at least one file, otherwise we'll get the "no files in chart archive" error
+ err = w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: "dir/empty",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ },
+ check: func(t *testing.T, files []*BufferedFile, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf(`got unwanted error [%#v] for tar file with pax_global_header content`, err)
+ }
+
+ if len(files) != 1 {
+ t.Fatalf(`expected to get one file but got [%v]`, files)
+ }
+ },
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ buf := &bytes.Buffer{}
+ gzw := gzip.NewWriter(buf)
+ tw := tar.NewWriter(gzw)
+
+ tc.generate(tw)
+
+ _ = tw.Close()
+ _ = gzw.Close()
+
+ files, err := LoadArchiveFiles(buf)
+ tc.check(t, files, err)
+ })
+ }
+}
diff --git a/internal/chart/v3/loader/directory.go b/internal/chart/v3/loader/directory.go
new file mode 100644
index 000000000..947051604
--- /dev/null
+++ b/internal/chart/v3/loader/directory.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/sympath"
+ "helm.sh/helm/v4/pkg/ignore"
+)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// DirLoader loads a chart from a directory
+type DirLoader string
+
+// Load loads the chart
+func (l DirLoader) Load() (*chart.Chart, error) {
+ return LoadDir(string(l))
+}
+
+// LoadDir loads from a directory.
+//
+// This loads charts only from directories.
+func LoadDir(dir string) (*chart.Chart, error) {
+ topdir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Just used for errors.
+ c := &chart.Chart{}
+
+ rules := ignore.Empty()
+ ifile := filepath.Join(topdir, ignore.HelmIgnore)
+ if _, err := os.Stat(ifile); err == nil {
+ r, err := ignore.ParseFile(ifile)
+ if err != nil {
+ return c, err
+ }
+ rules = r
+ }
+ rules.AddDefaults()
+
+ files := []*BufferedFile{}
+ topdir += string(filepath.Separator)
+
+ walk := func(name string, fi os.FileInfo, err error) error {
+ n := strings.TrimPrefix(name, topdir)
+ if n == "" {
+ // No need to process top level. Avoid bug with helmignore .* matching
+ // empty names. See issue 1779.
+ return nil
+ }
+
+ // Normalize to / since it will also work on Windows
+ n = filepath.ToSlash(n)
+
+ if err != nil {
+ return err
+ }
+ if fi.IsDir() {
+ // Directory-based ignore rules should involve skipping the entire
+ // contents of that directory.
+ if rules.Ignore(n, fi) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // If a .helmignore file matches, skip this file.
+ if rules.Ignore(n, fi) {
+ return nil
+ }
+
+ // Irregular files include devices, sockets, and other uses of files that
+ // are not regular files. In Go they have a file mode type bit set.
+ // See https://golang.org/pkg/os/#FileMode for examples.
+ if !fi.Mode().IsRegular() {
+ return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
+ }
+
+ if fi.Size() > MaxDecompressedFileSize {
+ return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize)
+ }
+
+ data, err := os.ReadFile(name)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", n, err)
+ }
+
+ data = bytes.TrimPrefix(data, utf8bom)
+
+ files = append(files, &BufferedFile{Name: n, Data: data})
+ return nil
+ }
+ if err = sympath.Walk(topdir, walk); err != nil {
+ return c, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/internal/chart/v3/loader/load.go b/internal/chart/v3/loader/load.go
new file mode 100644
index 000000000..2959fc71d
--- /dev/null
+++ b/internal/chart/v3/loader/load.go
@@ -0,0 +1,220 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "os"
+ "path/filepath"
+ "strings"
+
+ utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// ChartLoader loads a chart.
+type ChartLoader interface {
+ Load() (*chart.Chart, error)
+}
+
+// Loader returns a new ChartLoader appropriate for the given chart name
+func Loader(name string) (ChartLoader, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return DirLoader(name), nil
+ }
+ return FileLoader(name), nil
+}
+
+// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
+//
+// This is the preferred way to load a chart. It will discover the chart encoding
+// and hand off to the appropriate chart reader.
+//
+// If a .helmignore file is present, the directory loader will skip loading any files
+// matching it. But .helmignore is not evaluated when reading out of an archive.
+func Load(name string) (*chart.Chart, error) {
+ l, err := Loader(name)
+ if err != nil {
+ return nil, err
+ }
+ return l.Load()
+}
+
+// BufferedFile represents an archive file buffered for later processing.
+type BufferedFile struct {
+ Name string
+ Data []byte
+}
+
+// LoadFiles loads from in-memory files.
+func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
+ c := new(chart.Chart)
+ subcharts := make(map[string][]*BufferedFile)
+
+ // do not rely on assumed ordering of files in the chart and crash
+ // if Chart.yaml was not coming early enough to initialize metadata
+ for _, f := range files {
+ c.Raw = append(c.Raw, &common.File{Name: f.Name, Data: f.Data})
+ if f.Name == "Chart.yaml" {
+ if c.Metadata == nil {
+ c.Metadata = new(chart.Metadata)
+ }
+ if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ // While the documentation says the APIVersion is required, in practice there
+ // are cases where that's not enforced. Since this package set is for v3 charts,
+ // when this function is used v3 is automatically added when not present.
+ if c.Metadata.APIVersion == "" {
+ c.Metadata.APIVersion = chart.APIVersionV3
+ }
+ }
+ }
+ for _, f := range files {
+ switch {
+ case f.Name == "Chart.yaml":
+ // already processed
+ continue
+ case f.Name == "Chart.lock":
+ c.Lock = new(chart.Lock)
+ if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
+ return c, fmt.Errorf("cannot load Chart.lock: %w", err)
+ }
+ case f.Name == "values.yaml":
+ values, err := LoadValues(bytes.NewReader(f.Data))
+ if err != nil {
+ return c, fmt.Errorf("cannot load values.yaml: %w", err)
+ }
+ c.Values = values
+ case f.Name == "values.schema.json":
+ c.Schema = f.Data
+
+ case strings.HasPrefix(f.Name, "templates/"):
+ c.Templates = append(c.Templates, &common.File{Name: f.Name, Data: f.Data})
+ case strings.HasPrefix(f.Name, "charts/"):
+ if filepath.Ext(f.Name) == ".prov" {
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
+ continue
+ }
+
+ fname := strings.TrimPrefix(f.Name, "charts/")
+ cname := strings.SplitN(fname, "/", 2)[0]
+ subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data})
+ default:
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
+ }
+ }
+
+ if c.Metadata == nil {
+ return c, errors.New("Chart.yaml file is missing") //nolint:staticcheck
+ }
+
+ if err := c.Validate(); err != nil {
+ return c, err
+ }
+
+ for n, files := range subcharts {
+ var sc *chart.Chart
+ var err error
+ switch {
+ case strings.IndexAny(n, "_.") == 0:
+ continue
+ case filepath.Ext(n) == ".tgz":
+ file := files[0]
+ if file.Name != n {
+ return c, fmt.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name)
+ }
+ // Untar the chart and add to c.Dependencies
+ sc, err = LoadArchive(bytes.NewBuffer(file.Data))
+ default:
+ // We have to trim the prefix off of every file, and ignore any file
+ // that is in charts/, but isn't actually a chart.
+ buff := make([]*BufferedFile, 0, len(files))
+ for _, f := range files {
+ parts := strings.SplitN(f.Name, "/", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ f.Name = parts[1]
+ buff = append(buff, f)
+ }
+ sc, err = LoadFiles(buff)
+ }
+
+ if err != nil {
+ return c, fmt.Errorf("error unpacking subchart %s in %s: %w", n, c.Name(), err)
+ }
+ c.AddDependency(sc)
+ }
+
+ return c, nil
+}
+
+// LoadValues loads values from a reader.
+//
+// The reader is expected to contain one or more YAML documents, the values of which are merged.
+// And the values can be either a chart's default values or a user-supplied values.
+func LoadValues(data io.Reader) (map[string]interface{}, error) {
+ values := map[string]interface{}{}
+ reader := utilyaml.NewYAMLReader(bufio.NewReader(data))
+ for {
+ currentMap := map[string]interface{}{}
+ raw, err := reader.Read()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("error reading yaml document: %w", err)
+ }
+ if err := yaml.Unmarshal(raw, ¤tMap); err != nil {
+ return nil, fmt.Errorf("cannot unmarshal yaml document: %w", err)
+ }
+ values = MergeMaps(values, currentMap)
+ }
+ return values, nil
+}
+
+// MergeMaps merges two maps. If a key exists in both maps, the value from b will be used.
+// If the value is a map, the maps will be merged recursively.
+func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
+ out := make(map[string]interface{}, len(a))
+ maps.Copy(out, a)
+ for k, v := range b {
+ if v, ok := v.(map[string]interface{}); ok {
+ if bv, ok := out[k]; ok {
+ if bv, ok := bv.(map[string]interface{}); ok {
+ out[k] = MergeMaps(bv, v)
+ continue
+ }
+ }
+ }
+ out[k] = v
+ }
+ return out
+}
diff --git a/internal/chart/v3/loader/load_test.go b/internal/chart/v3/loader/load_test.go
new file mode 100644
index 000000000..1d8ca836a
--- /dev/null
+++ b/internal/chart/v3/loader/load_test.go
@@ -0,0 +1,712 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestLoadDir(t *testing.T) {
+ l, err := Loader("testdata/frobnitz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadDirWithDevNull(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("test only works on unix systems with /dev/null present")
+ }
+
+ l, err := Loader("testdata/frobnitz_with_dev_null")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ if _, err := l.Load(); err == nil {
+ t.Errorf("packages with an irregular file (/dev/null) should not load")
+ }
+}
+
+func TestLoadDirWithSymlink(t *testing.T) {
+ sym := filepath.Join("..", "LICENSE")
+ link := filepath.Join("testdata", "frobnitz_with_symlink", "LICENSE")
+
+ if err := os.Symlink(sym, link); err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(link)
+
+ l, err := Loader("testdata/frobnitz_with_symlink")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestBomTestData(t *testing.T) {
+ testFiles := []string{"frobnitz_with_bom/.helmignore", "frobnitz_with_bom/templates/template.tpl", "frobnitz_with_bom/Chart.yaml"}
+ for _, file := range testFiles {
+ data, err := os.ReadFile("testdata/" + file)
+ if err != nil || !bytes.HasPrefix(data, utf8bom) {
+ t.Errorf("Test file has no BOM or is invalid: testdata/%s", file)
+ }
+ }
+
+ archive, err := os.ReadFile("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ unzipped, err := gzip.NewReader(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ defer unzipped.Close()
+ for _, testFile := range testFiles {
+ data := make([]byte, 3)
+ err := unzipped.Reset(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ tr := tar.NewReader(unzipped)
+ for {
+ file, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ if file != nil && strings.EqualFold(file.Name, testFile) {
+ _, err := tr.Read(data)
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ } else {
+ break
+ }
+ }
+ }
+ if !bytes.Equal(data, utf8bom) {
+ t.Fatalf("Test file has no BOM or is invalid: frobnitz_with_bom.tgz/%s", testFile)
+ }
+ }
+}
+
+func TestLoadDirWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadArchiveWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadFile(t *testing.T) {
+ l, err := Loader("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadFiles(t *testing.T) {
+ goodFiles := []*BufferedFile{
+ {
+ Name: "Chart.yaml",
+ Data: []byte(`apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ {
+ Name: "values.yaml",
+ Data: []byte("var: some values"),
+ },
+ {
+ Name: "values.schema.json",
+ Data: []byte("type: Values"),
+ },
+ {
+ Name: "templates/deployment.yaml",
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ Data: []byte("some service"),
+ },
+ }
+
+ c, err := LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+
+ if c.Name() != "frobnitz" {
+ t.Errorf("Expected chart name to be 'frobnitz', got %s", c.Name())
+ }
+
+ if c.Values["var"] != "some values" {
+ t.Error("Expected chart values to be populated with default values")
+ }
+
+ if len(c.Raw) != 5 {
+ t.Errorf("Expected %d files, got %d", 5, len(c.Raw))
+ }
+
+ if !bytes.Equal(c.Schema, []byte("type: Values")) {
+ t.Error("Expected chart schema to be populated with default values")
+ }
+
+ if len(c.Templates) != 2 {
+ t.Errorf("Expected number of templates == 2, got %d", len(c.Templates))
+ }
+
+ if _, err = LoadFiles([]*BufferedFile{}); err == nil {
+ t.Fatal("Expected err to be non-nil")
+ }
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Expected chart metadata missing error, got '%s'", err.Error())
+ }
+}
+
+// Test the order of file loading. The Chart.yaml file needs to come first for
+// later comparison checks. See https://github.com/helm/helm/pull/8948
+func TestLoadFilesOrder(t *testing.T) {
+ goodFiles := []*BufferedFile{
+ {
+ Name: "requirements.yaml",
+ Data: []byte("dependencies:"),
+ },
+ {
+ Name: "values.yaml",
+ Data: []byte("var: some values"),
+ },
+
+ {
+ Name: "templates/deployment.yaml",
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ Data: []byte("some service"),
+ },
+ {
+ Name: "Chart.yaml",
+ Data: []byte(`apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ }
+
+ // Capture stderr to make sure message about Chart.yaml handle dependencies
+ // is not present
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("Unable to create pipe: %s", err)
+ }
+ stderr := log.Writer()
+ log.SetOutput(w)
+ defer func() {
+ log.SetOutput(stderr)
+ }()
+
+ _, err = LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+ w.Close()
+
+ var text bytes.Buffer
+ io.Copy(&text, r)
+ if text.String() != "" {
+ t.Errorf("Expected no message to Stderr, got %s", text.String())
+ }
+
+}
+
+// Packaging the chart on a Windows machine will produce an
+// archive that has \\ as delimiters. Test that we support these archives
+func TestLoadFileBackslash(t *testing.T) {
+ c, err := Load("testdata/frobnitz_backslash-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyChartFileAndTemplate(t, c, "frobnitz_backslash")
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadV3WithReqs(t *testing.T) {
+ l, err := Loader("testdata/frobnitz.v3.reqs")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadInvalidArchive(t *testing.T) {
+ tmpdir := t.TempDir()
+
+ writeTar := func(filename, internalPath string, body []byte) {
+ dest, err := os.Create(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zipper := gzip.NewWriter(dest)
+ tw := tar.NewWriter(zipper)
+
+ h := &tar.Header{
+ Name: internalPath,
+ Mode: 0755,
+ Size: int64(len(body)),
+ ModTime: time.Now(),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write(body); err != nil {
+ t.Fatal(err)
+ }
+ tw.Close()
+ zipper.Close()
+ dest.Close()
+ }
+
+ for _, tt := range []struct {
+ chartname string
+ internal string
+ expectError string
+ }{
+ {"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-name.tgz", "./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name2.tgz", "/./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains content outside the base directory"},
+ {"illegal-name4.tgz", "/missing-leading-slash", "Chart.yaml file is missing"},
+ {"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},
+
+ // Under special circumstances, this can get normalized to things that look like absolute Windows paths
+ {"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
+ {"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
+ {"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
+ } {
+ illegalChart := filepath.Join(tmpdir, tt.chartname)
+ writeTar(illegalChart, tt.internal, []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err == nil {
+ t.Fatal("expected error when unpacking illegal files")
+ }
+ if !strings.Contains(err.Error(), tt.expectError) {
+ t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
+ }
+ }
+
+ // Make sure that absolute path gets interpreted as relative
+ illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
+ writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+
+ // And just to validate that the above was not spurious
+ illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
+ writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Unexpected error message: %s", err)
+ }
+
+ // Finally, test that drive letter gets stripped off on Windows
+ illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
+ writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+}
+
+func TestLoadValues(t *testing.T) {
+ testCases := map[string]struct {
+ data []byte
+ expctedValues map[string]interface{}
+ }{
+ "It should load values correctly": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v1",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ "It should load values correctly with multiple documents in one file": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+---
+foo:
+ image: foo:v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v2",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ }
+ for testName, testCase := range testCases {
+ t.Run(testName, func(tt *testing.T) {
+ values, err := LoadValues(bytes.NewReader(testCase.data))
+ if err != nil {
+ tt.Fatal(err)
+ }
+ if !reflect.DeepEqual(values, testCase.expctedValues) {
+ tt.Errorf("Expected values: %v, got %v", testCase.expctedValues, values)
+ }
+ })
+ }
+}
+
+func TestMergeValuesV3(t *testing.T) {
+ nestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "stuff",
+ },
+ }
+ anotherNestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ flatMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": "stuff",
+ }
+ anotherFlatMap := map[string]interface{}{
+ "testing": "fun",
+ }
+
+ testMap := MergeMaps(flatMap, nestedMap)
+ equal := reflect.DeepEqual(testMap, nestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite a flat value. Expected: %v, got %v", nestedMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, flatMap)
+ equal = reflect.DeepEqual(testMap, flatMap)
+ if !equal {
+ t.Errorf("Expected a flat value to overwrite a map. Expected: %v, got %v", flatMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, anotherNestedMap)
+ equal = reflect.DeepEqual(testMap, anotherNestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite another nested map. Expected: %v, got %v", anotherNestedMap, testMap)
+ }
+
+ testMap = MergeMaps(anotherFlatMap, anotherNestedMap)
+ expectedMap := map[string]interface{}{
+ "testing": "fun",
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ equal = reflect.DeepEqual(testMap, expectedMap)
+ if !equal {
+ t.Errorf("Expected a map with different keys to merge properly with another map. Expected: %v, got %v", expectedMap, testMap)
+ }
+}
+
+func verifyChart(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if c.Name() == "" {
+ t.Fatalf("No chart metadata found on %v", c)
+ }
+ t.Logf("Verifying chart %s", c.Name())
+ if len(c.Templates) != 1 {
+ t.Errorf("Expected 1 template, got %d", len(c.Templates))
+ }
+
+ numfiles := 6
+ if len(c.Files) != numfiles {
+ t.Errorf("Expected %d extra files, got %d", numfiles, len(c.Files))
+ for _, n := range c.Files {
+ t.Logf("\t%s", n.Name)
+ }
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d (%v)", len(c.Dependencies()), c.Dependencies())
+ for _, d := range c.Dependencies() {
+ t.Logf("\tSubchart: %s\n", d.Name())
+ }
+ }
+
+ expect := map[string]map[string]string{
+ "alpine": {
+ "version": "0.1.0",
+ },
+ "mariner": {
+ "version": "4.3.2",
+ },
+ }
+
+ for _, dep := range c.Dependencies() {
+ if dep.Metadata == nil {
+ t.Fatalf("expected metadata on dependency: %v", dep)
+ }
+ exp, ok := expect[dep.Name()]
+ if !ok {
+ t.Fatalf("Unknown dependency %s", dep.Name())
+ }
+ if exp["version"] != dep.Metadata.Version {
+ t.Errorf("Expected %s version %s, got %s", dep.Name(), exp["version"], dep.Metadata.Version)
+ }
+ }
+
+}
+
+func verifyDependencies(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyFrobnitz(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ verifyChartFileAndTemplate(t, c, "frobnitz")
+}
+
+func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
+ t.Helper()
+ if c.Metadata == nil {
+ t.Fatal("Metadata is nil")
+ }
+ if c.Name() != name {
+ t.Errorf("Expected %s, got %s", name, c.Name())
+ }
+ if len(c.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(c.Templates))
+ }
+ if c.Templates[0].Name != "templates/template.tpl" {
+ t.Errorf("Unexpected template: %s", c.Templates[0].Name)
+ }
+ if len(c.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(c.Files) != 6 {
+ t.Fatalf("Expected 6 Files, got %d", len(c.Files))
+ }
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(c.Dependencies()))
+ }
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Dependencies.Dependency, got %d", len(c.Metadata.Dependencies))
+ }
+ if len(c.Lock.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Lock.Dependency, got %d", len(c.Lock.Dependencies))
+ }
+
+ for _, dep := range c.Dependencies() {
+ switch dep.Name() {
+ case "mariner":
+ case "alpine":
+ if len(dep.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(dep.Templates))
+ }
+ if dep.Templates[0].Name != "templates/alpine-pod.yaml" {
+ t.Errorf("Unexpected template: %s", dep.Templates[0].Name)
+ }
+ if len(dep.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(dep.Files) != 1 {
+ t.Fatalf("Expected 1 Files, got %d", len(dep.Files))
+ }
+ if len(dep.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(dep.Dependencies()))
+ }
+ default:
+ t.Errorf("Unexpected dependency %s", dep.Name())
+ }
+ }
+}
+
+func verifyBomStripped(t *testing.T, files []*common.File) {
+ t.Helper()
+ for _, file := range files {
+ if bytes.HasPrefix(file.Data, utf8bom) {
+ t.Errorf("Byte Order Mark still present in processed file %s", file.Name)
+ }
+ }
+}
diff --git a/internal/chart/v3/loader/testdata/LICENSE b/internal/chart/v3/loader/testdata/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/albatross/Chart.yaml b/internal/chart/v3/loader/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..eeef737ff
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/albatross/Chart.yaml
@@ -0,0 +1,4 @@
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/albatross/values.yaml b/internal/chart/v3/loader/testdata/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz b/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..de28e4120
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/frobnitz/.helmignore b/internal/chart/v3/loader/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz/Chart.lock b/internal/chart/v3/loader/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/LICENSE b/internal/chart/v3/loader/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/README.md b/internal/chart/v3/loader/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz/icon.svg b/internal/chart/v3/loader/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz/values.yaml b/internal/chart/v3/loader/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz b/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..dfbe88a73
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore b/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore
new file mode 100755
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock b/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock
new file mode 100755
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml
new file mode 100755
index 000000000..6a952e333
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz_backslash
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt
new file mode 100755
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE b/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE
new file mode 100755
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md b/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md
new file mode 100755
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me
new file mode 100755
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
new file mode 100755
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md
new file mode 100755
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
new file mode 100755
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
new file mode 100755
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100755
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
new file mode 100755
index 000000000..0ac5ca6a8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
new file mode 100755
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz
new file mode 100755
index 000000000..5c6bc4dcb
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md
new file mode 100755
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg b/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg
new file mode 100755
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz_backslash/ignore/me.txt
new file mode 100755
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl
new file mode 100755
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml
new file mode 100755
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz
new file mode 100644
index 000000000..7f0edc6b2
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore b/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore
new file mode 100644
index 000000000..7a4b92da2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock b/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock
new file mode 100644
index 000000000..ed43b227f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml
new file mode 100644
index 000000000..924fae6fc
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt
new file mode 100644
index 000000000..77c4e724a
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE b/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE
new file mode 100644
index 000000000..c27b00bf2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md
new file mode 100644
index 000000000..e9c40031b
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me
new file mode 100644
index 000000000..a7e3a38b7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..6fe4f411f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
new file mode 100644
index 000000000..ea7526bee
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..0732c7d7d
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..f690d53c4
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..f3e662a28
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
new file mode 100644
index 000000000..6b7cb2596
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md
new file mode 100644
index 000000000..816c3e431
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg b/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz_with_bom/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl
new file mode 100644
index 000000000..bb29c5491
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml
new file mode 100644
index 000000000..c24ceadf9
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null
new file mode 120000
index 000000000..dc1dc0cde
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null
@@ -0,0 +1 @@
+/dev/null
\ No newline at end of file
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/ignore/me.txt b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/loader/testdata/genfrob.sh b/internal/chart/v3/loader/testdata/genfrob.sh
new file mode 100755
index 000000000..eae68906b
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/genfrob.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_backslash/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_bom/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_dev_null/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_symlink/charts/
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
+tar --exclude=ignore/* -zcvf frobnitz_with_bom.tgz frobnitz_with_bom
diff --git a/internal/chart/v3/loader/testdata/mariner/Chart.yaml b/internal/chart/v3/loader/testdata/mariner/Chart.yaml
new file mode 100644
index 000000000..4d3eea730
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz b/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz
new file mode 100644
index 000000000..ec7bfbfcf
Binary files /dev/null and b/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz differ
diff --git a/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl b/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/internal/chart/v3/loader/testdata/mariner/values.yaml b/internal/chart/v3/loader/testdata/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/internal/chart/v3/loader/testdata/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/internal/chart/v3/metadata.go b/internal/chart/v3/metadata.go
new file mode 100644
index 000000000..4629d571b
--- /dev/null
+++ b/internal/chart/v3/metadata.go
@@ -0,0 +1,178 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+// Maintainer describes a Chart maintainer.
+type Maintainer struct {
+ // Name is a user name or organization name
+ Name string `json:"name,omitempty"`
+ // Email is an optional email address to contact the named maintainer
+ Email string `json:"email,omitempty"`
+ // URL is an optional URL to an address for the named maintainer
+ URL string `json:"url,omitempty"`
+}
+
+// Validate checks valid data and sanitizes string characters.
+func (m *Maintainer) Validate() error {
+ if m == nil {
+ return ValidationError("maintainers must not contain empty or null nodes")
+ }
+ m.Name = sanitizeString(m.Name)
+ m.Email = sanitizeString(m.Email)
+ m.URL = sanitizeString(m.URL)
+ return nil
+}
+
+// Metadata for a Chart file. This models the structure of a Chart.yaml file.
+type Metadata struct {
+ // The name of the chart. Required.
+ Name string `json:"name,omitempty"`
+ // The URL to a relevant project page, git repo, or contact person
+ Home string `json:"home,omitempty"`
+ // Source is the URL to the source code of this chart
+ Sources []string `json:"sources,omitempty"`
+ // A SemVer 2 conformant version string of the chart. Required.
+ Version string `json:"version,omitempty"`
+ // A one-sentence description of the chart
+ Description string `json:"description,omitempty"`
+ // A list of string keywords
+ Keywords []string `json:"keywords,omitempty"`
+ // A list of name and URL/email address combinations for the maintainer(s)
+ Maintainers []*Maintainer `json:"maintainers,omitempty"`
+ // The URL to an icon file.
+ Icon string `json:"icon,omitempty"`
+ // The API Version of this chart. Required.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // The condition to check to enable chart
+ Condition string `json:"condition,omitempty"`
+ // The tags to check to enable chart
+ Tags string `json:"tags,omitempty"`
+ // The version of the application enclosed inside of this chart.
+ AppVersion string `json:"appVersion,omitempty"`
+ // Whether or not this chart is deprecated
+ Deprecated bool `json:"deprecated,omitempty"`
+ // Annotations are additional mappings uninterpreted by Helm,
+ // made available for inspection by other applications.
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
+ KubeVersion string `json:"kubeVersion,omitempty"`
+ // Dependencies are a list of dependencies for a chart.
+ Dependencies []*Dependency `json:"dependencies,omitempty"`
+ // Specifies the chart type: application or library
+ Type string `json:"type,omitempty"`
+}
+
+// Validate checks the metadata for known issues and sanitizes string
+// characters.
+func (md *Metadata) Validate() error {
+ if md == nil {
+ return ValidationError("chart.metadata is required")
+ }
+
+ md.Name = sanitizeString(md.Name)
+ md.Description = sanitizeString(md.Description)
+ md.Home = sanitizeString(md.Home)
+ md.Icon = sanitizeString(md.Icon)
+ md.Condition = sanitizeString(md.Condition)
+ md.Tags = sanitizeString(md.Tags)
+ md.AppVersion = sanitizeString(md.AppVersion)
+ md.KubeVersion = sanitizeString(md.KubeVersion)
+ for i := range md.Sources {
+ md.Sources[i] = sanitizeString(md.Sources[i])
+ }
+ for i := range md.Keywords {
+ md.Keywords[i] = sanitizeString(md.Keywords[i])
+ }
+
+ if md.APIVersion == "" {
+ return ValidationError("chart.metadata.apiVersion is required")
+ }
+ if md.Name == "" {
+ return ValidationError("chart.metadata.name is required")
+ }
+
+ if md.Name != filepath.Base(md.Name) {
+ return ValidationErrorf("chart.metadata.name %q is invalid", md.Name)
+ }
+
+ if md.Version == "" {
+ return ValidationError("chart.metadata.version is required")
+ }
+ if !isValidSemver(md.Version) {
+ return ValidationErrorf("chart.metadata.version %q is invalid", md.Version)
+ }
+ if !isValidChartType(md.Type) {
+ return ValidationError("chart.metadata.type must be application or library")
+ }
+
+ for _, m := range md.Maintainers {
+ if err := m.Validate(); err != nil {
+ return err
+ }
+ }
+
+ // Aliases need to be validated here to make sure that the alias name does
+ // not contain any illegal characters.
+ dependencies := map[string]*Dependency{}
+ for _, dependency := range md.Dependencies {
+ if err := dependency.Validate(); err != nil {
+ return err
+ }
+ key := dependency.Name
+ if dependency.Alias != "" {
+ key = dependency.Alias
+ }
+ if dependencies[key] != nil {
+ return ValidationErrorf("more than one dependency with name or alias %q", key)
+ }
+ dependencies[key] = dependency
+ }
+ return nil
+}
+
+func isValidChartType(in string) bool {
+ switch in {
+ case "", "application", "library":
+ return true
+ }
+ return false
+}
+
+func isValidSemver(v string) bool {
+ _, err := semver.NewVersion(v)
+ return err == nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ if unicode.IsPrint(r) {
+ return r
+ }
+ return -1
+ }, str)
+}
diff --git a/internal/chart/v3/metadata_test.go b/internal/chart/v3/metadata_test.go
new file mode 100644
index 000000000..596a03695
--- /dev/null
+++ b/internal/chart/v3/metadata_test.go
@@ -0,0 +1,201 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "testing"
+)
+
+func TestValidate(t *testing.T) {
+ tests := []struct {
+ name string
+ md *Metadata
+ err error
+ }{
+ {
+ "chart without metadata",
+ nil,
+ ValidationError("chart.metadata is required"),
+ },
+ {
+ "chart without apiVersion",
+ &Metadata{Name: "test", Version: "1.0"},
+ ValidationError("chart.metadata.apiVersion is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{APIVersion: "v3", Version: "1.0"},
+ ValidationError("chart.metadata.name is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{Name: "../../test", APIVersion: "v3", Version: "1.0"},
+ ValidationError("chart.metadata.name \"../../test\" is invalid"),
+ },
+ {
+ "chart without version",
+ &Metadata{Name: "test", APIVersion: "v3"},
+ ValidationError("chart.metadata.version is required"),
+ },
+ {
+ "chart with bad type",
+ &Metadata{Name: "test", APIVersion: "v3", Version: "1.0", Type: "test"},
+ ValidationError("chart.metadata.type must be application or library"),
+ },
+ {
+ "chart without dependency",
+ &Metadata{Name: "test", APIVersion: "v3", Version: "1.0", Type: "application"},
+ nil,
+ },
+ {
+ "dependency with valid alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "dependency", Alias: "legal-alias"},
+ },
+ },
+ nil,
+ },
+ {
+ "dependency with bad characters in alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "bad", Alias: "illegal alias"},
+ },
+ },
+ ValidationError("dependency \"bad\" has disallowed characters in the alias"),
+ },
+ {
+ "same dependency twice",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "foo", Alias: ""},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "two dependencies with alias from second dependency shadowing first one",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "bar", Alias: "foo"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "same dependency twice with different version",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: "", Version: "1.2.3"},
+ {Name: "foo", Alias: "", Version: "1.0.0"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "two dependencies with same name but different repos",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Repository: "repo-0"},
+ {Name: "foo", Repository: "repo-1"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "dependencies has nil",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ nil,
+ },
+ },
+ ValidationError("dependencies must not contain empty or null nodes"),
+ },
+ {
+ "maintainer not empty",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Maintainers: []*Maintainer{
+ nil,
+ },
+ },
+ ValidationError("maintainers must not contain empty or null nodes"),
+ },
+ {
+ "version invalid",
+ &Metadata{APIVersion: "3", Name: "test", Version: "1.2.3.4"},
+ ValidationError("chart.metadata.version \"1.2.3.4\" is invalid"),
+ },
+ }
+
+ for _, tt := range tests {
+ result := tt.md.Validate()
+ if result != tt.err {
+ t.Errorf("expected %q, got %q in test %q", tt.err, result, tt.name)
+ }
+ }
+}
+
+func TestValidate_sanitize(t *testing.T) {
+ md := &Metadata{APIVersion: "3", Name: "test", Version: "1.0", Description: "\adescr\u0081iption\rtest", Maintainers: []*Maintainer{{Name: "\r"}}}
+ if err := md.Validate(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if md.Description != "description test" {
+ t.Fatalf("description was not sanitized: %q", md.Description)
+ }
+ if md.Maintainers[0].Name != " " {
+ t.Fatal("maintainer name was not sanitized")
+ }
+}
diff --git a/internal/chart/v3/util/chartfile.go b/internal/chart/v3/util/chartfile.go
new file mode 100644
index 000000000..25271e1cf
--- /dev/null
+++ b/internal/chart/v3/util/chartfile.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+)
+
+// LoadChartfile loads a Chart.yaml file into a *chart.Metadata.
+func LoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.Unmarshal(b, y)
+ return y, err
+}
+
+// StrictLoadChartfile loads a Chart.yaml into a *chart.Metadata using a strict unmarshaling
+func StrictLoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.UnmarshalStrict(b, y)
+ return y, err
+}
+
+// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
+//
+// 'filename' should be the complete path and filename ('foo/Chart.yaml')
+func SaveChartfile(filename string, cf *chart.Metadata) error {
+ out, err := yaml.Marshal(cf)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(filename, out, 0644)
+}
+
+// IsChartDir validate a chart directory.
+//
+// Checks for a valid Chart.yaml.
+func IsChartDir(dirName string) (bool, error) {
+ if fi, err := os.Stat(dirName); err != nil {
+ return false, err
+ } else if !fi.IsDir() {
+ return false, fmt.Errorf("%q is not a directory", dirName)
+ }
+
+ chartYaml := filepath.Join(dirName, ChartfileName)
+ if _, err := os.Stat(chartYaml); errors.Is(err, fs.ErrNotExist) {
+ return false, fmt.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+ }
+
+ chartYamlContent, err := os.ReadFile(chartYaml)
+ if err != nil {
+ return false, fmt.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+ }
+
+ chartContent := new(chart.Metadata)
+ if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil {
+ return false, err
+ }
+ if chartContent == nil {
+ return false, fmt.Errorf("chart metadata (%s) missing", ChartfileName)
+ }
+ if chartContent.Name == "" {
+ return false, fmt.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+ }
+
+ return true, nil
+}
diff --git a/internal/chart/v3/util/chartfile_test.go b/internal/chart/v3/util/chartfile_test.go
new file mode 100644
index 000000000..c3d19c381
--- /dev/null
+++ b/internal/chart/v3/util/chartfile_test.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+)
+
+const testfile = "testdata/chartfiletest.yaml"
+
+func TestLoadChartfile(t *testing.T) {
+ f, err := LoadChartfile(testfile)
+ if err != nil {
+ t.Errorf("Failed to open %s: %s", testfile, err)
+ return
+ }
+ verifyChartfile(t, f, "frobnitz")
+}
+
+func verifyChartfile(t *testing.T, f *chart.Metadata, name string) {
+ t.Helper()
+ if f == nil { //nolint:staticcheck
+ t.Fatal("Failed verifyChartfile because f is nil")
+ }
+
+ if f.Name != name {
+ t.Errorf("Expected %s, got %s", name, f.Name)
+ }
+
+ if f.Description != "This is a frobnitz." {
+ t.Errorf("Unexpected description %q", f.Description)
+ }
+
+ if f.Version != "1.2.3" {
+ t.Errorf("Unexpected version %q", f.Version)
+ }
+
+ if len(f.Maintainers) != 2 {
+ t.Errorf("Expected 2 maintainers, got %d", len(f.Maintainers))
+ }
+
+ if f.Maintainers[0].Name != "The Helm Team" {
+ t.Errorf("Unexpected maintainer name.")
+ }
+
+ if f.Maintainers[1].Email != "nobody@example.com" {
+ t.Errorf("Unexpected maintainer email.")
+ }
+
+ if len(f.Sources) != 1 {
+ t.Fatalf("Unexpected number of sources")
+ }
+
+ if f.Sources[0] != "https://example.com/foo/bar" {
+ t.Errorf("Expected https://example.com/foo/bar, got %s", f.Sources)
+ }
+
+ if f.Home != "http://example.com" {
+ t.Error("Unexpected home.")
+ }
+
+ if f.Icon != "https://example.com/64x64.png" {
+ t.Errorf("Unexpected icon: %q", f.Icon)
+ }
+
+ if len(f.Keywords) != 3 {
+ t.Error("Unexpected keywords")
+ }
+
+ if len(f.Annotations) != 2 {
+ t.Fatalf("Unexpected annotations")
+ }
+
+ if want, got := "extravalue", f.Annotations["extrakey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ if want, got := "anothervalue", f.Annotations["anotherkey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ kk := []string{"frobnitz", "sprocket", "dodad"}
+ for i, k := range f.Keywords {
+ if kk[i] != k {
+ t.Errorf("Expected %q, got %q", kk[i], k)
+ }
+ }
+}
+
+func TestIsChartDir(t *testing.T) {
+ validChartDir, err := IsChartDir("testdata/frobnitz")
+ if !validChartDir {
+ t.Errorf("unexpected error while reading chart-directory: (%v)", err)
+ return
+ }
+ validChartDir, err = IsChartDir("testdata")
+ if validChartDir || err == nil {
+ t.Errorf("expected error but did not get any")
+ return
+ }
+}
diff --git a/internal/chart/v3/util/compatible.go b/internal/chart/v3/util/compatible.go
new file mode 100644
index 000000000..d384d2d45
--- /dev/null
+++ b/internal/chart/v3/util/compatible.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "github.com/Masterminds/semver/v3"
+
+// IsCompatibleRange compares a version to a constraint.
+// It returns true if the version matches the constraint, and false in all other cases.
+func IsCompatibleRange(constraint, ver string) bool {
+ sv, err := semver.NewVersion(ver)
+ if err != nil {
+ return false
+ }
+
+ c, err := semver.NewConstraint(constraint)
+ if err != nil {
+ return false
+ }
+ return c.Check(sv)
+}
diff --git a/internal/chart/v3/util/compatible_test.go b/internal/chart/v3/util/compatible_test.go
new file mode 100644
index 000000000..e17d33e35
--- /dev/null
+++ b/internal/chart/v3/util/compatible_test.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version represents the current version of the project.
+package util
+
+import "testing"
+
+func TestIsCompatibleRange(t *testing.T) {
+ tests := []struct {
+ constraint string
+ ver string
+ expected bool
+ }{
+ {"v2.0.0-alpha.4", "v2.0.0-alpha.4", true},
+ {"v2.0.0-alpha.3", "v2.0.0-alpha.4", false},
+ {"v2.0.0", "v2.0.0-alpha.4", false},
+ {"v2.0.0-alpha.4", "v2.0.0", false},
+ {"~v2.0.0", "v2.0.1", true},
+ {"v2", "v2.0.0", true},
+ {">2.0.0", "v2.1.1", true},
+ {"v2.1.*", "v2.1.1", true},
+ }
+
+ for _, tt := range tests {
+ if IsCompatibleRange(tt.constraint, tt.ver) != tt.expected {
+ t.Errorf("expected constraint %s to be %v for %s", tt.constraint, tt.expected, tt.ver)
+ }
+ }
+}
diff --git a/internal/chart/v3/util/create.go b/internal/chart/v3/util/create.go
new file mode 100644
index 000000000..9f742e646
--- /dev/null
+++ b/internal/chart/v3/util/create.go
@@ -0,0 +1,833 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// chartName is a regular expression for testing the supplied name of a chart.
+// This regular expression is probably stricter than it needs to be. We can relax it
+// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be
+// problematic.
+var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$")
+
+const (
+ // ChartfileName is the default Chart file name.
+ ChartfileName = "Chart.yaml"
+ // ValuesfileName is the default values file name.
+ ValuesfileName = "values.yaml"
+ // SchemafileName is the default values schema file name.
+ SchemafileName = "values.schema.json"
+ // TemplatesDir is the relative directory name for templates.
+ TemplatesDir = "templates"
+ // ChartsDir is the relative directory name for charts dependencies.
+ ChartsDir = "charts"
+ // TemplatesTestsDir is the relative directory name for tests.
+ TemplatesTestsDir = TemplatesDir + sep + "tests"
+ // IgnorefileName is the name of the Helm ignore file.
+ IgnorefileName = ".helmignore"
+ // IngressFileName is the name of the example ingress file.
+ IngressFileName = TemplatesDir + sep + "ingress.yaml"
+ // HTTPRouteFileName is the name of the example HTTPRoute file.
+ HTTPRouteFileName = TemplatesDir + sep + "httproute.yaml"
+ // DeploymentName is the name of the example deployment file.
+ DeploymentName = TemplatesDir + sep + "deployment.yaml"
+ // ServiceName is the name of the example service file.
+ ServiceName = TemplatesDir + sep + "service.yaml"
+ // ServiceAccountName is the name of the example serviceaccount file.
+ ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml"
+ // HorizontalPodAutoscalerName is the name of the example hpa file.
+ HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml"
+ // NotesName is the name of the example NOTES.txt file.
+ NotesName = TemplatesDir + sep + "NOTES.txt"
+ // HelpersName is the name of the example helpers file.
+ HelpersName = TemplatesDir + sep + "_helpers.tpl"
+ // TestConnectionName is the name of the example test file.
+ TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml"
+)
+
+// maxChartNameLength is lower than the limits we know of with certain file systems,
+// and with certain Kubernetes fields.
+const maxChartNameLength = 250
+
+const sep = string(filepath.Separator)
+
+const defaultChartfile = `apiVersion: v3
+name: %s
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+`
+
+const defaultValues = `# Default values for %s.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+replicaCount: 1
+
+# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
+image:
+ repository: nginx
+ # This sets the pull policy for images.
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# This is to override the chart name.
+nameOverride: ""
+fullnameOverride: ""
+
+# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Automatically mount a ServiceAccount's API credentials?
+ automount: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+# This is for setting Kubernetes Annotations to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+# This is for setting Kubernetes Labels to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+ # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: ClusterIP
+ # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
+ port: 80
+
+# This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+# -- Expose the service via gateway-api HTTPRoute
+# Requires Gateway API resources and suitable controller installed within the cluster
+# (see: https://gateway-api.sigs.k8s.io/guides/)
+httpRoute:
+ # HTTPRoute enabled.
+ enabled: false
+ # HTTPRoute annotations.
+ annotations: {}
+ # Which Gateways this Route is attached to.
+ parentRefs:
+ - name: gateway
+ sectionName: http
+ # namespace: default
+ # Hostnames matching HTTP header.
+ hostnames:
+ - chart-example.local
+ # List of rules and filters applied.
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /headers
+ # filters:
+ # - type: RequestHeaderModifier
+ # requestHeaderModifier:
+ # set:
+ # - name: My-Overwrite-Header
+ # value: this-is-the-only-value
+ # remove:
+ # - User-Agent
+ # - matches:
+ # - path:
+ # type: PathPrefix
+ # value: /echo
+ # headers:
+ # - name: version
+ # value: v2
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+livenessProbe:
+ httpGet:
+ path: /
+ port: http
+readinessProbe:
+ httpGet:
+ path: /
+ port: http
+
+# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+# Additional volumes on the output Deployment definition.
+volumes: []
+# - name: foo
+# secret:
+# secretName: mysecret
+# optional: false
+
+# Additional volumeMounts on the output Deployment definition.
+volumeMounts: []
+# - name: foo
+# mountPath: "/etc/foo"
+# readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.className }}
+ ingressClassName: {{ . }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- with .pathType }}
+ pathType: {{ . }}
+ {{- end }}
+ backend:
+ service:
+ name: {{ include ".fullname" $ }}
+ port:
+ number: {{ $.Values.service.port }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultHTTPRoute = `{{- if .Values.httpRoute.enabled -}}
+{{- $fullName := include ".fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.httpRoute.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ parentRefs:
+ {{- with .Values.httpRoute.parentRefs }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.httpRoute.hostnames }}
+ hostnames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ rules:
+ {{- range .Values.httpRoute.rules }}
+ {{- with .matches }}
+ - matches:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .filters }}
+ filters:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ backendRefs:
+ - name: {{ $fullName }}
+ port: {{ $svcPort }}
+ weight: 1
+ {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ {{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include ".selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include ".labels" . | nindent 8 }}
+ {{- with .Values.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include ".serviceAccountName" . }}
+ {{- with .Values.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ {{- with .Values.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ {{- with .Values.livenessProbe }}
+ livenessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.readinessProbe }}
+ readinessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include ".selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include ".serviceAccountName" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include ".fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.httpRoute.enabled }}
+{{- if .Values.httpRoute.hostnames }}
+ export APP_HOSTNAME={{ .Values.httpRoute.hostnames | first }}
+{{- else }}
+ export APP_HOSTNAME=$(kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o jsonpath="{.spec.listeners[0].hostname}")
+ {{- end }}
+{{- if and .Values.httpRoute.rules (first .Values.httpRoute.rules).matches (first (first .Values.httpRoute.rules).matches).path.value }}
+ echo "Visit http://$APP_HOSTNAME{{ (first (first .Values.httpRoute.rules).matches).path.value }} to use your application"
+
+ NOTE: Your HTTPRoute depends on the listener configuration of your gateway and your HTTPRoute rules.
+ The rules can be set for path, method, header and query parameters.
+ You can check the gateway configuration with 'kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o yaml'
+{{- end }}
+{{- else if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define ".name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define ".fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define ".chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define ".labels" -}}
+helm.sh/chart: {{ include ".chart" . }}
+{{ include ".selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define ".selectorLabels" -}}
+app.kubernetes.io/name: {{ include ".name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define ".serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include ".fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include ".fullname" . }}-test-connection"
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include ".fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+ schart, err := loader.Load(src)
+ if err != nil {
+ return fmt.Errorf("could not load %s: %w", src, err)
+ }
+
+ schart.Metadata = chartfile
+
+ var updatedTemplates []*common.File
+
+ for _, template := range schart.Templates {
+ newData := transform(string(template.Data), schart.Name())
+ updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, Data: newData})
+ }
+
+ schart.Templates = updatedTemplates
+ b, err := yaml.Marshal(schart.Values)
+ if err != nil {
+ return fmt.Errorf("reading values file: %w", err)
+ }
+
+ var m map[string]interface{}
+ if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+ return fmt.Errorf("transforming values file: %w", err)
+ }
+ schart.Values = m
+
+ // SaveDir looks for the file values.yaml when saving rather than the values
+ // key in order to preserve the comments in the YAML. The name placeholder
+ // needs to be replaced on that file.
+ for _, f := range schart.Raw {
+ if f.Name == ValuesfileName {
+ f.Data = transform(string(f.Data), schart.Name())
+ }
+ }
+
+ return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+ // Sanity-check the name of a chart so user doesn't create one that causes problems.
+ if err := validateChartName(name); err != nil {
+ return "", err
+ }
+
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return path, err
+ }
+
+ if fi, err := os.Stat(path); err != nil {
+ return path, err
+ } else if !fi.IsDir() {
+ return path, fmt.Errorf("no such directory %s", path)
+ }
+
+ cdir := filepath.Join(path, name)
+ if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+ return cdir, fmt.Errorf("file %s already exists and is not a directory", cdir)
+ }
+
+ // Note: If adding a new template below (i.e., to `helm create`) which is disabled by default (similar to hpa and
+ // ingress below); or making an existing template disabled by default, add the enabling condition in
+ // `TestHelmCreateChart_CheckDeprecatedWarnings` in `pkg/lint/lint_test.go` to make it run through deprecation checks
+ // with latest Kubernetes version.
+ files := []struct {
+ path string
+ content []byte
+ }{
+ {
+ // Chart.yaml
+ path: filepath.Join(cdir, ChartfileName),
+ content: fmt.Appendf(nil, defaultChartfile, name),
+ },
+ {
+ // values.yaml
+ path: filepath.Join(cdir, ValuesfileName),
+ content: fmt.Appendf(nil, defaultValues, name),
+ },
+ {
+ // .helmignore
+ path: filepath.Join(cdir, IgnorefileName),
+ content: []byte(defaultIgnore),
+ },
+ {
+ // ingress.yaml
+ path: filepath.Join(cdir, IngressFileName),
+ content: transform(defaultIngress, name),
+ },
+ {
+ // httproute.yaml
+ path: filepath.Join(cdir, HTTPRouteFileName),
+ content: transform(defaultHTTPRoute, name),
+ },
+ {
+ // deployment.yaml
+ path: filepath.Join(cdir, DeploymentName),
+ content: transform(defaultDeployment, name),
+ },
+ {
+ // service.yaml
+ path: filepath.Join(cdir, ServiceName),
+ content: transform(defaultService, name),
+ },
+ {
+ // serviceaccount.yaml
+ path: filepath.Join(cdir, ServiceAccountName),
+ content: transform(defaultServiceAccount, name),
+ },
+ {
+ // hpa.yaml
+ path: filepath.Join(cdir, HorizontalPodAutoscalerName),
+ content: transform(defaultHorizontalPodAutoscaler, name),
+ },
+ {
+ // NOTES.txt
+ path: filepath.Join(cdir, NotesName),
+ content: transform(defaultNotes, name),
+ },
+ {
+ // _helpers.tpl
+ path: filepath.Join(cdir, HelpersName),
+ content: transform(defaultHelpers, name),
+ },
+ {
+ // test-connection.yaml
+ path: filepath.Join(cdir, TestConnectionName),
+ content: transform(defaultTestConnection, name),
+ },
+ }
+
+ for _, file := range files {
+ if _, err := os.Stat(file.path); err == nil {
+ // There is no handle to a preferred output stream here.
+ fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+ }
+ if err := writeFile(file.path, file.content); err != nil {
+ return cdir, err
+ }
+ }
+ // Need to add the ChartsDir explicitly as it does not contain any file OOTB
+ if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+ return cdir, err
+ }
+ return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+ return []byte(strings.ReplaceAll(src, "", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+ if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+ if name == "" || len(name) > maxChartNameLength {
+ return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+ }
+ if !chartName.MatchString(name) {
+ return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+ }
+ return nil
+}
diff --git a/internal/chart/v3/util/create_test.go b/internal/chart/v3/util/create_test.go
new file mode 100644
index 000000000..b3b58cc5a
--- /dev/null
+++ b/internal/chart/v3/util/create_test.go
@@ -0,0 +1,172 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+func TestCreate(t *testing.T) {
+ tdir := t.TempDir()
+
+ c, err := Create("foo", tdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ DeploymentName,
+ HelpersName,
+ IgnorefileName,
+ NotesName,
+ ServiceAccountName,
+ ServiceName,
+ TemplatesDir,
+ TemplatesTestsDir,
+ TestConnectionName,
+ ValuesfileName,
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+ }
+}
+
+func TestCreateFrom(t *testing.T) {
+ tdir := t.TempDir()
+
+ cf := &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "foo",
+ Version: "0.1.0",
+ }
+ srcdir := "./testdata/frobnitz/charts/mariner"
+
+ if err := CreateFrom(cf, tdir, srcdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+ c := filepath.Join(tdir, cf.Name)
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ ValuesfileName,
+ filepath.Join(TemplatesDir, "placeholder.tpl"),
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+
+ // Check each file to make sure has been replaced
+ b, err := os.ReadFile(filepath.Join(dir, f))
+ if err != nil {
+ t.Errorf("Unable to read file %s: %s", f, err)
+ }
+ if bytes.Contains(b, []byte("")) {
+ t.Errorf("File %s contains ", f)
+ }
+ }
+}
+
+// TestCreate_Overwrite is a regression test for making sure that files are overwritten.
+func TestCreate_Overwrite(t *testing.T) {
+ tdir := t.TempDir()
+
+ var errlog bytes.Buffer
+
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ tplname := filepath.Join(dir, "templates/hpa.yaml")
+ writeFile(tplname, []byte("FOO"))
+
+ // Now re-run the create
+ Stderr = &errlog
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := os.ReadFile(tplname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(data) == "FOO" {
+ t.Fatal("File that should have been modified was not.")
+ }
+
+ if errlog.Len() == 0 {
+ t.Errorf("Expected warnings about overwriting files.")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ for name, shouldPass := range map[string]bool{
+ "": false,
+ "abcdefghijklmnopqrstuvwxyz-_.": true,
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": true,
+ "$hello": false,
+ "Hellô": false,
+ "he%%o": false,
+ "he\nllo": false,
+
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": false,
+ } {
+ if err := validateChartName(name); (err != nil) == shouldPass {
+ t.Errorf("test for %q failed", name)
+ }
+ }
+}
diff --git a/internal/chart/v3/util/dependencies.go b/internal/chart/v3/util/dependencies.go
new file mode 100644
index 000000000..489772115
--- /dev/null
+++ b/internal/chart/v3/util/dependencies.go
@@ -0,0 +1,377 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "log/slog"
+ "strings"
+
+ "github.com/mitchellh/copystructure"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+)
+
+// ProcessDependencies checks through this chart's dependencies, processing accordingly.
+func ProcessDependencies(c *chart.Chart, v common.Values) error {
+ if err := processDependencyEnabled(c, v, ""); err != nil {
+ return err
+ }
+ return processDependencyImportValues(c, true)
+}
+
+// processDependencyConditions disables charts based on condition path value in values
+func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) {
+ if reqs == nil {
+ return
+ }
+ for _, r := range reqs {
+ for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
+ if len(c) > 0 {
+ // retrieve value
+ vv, err := cvals.PathValue(cpath + c)
+ if err == nil {
+ // if not bool, warn
+ if bv, ok := vv.(bool); ok {
+ r.Enabled = bv
+ break
+ }
+ slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
+ } else if _, ok := err.(common.ErrNoValue); !ok {
+ // this is a real error
+ slog.Warn("the method PathValue returned error", slog.Any("error", err))
+ }
+ }
+ }
+ }
+}
+
+// processDependencyTags disables charts based on tags in values
+func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) {
+ if reqs == nil {
+ return
+ }
+ vt, err := cvals.Table("tags")
+ if err != nil {
+ return
+ }
+ for _, r := range reqs {
+ var hasTrue, hasFalse bool
+ for _, k := range r.Tags {
+ if b, ok := vt[k]; ok {
+ // if not bool, warn
+ if bv, ok := b.(bool); ok {
+ if bv {
+ hasTrue = true
+ } else {
+ hasFalse = true
+ }
+ } else {
+ slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)
+ }
+ }
+ }
+ if !hasTrue && hasFalse {
+ r.Enabled = false
+ } else if hasTrue || !hasTrue && !hasFalse {
+ r.Enabled = true
+ }
+ }
+}
+
+// getAliasDependency finds the chart for an alias dependency and copies parts that will be modified
+func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart {
+ for _, c := range charts {
+ if c == nil {
+ continue
+ }
+ if c.Name() != dep.Name {
+ continue
+ }
+ if !IsCompatibleRange(dep.Version, c.Metadata.Version) {
+ continue
+ }
+
+ out := *c
+ out.Metadata = copyMetadata(c.Metadata)
+
+ // empty dependencies and shallow copy all dependencies, otherwise parent info may be corrupted if
+ // there is more than one dependency aliasing this chart
+ out.SetDependencies()
+ for _, dependency := range c.Dependencies() {
+ cpy := *dependency
+ out.AddDependency(&cpy)
+ }
+
+ if dep.Alias != "" {
+ out.Metadata.Name = dep.Alias
+ }
+ return &out
+ }
+ return nil
+}
+
+func copyMetadata(metadata *chart.Metadata) *chart.Metadata {
+ md := *metadata
+
+ if md.Dependencies != nil {
+ dependencies := make([]*chart.Dependency, len(md.Dependencies))
+ for i := range md.Dependencies {
+ dependency := *md.Dependencies[i]
+ dependencies[i] = &dependency
+ }
+ md.Dependencies = dependencies
+ }
+ return &md
+}
+
+// processDependencyEnabled removes disabled charts from dependencies
+func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+
+ var chartDependencies []*chart.Chart
+ // If any dependency is not a part of Chart.yaml
+ // then this should be added to chartDependencies.
+ // However, if the dependency is already specified in Chart.yaml
+ // we should not add it, as it would be processed from Chart.yaml anyway.
+
+Loop:
+ for _, existing := range c.Dependencies() {
+ for _, req := range c.Metadata.Dependencies {
+ if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) {
+ continue Loop
+ }
+ }
+ chartDependencies = append(chartDependencies, existing)
+ }
+
+ for _, req := range c.Metadata.Dependencies {
+ if req == nil {
+ continue
+ }
+ if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil {
+ chartDependencies = append(chartDependencies, chartDependency)
+ }
+ if req.Alias != "" {
+ req.Name = req.Alias
+ }
+ }
+ c.SetDependencies(chartDependencies...)
+
+ // set all to true
+ for _, lr := range c.Metadata.Dependencies {
+ lr.Enabled = true
+ }
+ cvals, err := util.CoalesceValues(c, v)
+ if err != nil {
+ return err
+ }
+ // flag dependencies as enabled/disabled
+ processDependencyTags(c.Metadata.Dependencies, cvals)
+ processDependencyConditions(c.Metadata.Dependencies, cvals, path)
+ // make a map of charts to remove
+ rm := map[string]struct{}{}
+ for _, r := range c.Metadata.Dependencies {
+ if !r.Enabled {
+ // remove disabled chart
+ rm[r.Name] = struct{}{}
+ }
+ }
+ // don't keep disabled charts in new slice
+ cd := []*chart.Chart{}
+ copy(cd, c.Dependencies()[:0])
+ for _, n := range c.Dependencies() {
+ if _, ok := rm[n.Metadata.Name]; !ok {
+ cd = append(cd, n)
+ }
+ }
+ // don't keep disabled charts in metadata
+ cdMetadata := []*chart.Dependency{}
+ copy(cdMetadata, c.Metadata.Dependencies[:0])
+ for _, n := range c.Metadata.Dependencies {
+ if _, ok := rm[n.Name]; !ok {
+ cdMetadata = append(cdMetadata, n)
+ }
+ }
+
+ // recursively call self to process sub dependencies
+ for _, t := range cd {
+ subpath := path + t.Metadata.Name + "."
+ if err := processDependencyEnabled(t, cvals, subpath); err != nil {
+ return err
+ }
+ }
+ // set the correct dependencies in metadata
+ c.Metadata.Dependencies = nil
+ c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...)
+ c.SetDependencies(cd...)
+
+ return nil
+}
+
+// pathToMap creates a nested map given a YAML path in dot notation.
+func pathToMap(path string, data map[string]interface{}) map[string]interface{} {
+ if path == "." {
+ return data
+ }
+ return set(parsePath(path), data)
+}
+
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
+func set(path []string, data map[string]interface{}) map[string]interface{} {
+ if len(path) == 0 {
+ return nil
+ }
+ cur := data
+ for i := len(path) - 1; i >= 0; i-- {
+ cur = map[string]interface{}{path[i]: cur}
+ }
+ return cur
+}
+
+// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field.
+func processImportValues(c *chart.Chart, merge bool) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+ // combine chart values and empty config to get Values
+ var cvals common.Values
+ var err error
+ if merge {
+ cvals, err = util.MergeValues(c, nil)
+ } else {
+ cvals, err = util.CoalesceValues(c, nil)
+ }
+ if err != nil {
+ return err
+ }
+ b := make(map[string]interface{})
+ // import values from each dependency if specified in import-values
+ for _, r := range c.Metadata.Dependencies {
+ var outiv []interface{}
+ for _, riv := range r.ImportValues {
+ switch iv := riv.(type) {
+ case map[string]interface{}:
+ child := fmt.Sprintf("%v", iv["child"])
+ parent := fmt.Sprintf("%v", iv["parent"])
+
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": parent,
+ })
+
+ // get child table
+ vv, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err))
+ continue
+ }
+ // create value map from child to be merged into parent
+ if merge {
+ b = util.MergeTables(b, pathToMap(parent, vv.AsMap()))
+ } else {
+ b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap()))
+ }
+ case string:
+ child := "exports." + iv
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": ".",
+ })
+ vm, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn("ImportValues missing table", slog.Any("error", err))
+ continue
+ }
+ if merge {
+ b = util.MergeTables(b, vm.AsMap())
+ } else {
+ b = util.CoalesceTables(b, vm.AsMap())
+ }
+ }
+ }
+ r.ImportValues = outiv
+ }
+
+ // Imported values from a child to a parent chart have a lower priority than
+ // the parents values. This enables parent charts to import a large section
+ // from a child and then override select parts. This is why b is merged into
+ // cvals in the code below and not the other way around.
+ if merge {
+ // deep copying the cvals as there are cases where pointers can end
+ // up in the cvals when they are copied onto b in ways that break things.
+ cvals = deepCopyMap(cvals)
+ c.Values = util.MergeTables(cvals, b)
+ } else {
+ // Trimming the nil values from cvals is needed for backwards compatibility.
+ // Previously, the b value had been populated with cvals along with some
+ // overrides. This caused the coalescing functionality to remove the
+ // nil/null values. This trimming is for backwards compat.
+ cvals = trimNilValues(cvals)
+ c.Values = util.CoalesceTables(cvals, b)
+ }
+
+ return nil
+}
+
+func deepCopyMap(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ return valsCopy.(map[string]interface{})
+}
+
+func trimNilValues(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ valsCopyMap := valsCopy.(map[string]interface{})
+ for key, val := range valsCopyMap {
+ if val == nil {
+ // Iterate over the values and remove nil keys
+ delete(valsCopyMap, key)
+ } else if istable(val) {
+ // Recursively call into ourselves to remove keys from inner tables
+ valsCopyMap[key] = trimNilValues(val.(map[string]interface{}))
+ }
+ }
+
+ return valsCopyMap
+}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
+
+// processDependencyImportValues imports specified chart values from child to parent.
+func processDependencyImportValues(c *chart.Chart, merge bool) error {
+ for _, d := range c.Dependencies() {
+ // recurse
+ if err := processDependencyImportValues(d, merge); err != nil {
+ return err
+ }
+ }
+ return processImportValues(c, merge)
+}
diff --git a/internal/chart/v3/util/dependencies_test.go b/internal/chart/v3/util/dependencies_test.go
new file mode 100644
index 000000000..3c5bb96f7
--- /dev/null
+++ b/internal/chart/v3/util/dependencies_test.go
@@ -0,0 +1,570 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func loadChart(t *testing.T, path string) *chart.Chart {
+ t.Helper()
+ c, err := loader.Load(path)
+ if err != nil {
+ t.Fatalf("failed to load testdata: %s", err)
+ }
+ return c
+}
+
+func TestLoadDependency(t *testing.T) {
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+
+ check := func(deps []*chart.Dependency) {
+ if len(deps) != 2 {
+ t.Errorf("expected 2 dependencies, got %d", len(deps))
+ }
+ for i, tt := range tests {
+ if deps[i].Name != tt.Name {
+ t.Errorf("expected dependency named %q, got %q", tt.Name, deps[i].Name)
+ }
+ if deps[i].Version != tt.Version {
+ t.Errorf("expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, deps[i].Version)
+ }
+ if deps[i].Repository != tt.Repository {
+ t.Errorf("expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, deps[i].Repository)
+ }
+ }
+ }
+ c := loadChart(t, "testdata/frobnitz")
+ check(c.Metadata.Dependencies)
+ check(c.Lock.Dependencies)
+}
+
+func TestDependencyEnabled(t *testing.T) {
+ type M = map[string]interface{}
+ tests := []struct {
+ name string
+ v M
+ e []string // expected charts including duplicates in alphanumeric order
+ }{{
+ "tags with no effect",
+ M{"tags": M{"nothinguseful": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling a group",
+ M{"tags": M{"front-end": false}},
+ []string{"parentchart"},
+ }, {
+ "tags disabling a group and enabling a different group",
+ M{"tags": M{"front-end": false, "back-end": true}},
+ []string{"parentchart", "parentchart.subchart2", "parentchart.subchart2.subchartb", "parentchart.subchart2.subchartc"},
+ }, {
+ "tags disabling only children, children still enabled since tag front-end=true in values.yaml",
+ M{"tags": M{"subcharta": false, "subchartb": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling all parents/children with additional tag re-enabling a parent",
+ M{"tags": M{"front-end": false, "subchart1": true, "back-end": false}},
+ []string{"parentchart", "parentchart.subchart1"},
+ }, {
+ "conditions enabling the parent charts, but back-end (b, c) is still disabled via values.yaml",
+ M{"subchart1": M{"enabled": true}, "subchart2": M{"enabled": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2"},
+ }, {
+ "conditions disabling the parent charts, effectively disabling children",
+ M{"subchart1": M{"enabled": false}, "subchart2": M{"enabled": false}},
+ []string{"parentchart"},
+ }, {
+ "conditions a child using the second condition path of child's condition",
+ M{"subchart1": M{"subcharta": M{"enabled": false}}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags enabling a parent/child group with condition disabling one child",
+ M{"subchart2": M{"subchartc": M{"enabled": false}}, "tags": M{"back-end": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2", "parentchart.subchart2.subchartb"},
+ }, {
+ "tags will not enable a child if parent is explicitly disabled with condition",
+ M{"subchart1": M{"enabled": false}, "tags": M{"front-end": true}},
+ []string{"parentchart"},
+ }, {
+ "subcharts with alias also respect conditions",
+ M{"subchart1": M{"enabled": false}, "subchart2alias": M{"enabled": true, "subchartb": M{"enabled": true}}},
+ []string{"parentchart", "parentchart.subchart2alias", "parentchart.subchart2alias.subchartb"},
+ }}
+
+ for _, tc := range tests {
+ c := loadChart(t, "testdata/subpop")
+ t.Run(tc.name, func(t *testing.T) {
+ if err := processDependencyEnabled(c, tc.v, ""); err != nil {
+ t.Fatalf("error processing enabled dependencies %v", err)
+ }
+
+ names := extractChartNames(c)
+ if len(names) != len(tc.e) {
+ t.Fatalf("slice lengths do not match got %v, expected %v", len(names), len(tc.e))
+ }
+ for i := range names {
+ if names[i] != tc.e[i] {
+ t.Fatalf("slice values do not match got %v, expected %v", names, tc.e)
+ }
+ }
+ })
+ }
+}
+
+// extractChartNames recursively searches chart dependencies returning all charts found
+func extractChartNames(c *chart.Chart) []string {
+ var out []string
+ var fn func(c *chart.Chart)
+ fn = func(c *chart.Chart) {
+ out = append(out, c.ChartPath())
+ for _, d := range c.Dependencies() {
+ fn(d)
+ }
+ }
+ fn(c)
+ sort.Strings(out)
+ return out
+}
+
+func TestProcessDependencyImportValues(t *testing.T) {
+ c := loadChart(t, "testdata/subpop")
+
+ e := make(map[string]string)
+
+ e["imported-chart1.SC1bool"] = "true"
+ e["imported-chart1.SC1float"] = "3.14"
+ e["imported-chart1.SC1int"] = "100"
+ e["imported-chart1.SC1string"] = "dollywood"
+ e["imported-chart1.SC1extra1"] = "11"
+ e["imported-chart1.SPextra1"] = "helm rocks"
+ e["imported-chart1.SC1extra1"] = "11"
+
+ e["imported-chartA.SCAbool"] = "false"
+ e["imported-chartA.SCAfloat"] = "3.1"
+ e["imported-chartA.SCAint"] = "55"
+ e["imported-chartA.SCAstring"] = "jabba"
+ e["imported-chartA.SPextra3"] = "1.337"
+ e["imported-chartA.SC1extra2"] = "1.337"
+ e["imported-chartA.SCAnested1.SCAnested2"] = "true"
+
+ e["imported-chartA-B.SCAbool"] = "false"
+ e["imported-chartA-B.SCAfloat"] = "3.1"
+ e["imported-chartA-B.SCAint"] = "55"
+ e["imported-chartA-B.SCAstring"] = "jabba"
+
+ e["imported-chartA-B.SCBbool"] = "true"
+ e["imported-chartA-B.SCBfloat"] = "7.77"
+ e["imported-chartA-B.SCBint"] = "33"
+ e["imported-chartA-B.SCBstring"] = "boba"
+ e["imported-chartA-B.SPextra5"] = "k8s"
+ e["imported-chartA-B.SC1extra5"] = "tiller"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chart1.SC1bool"] = "false"
+ e["overridden-chart1.SC1float"] = "3.141592"
+ e["overridden-chart1.SC1int"] = "99"
+ e["overridden-chart1.SC1string"] = "pollywog"
+ e["overridden-chart1.SPextra2"] = "42"
+
+ e["overridden-chartA.SCAbool"] = "true"
+ e["overridden-chartA.SCAfloat"] = "41.3"
+ e["overridden-chartA.SCAint"] = "808"
+ e["overridden-chartA.SCAstring"] = "jabberwocky"
+ e["overridden-chartA.SPextra4"] = "true"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chartA-B.SCAbool"] = "true"
+ e["overridden-chartA-B.SCAfloat"] = "41.3"
+ e["overridden-chartA-B.SCAint"] = "808"
+ e["overridden-chartA-B.SCAstring"] = "jabberwocky"
+ e["overridden-chartA-B.SCBbool"] = "false"
+ e["overridden-chartA-B.SCBfloat"] = "1.99"
+ e["overridden-chartA-B.SCBint"] = "77"
+ e["overridden-chartA-B.SCBstring"] = "jango"
+ e["overridden-chartA-B.SPextra6"] = "111"
+ e["overridden-chartA-B.SCAextra1"] = "23"
+ e["overridden-chartA-B.SCBextra1"] = "13"
+ e["overridden-chartA-B.SC1extra6"] = "77"
+
+ // `exports` style
+ e["SCBexported1B"] = "1965"
+ e["SC1extra7"] = "true"
+ e["SCBexported2A"] = "blaster"
+ e["global.SC1exported2.all.SC1exported3"] = "SC1expstr"
+
+ if err := processDependencyImportValues(c, false); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v for key %q", s, vv, kk)
+ }
+ case bool:
+ if b := strconv.FormatBool(pv); b != vv {
+ t.Errorf("failed to match imported bool value %v with expected %v for key %q", b, vv, kk)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q for key %q", pv, vv, kk)
+ }
+ }
+ }
+
+ // Since this was processed with coalescing there should be no null values.
+ // Here we verify that.
+ _, err := cc.PathValue("ensurenull")
+ if err == nil {
+ t.Error("expect nil value not found but found it")
+ }
+ switch xerr := err.(type) {
+ case common.ErrNoValue:
+ // We found what we expected
+ default:
+ t.Errorf("expected an ErrNoValue but got %q instead", xerr)
+ }
+
+ c = loadChart(t, "testdata/subpop")
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc = common.Values(c.Values)
+ val, err := cc.PathValue("ensurenull")
+ if err != nil {
+ t.Error("expect value but ensurenull was not found")
+ }
+ if val != nil {
+ t.Errorf("expect nil value but got %q instead", val)
+ }
+}
+
+func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-import-from-aliased-dependencies")
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ e := make(map[string]string)
+
+ e["foo-defaults.defaultValue"] = "42"
+ e["bar-defaults.defaultValue"] = "42"
+
+ e["foo.defaults.defaultValue"] = "42"
+ e["bar.defaults.defaultValue"] = "42"
+
+ e["foo.grandchild.defaults.defaultValue"] = "42"
+ e["bar.grandchild.defaults.defaultValue"] = "42"
+
+ cValues := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cValues.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+ if pv != vv {
+ t.Errorf("failed to match imported value %v with expected %v", pv, vv)
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
+ c := loadChart(t, "testdata/three-level-dependent-chart/umbrella")
+
+ e := make(map[string]string)
+
+ // The order of precedence should be:
+ // 1. User specified values (e.g CLI)
+ // 2. Parent chart values
+ // 3. Imported values
+ // 4. Sub-chart values
+ // The 4 app charts here deal with things differently:
+ // - app1 has a port value set in the umbrella chart. It does not import any
+ // values so the value from the umbrella chart should be used.
+ // - app2 has a value in the app chart and imports from the library. The
+ // app chart value should take precedence.
+ // - app3 has no value in the app chart and imports the value from the library
+ // chart. The library chart value should be used.
+ // - app4 has a value in the app chart and does not import the value from the
+ // library chart. The app charts value should be used.
+ e["app1.service.port"] = "3456"
+ e["app2.service.port"] = "8080"
+ e["app3.service.port"] = "9090"
+ e["app4.service.port"] = "1234"
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v", s, vv)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q", pv, vv)
+ }
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesForEnabledCharts(t *testing.T) {
+ c := loadChart(t, "testdata/import-values-from-enabled-subchart/parent-chart")
+ nameOverride := "parent-chart-prod"
+
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+
+ prodDependencyValues := c.Dependencies()[0].Values
+ if prodDependencyValues["nameOverride"] != nameOverride {
+ t.Fatalf("dependency chart name should be %s but got %s", nameOverride, prodDependencyValues["nameOverride"])
+ }
+}
+
+func TestGetAliasDependency(t *testing.T) {
+ c := loadChart(t, "testdata/frobnitz")
+ req := c.Metadata.Dependencies
+
+ if len(req) == 0 {
+ t.Fatalf("there are no dependencies to test")
+ }
+
+ // Success case
+ aliasChart := getAliasDependency(c.Dependencies(), req[0])
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[0].Name)
+ }
+ if req[0].Alias != "" {
+ if aliasChart.Name() != req[0].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[0].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Name, aliasChart.Name())
+ }
+
+ if req[0].Version != "" {
+ if !IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version is not in the compatible range")
+ }
+ }
+
+ // Failure case
+ req[0].Name = "something-else"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[0]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+ req[0].Version = "something else which is not in the compatible range"
+ if IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version which is not in the compatible range should cause a failure other than a success ")
+ }
+}
+
+func TestDependentChartAliases(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-alias")
+ req := c.Metadata.Dependencies
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 3 {
+ t.Fatal("expected alias dependencies to be added")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+
+ aliasChart := getAliasDependency(c.Dependencies(), req[2])
+
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[2].Name)
+ }
+ if aliasChart.Parent() != c {
+ t.Fatalf("dependency chart has wrong parent, expected %s but got %s", c.Name(), aliasChart.Parent().Name())
+ }
+ if req[2].Alias != "" {
+ if aliasChart.Name() != req[2].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[2].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Name, aliasChart.Name())
+ }
+
+ req[2].Name = "dummy-name"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[2]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+}
+
+func TestDependentChartWithSubChartsAbsentInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-no-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+}
+
+func TestDependentChartWithSubChartsHelmignore(t *testing.T) {
+ // FIXME what does this test?
+ loadChart(t, "testdata/dependent-chart-helmignore")
+}
+
+func TestDependentChartsWithSubChartsSymlink(t *testing.T) {
+ joonix := filepath.Join("testdata", "joonix")
+ if err := os.Symlink(filepath.Join("..", "..", "frobnitz"), filepath.Join(joonix, "charts", "frobnitz")); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(filepath.Join(joonix, "charts", "frobnitz"))
+ c := loadChart(t, joonix)
+
+ if c.Name() != "joonix" {
+ t.Fatalf("unexpected chart name: %s", c.Name())
+ }
+ if n := len(c.Dependencies()); n != 1 {
+ t.Fatalf("expected 1 dependency for this chart, but got %d", n)
+ }
+}
+
+func TestDependentChartsWithSubchartsAllSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-all-in-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+}
+
+func TestDependentChartsWithSomeSubchartsSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-mixed-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+}
+
+func validateDependencyTree(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ for _, dependency := range c.Dependencies() {
+ if dependency.Parent() != c {
+ if dependency.Parent() != c {
+ t.Fatalf("dependency chart %s has wrong parent, expected %s but got %s", dependency.Name(), c.Name(), dependency.Parent().Name())
+ }
+ }
+ // recurse entire tree
+ validateDependencyTree(t, dependency)
+ }
+}
+
+func TestChartWithDependencyAliasedTwiceAndDoublyReferencedSubDependency(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-dependency-aliased-twice")
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatalf("expected one dependency for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected two dependencies after processing aliases")
+ }
+ validateDependencyTree(t, c)
+}
diff --git a/internal/chart/v3/util/doc.go b/internal/chart/v3/util/doc.go
new file mode 100644
index 000000000..002d5babc
--- /dev/null
+++ b/internal/chart/v3/util/doc.go
@@ -0,0 +1,45 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+package util contains tools for working with charts.
+
+Charts are described in the chart package (pkg/chart).
+This package provides utilities for serializing and deserializing charts.
+
+A chart can be represented on the file system in one of two ways:
+
+ - As a directory that contains a Chart.yaml file and other chart things.
+ - As a tarred gzipped file containing a directory that then contains a
+ Chart.yaml file.
+
+This package provides utilities for working with those file formats.
+
+The preferred way of loading a chart is using 'loader.Load`:
+
+ chart, err := loader.Load(filename)
+
+This will attempt to discover whether the file at 'filename' is a directory or
+a chart archive. It will then load accordingly.
+
+For accepting raw compressed tar file data from an io.Reader, the
+'loader.LoadArchive()' will read in the data, uncompress it, and unpack it
+into a Chart.
+
+When creating charts in memory, use the 'helm.sh/helm/pkg/chart'
+package directly.
+*/
+package util // import chartutil "helm.sh/helm/v4/internal/chart/v3/util"
diff --git a/internal/chart/v3/util/expand.go b/internal/chart/v3/util/expand.go
new file mode 100644
index 000000000..6cbbeabf2
--- /dev/null
+++ b/internal/chart/v3/util/expand.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+// Expand uncompresses and extracts a chart into the specified directory.
+func Expand(dir string, r io.Reader) error {
+ files, err := loader.LoadArchiveFiles(r)
+ if err != nil {
+ return err
+ }
+
+ // Get the name of the chart
+ var chartName string
+ for _, file := range files {
+ if file.Name == "Chart.yaml" {
+ ch := &chart.Metadata{}
+ if err := yaml.Unmarshal(file.Data, ch); err != nil {
+ return fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ chartName = ch.Name
+ }
+ }
+ if chartName == "" {
+ return errors.New("chart name not specified")
+ }
+
+ // Find the base directory
+ // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up
+ // being wrong or returning an error. This was introduced in v0.4.0.
+ dir = filepath.Clean(dir)
+ chartdir, err := securejoin.SecureJoin(dir, chartName)
+ if err != nil {
+ return err
+ }
+
+ // Copy all files verbatim. We don't parse these files because parsing can remove
+ // comments.
+ for _, file := range files {
+ outpath, err := securejoin.SecureJoin(chartdir, file.Name)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the necessary subdirs get created.
+ basedir := filepath.Dir(outpath)
+ if err := os.MkdirAll(basedir, 0755); err != nil {
+ return err
+ }
+
+ if err := os.WriteFile(outpath, file.Data, 0644); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ExpandFile expands the src file into the dest directory.
+func ExpandFile(dest, src string) error {
+ h, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer h.Close()
+ return Expand(dest, h)
+}
diff --git a/internal/chart/v3/util/expand_test.go b/internal/chart/v3/util/expand_test.go
new file mode 100644
index 000000000..280995f7e
--- /dev/null
+++ b/internal/chart/v3/util/expand_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestExpand(t *testing.T) {
+ dest := t.TempDir()
+
+ reader, err := os.Open("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := Expand(dest, reader); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
+
+func TestExpandFile(t *testing.T) {
+ dest := t.TempDir()
+
+ if err := ExpandFile(dest, "testdata/frobnitz-1.2.3.tgz"); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
diff --git a/internal/chart/v3/util/save.go b/internal/chart/v3/util/save.go
new file mode 100644
index 000000000..49d93bf40
--- /dev/null
+++ b/internal/chart/v3/util/save.go
@@ -0,0 +1,254 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=")
+
+// SaveDir saves a chart as files in a directory.
+//
+// This takes the chart name, and creates a new subdirectory inside of the given dest
+// directory, writing the chart's contents to that subdirectory.
+func SaveDir(c *chart.Chart, dest string) error {
+ // Create the chart directory
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ outdir := filepath.Join(dest, c.Name())
+ if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
+ return fmt.Errorf("file %s already exists and is not a directory", outdir)
+ }
+ if err := os.MkdirAll(outdir, 0755); err != nil {
+ return err
+ }
+
+ // Save the chart file.
+ if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil {
+ return err
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ vf := filepath.Join(outdir, ValuesfileName)
+ if err := writeFile(vf, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ filename := filepath.Join(outdir, SchemafileName)
+ if err := writeFile(filename, c.Schema); err != nil {
+ return err
+ }
+ }
+
+ // Save templates and files
+ for _, o := range [][]*common.File{c.Templates, c.Files} {
+ for _, f := range o {
+ n := filepath.Join(outdir, f.Name)
+ if err := writeFile(n, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save dependencies
+ base := filepath.Join(outdir, ChartsDir)
+ for _, dep := range c.Dependencies() {
+ // Here, we write each dependency as a tar file.
+ if _, err := Save(dep, base); err != nil {
+ return fmt.Errorf("saving %s: %w", dep.ChartFullPath(), err)
+ }
+ }
+ return nil
+}
+
+// Save creates an archived chart to the given directory.
+//
+// This takes an existing chart and a destination directory.
+//
+// If the directory is /foo, and the chart is named bar, with version 1.0.0, this
+// will generate /foo/bar-1.0.0.tgz.
+//
+// This returns the absolute path to the chart archive file.
+func Save(c *chart.Chart, outDir string) (string, error) {
+ if err := c.Validate(); err != nil {
+ return "", fmt.Errorf("chart validation: %w", err)
+ }
+
+ filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version)
+ filename = filepath.Join(outDir, filename)
+ dir := filepath.Dir(filename)
+ if stat, err := os.Stat(dir); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ if err2 := os.MkdirAll(dir, 0755); err2 != nil {
+ return "", err2
+ }
+ } else {
+ return "", fmt.Errorf("stat %s: %w", dir, err)
+ }
+ } else if !stat.IsDir() {
+ return "", fmt.Errorf("is not a directory: %s", dir)
+ }
+
+ f, err := os.Create(filename)
+ if err != nil {
+ return "", err
+ }
+
+ // Wrap in gzip writer
+ zipper := gzip.NewWriter(f)
+ zipper.Extra = headerBytes
+ zipper.Comment = "Helm"
+
+ // Wrap in tar writer
+ twriter := tar.NewWriter(zipper)
+ rollback := false
+ defer func() {
+ twriter.Close()
+ zipper.Close()
+ f.Close()
+ if rollback {
+ os.Remove(filename)
+ }
+ }()
+
+ if err := writeTarContents(twriter, c, ""); err != nil {
+ rollback = true
+ return filename, err
+ }
+ return filename, nil
+}
+
+func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error {
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ base := filepath.Join(prefix, c.Name())
+
+ // Save Chart.yaml
+ cdata, err := yaml.Marshal(c.Metadata)
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil {
+ return err
+ }
+
+ // Save Chart.lock
+ if c.Lock != nil {
+ ldata, err := yaml.Marshal(c.Lock)
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil {
+ return err
+ }
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ if !json.Valid(c.Schema) {
+ return errors.New("invalid JSON in " + SchemafileName)
+ }
+ if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil {
+ return err
+ }
+ }
+
+ // Save templates
+ for _, f := range c.Templates {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data); err != nil {
+ return err
+ }
+ }
+
+ // Save files
+ for _, f := range c.Files {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data); err != nil {
+ return err
+ }
+ }
+
+ // Save dependencies
+ for _, dep := range c.Dependencies() {
+ if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// writeToTar writes a single file to a tar archive.
+func writeToTar(out *tar.Writer, name string, body []byte) error {
+ // TODO: Do we need to create dummy parent directory names if none exist?
+ h := &tar.Header{
+ Name: filepath.ToSlash(name),
+ Mode: 0644,
+ Size: int64(len(body)),
+ ModTime: time.Now(),
+ }
+ if err := out.WriteHeader(h); err != nil {
+ return err
+ }
+ _, err := out.Write(body)
+ return err
+}
+
+// If the name has directory name has characters which would change the location
+// they need to be removed.
+func validateName(name string) error {
+ nname := filepath.Base(name)
+
+ if nname != name {
+ return common.ErrInvalidChartName{Name: name}
+ }
+
+ return nil
+}
diff --git a/internal/chart/v3/util/save_test.go b/internal/chart/v3/util/save_test.go
new file mode 100644
index 000000000..9b1b14a4c
--- /dev/null
+++ b/internal/chart/v3/util/save_test.go
@@ -0,0 +1,262 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestSave(t *testing.T) {
+ tmp := t.TempDir()
+
+ for _, dest := range []string{tmp, filepath.Join(tmp, "newdir")} {
+ t.Run("outDir="+dest, func(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ }
+ chartWithInvalidJSON := withSchema(*c, []byte("{"))
+
+ where, err := Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ if !strings.HasPrefix(where, dest) {
+ t.Fatalf("Expected %q to start with %q", where, dest)
+ }
+ if !strings.HasSuffix(where, ".tgz") {
+ t.Fatalf("Expected %q to end with .tgz", where)
+ }
+
+ c2, err := loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+ if len(c2.Files) != 1 || c2.Files[0].Name != "scheherazade/shahryar.txt" {
+ t.Fatal("Files data did not match")
+ }
+
+ if !bytes.Equal(c.Schema, c2.Schema) {
+ indentation := 4
+ formattedExpected := Indent(indentation, string(c.Schema))
+ formattedActual := Indent(indentation, string(c2.Schema))
+ t.Fatalf("Schema data did not match.\nExpected:\n%s\nActual:\n%s", formattedExpected, formattedActual)
+ }
+ if _, err := Save(&chartWithInvalidJSON, dest); err == nil {
+ t.Fatalf("Invalid JSON was not caught while saving chart")
+ }
+
+ c.Metadata.APIVersion = chart.APIVersionV3
+ where, err = Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ c2, err = loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Lock == nil {
+ t.Fatal("Expected v3 chart archive to contain a Chart.lock file")
+ }
+ if c2.Lock.Digest != c.Lock.Digest {
+ t.Fatal("Chart.lock data did not match")
+ }
+ })
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "../ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
+ },
+ }
+ _, err := Save(c, tmp)
+ if err == nil {
+ t.Fatal("Expected error saving chart with invalid name")
+ }
+}
+
+// Creates a copy with a different schema; does not modify anything.
+func withSchema(chart chart.Chart, schema []byte) chart.Chart {
+ chart.Schema = schema
+ return chart
+}
+
+func Indent(n int, text string) string {
+ startOfLine := regexp.MustCompile(`(?m)^`)
+ indentation := strings.Repeat(" ", n)
+ return startOfLine.ReplaceAllLiteralString(text, indentation)
+}
+
+func TestSavePreservesTimestamps(t *testing.T) {
+ // Test executes so quickly that if we don't subtract a second, the
+ // check will fail because `initialCreateTime` will be identical to the
+ // written timestamp for the files.
+ initialCreateTime := time.Now().Add(-1 * time.Second)
+
+ tmp := t.TempDir()
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Values: map[string]interface{}{
+ "imageName": "testimage",
+ "imageId": 42,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ }
+
+ where, err := Save(c, tmp)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ allHeaders, err := retrieveAllHeadersFromTar(where)
+ if err != nil {
+ t.Fatalf("Failed to parse tar: %v", err)
+ }
+
+ for _, header := range allHeaders {
+ if header.ModTime.Before(initialCreateTime) {
+ t.Fatalf("File timestamp not preserved: %v", header.ModTime)
+ }
+ }
+}
+
+// We could refactor `load.go` to use this `retrieveAllHeadersFromTar` function
+// as well, so we are not duplicating components of the code which iterate
+// through the tar.
+func retrieveAllHeadersFromTar(path string) ([]*tar.Header, error) {
+ raw, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ unzipped, err := gzip.NewReader(raw)
+ if err != nil {
+ return nil, err
+ }
+ defer unzipped.Close()
+
+ tr := tar.NewReader(unzipped)
+ headers := []*tar.Header{}
+ for {
+ hd, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ headers = append(headers, hd)
+ }
+
+ return headers, nil
+}
+
+func TestSaveDir(t *testing.T) {
+ tmp := t.TempDir()
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
+ },
+ Templates: []*common.File{
+ {Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), Data: []byte("abc: {{ .Values.abc }}")},
+ },
+ }
+
+ if err := SaveDir(c, tmp); err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ c2, err := loader.LoadDir(tmp + "/ahab")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+
+ if len(c2.Templates) != 1 || c2.Templates[0].Name != c.Templates[0].Name {
+ t.Fatal("Templates data did not match")
+ }
+
+ if len(c2.Files) != 1 || c2.Files[0].Name != c.Files[0].Name {
+ t.Fatal("Files data did not match")
+ }
+
+ tmp2 := t.TempDir()
+ c.Metadata.Name = "../ahab"
+ pth := filepath.Join(tmp2, "tmpcharts")
+ if err := os.MkdirAll(filepath.Join(pth), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := SaveDir(c, pth); err.Error() != "\"../ahab\" is not a valid chart name" {
+ t.Fatalf("Did not get expected error for chart named %q", c.Name())
+ }
+}
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
new file mode 100644
index 000000000..4a4da7996
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ - name: child
+ alias: bar
+ version: 1.0.0
+
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
new file mode 100644
index 000000000..0f3afd8c6
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..3e0bf725b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
new file mode 100644
index 000000000..1830492ef
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}-{{ .Values.from }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..b5d55af7c
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml
new file mode 100644
index 000000000..695521a4a
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml
@@ -0,0 +1,7 @@
+foo:
+ grandchild:
+ from: foo
+bar:
+ grandchild:
+ from: bar
+
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
new file mode 100644
index 000000000..f2f0610b5
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ import-values:
+ - parent: foo-defaults
+ child: defaults
+ - name: child
+ alias: bar
+ version: 1.0.0
+ import-values:
+ - parent: bar-defaults
+ child: defaults
+
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
new file mode 100644
index 000000000..08ccac9e5
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: grandchild
+ version: 1.0.0
+ import-values:
+ - parent: defaults
+ child: defaults
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..3e0bf725b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
new file mode 100644
index 000000000..f51c594f4
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
@@ -0,0 +1,2 @@
+defaults:
+ defaultValue: "42"
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..3140f53dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ .Values.defaults | toYaml }}
+
diff --git a/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
new file mode 100644
index 000000000..a2b62c95a
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ toYaml .Values.defaults | indent 2 }}
+
diff --git a/internal/chart/v3/util/testdata/chartfiletest.yaml b/internal/chart/v3/util/testdata/chartfiletest.yaml
new file mode 100644
index 000000000..d222c8f8d
--- /dev/null
+++ b/internal/chart/v3/util/testdata/chartfiletest.yaml
@@ -0,0 +1,20 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
diff --git a/pkg/chart/v2/util/testdata/coleridge.yaml b/internal/chart/v3/util/testdata/coleridge.yaml
similarity index 100%
rename from pkg/chart/v2/util/testdata/coleridge.yaml
rename to internal/chart/v3/util/testdata/coleridge.yaml
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore b/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock b/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml
new file mode 100644
index 000000000..b8773d0d3
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml
@@ -0,0 +1,29 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners2
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners1
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt b/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE b/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/README.md b/internal/chart/v3/util/testdata/dependent-chart-alias/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md b/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg b/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/ignore/me.txt b/internal/chart/v3/util/testdata/dependent-chart-alias/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl b/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore b/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore
new file mode 100644
index 000000000..8a71bc82e
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore
@@ -0,0 +1,2 @@
+ignore/
+.*
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml
new file mode 100644
index 000000000..8b4ad8cdd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/.ignore_me b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/.ignore_me
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl b/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..8b4ad8cdd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..06283093e
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..6543799d0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/pkg/repo/testdata/repository/frobnitz-1.2.3.tgz b/internal/chart/v3/util/testdata/frobnitz-1.2.3.tgz
similarity index 100%
rename from pkg/repo/testdata/repository/frobnitz-1.2.3.tgz
rename to internal/chart/v3/util/testdata/frobnitz-1.2.3.tgz
diff --git a/internal/chart/v3/util/testdata/frobnitz/.helmignore b/internal/chart/v3/util/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/internal/chart/v3/util/testdata/frobnitz/Chart.lock b/internal/chart/v3/util/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/internal/chart/v3/util/testdata/frobnitz/Chart.yaml b/internal/chart/v3/util/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt b/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/internal/chart/v3/util/testdata/frobnitz/LICENSE b/internal/chart/v3/util/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/internal/chart/v3/util/testdata/frobnitz/README.md b/internal/chart/v3/util/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me b/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml
new file mode 100644
index 000000000..4d3eea730
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
new file mode 100644
index 000000000..da605991b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/internal/chart/v3/util/testdata/frobnitz/docs/README.md b/internal/chart/v3/util/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/internal/chart/v3/util/testdata/frobnitz/icon.svg b/internal/chart/v3/util/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/internal/chart/v3/util/testdata/frobnitz/ignore/me.txt b/internal/chart/v3/util/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl b/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/internal/chart/v3/util/testdata/frobnitz/values.yaml b/internal/chart/v3/util/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/internal/chart/v3/util/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz b/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..692965951
Binary files /dev/null and b/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/internal/chart/v3/util/testdata/genfrob.sh b/internal/chart/v3/util/testdata/genfrob.sh
new file mode 100755
index 000000000..35fdd59f2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/genfrob.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+tar -zcvf frobnitz_backslash/charts/mariner-4.3.2.tgz mariner
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
new file mode 100644
index 000000000..b2f17fb39
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: dev
+ repository: file://envs/dev
+ version: v0.1.0
+- name: prod
+ repository: file://envs/prod
+ version: v0.1.0
+digest: sha256:9403fc24f6cf9d6055820126cf7633b4bd1fed3c77e4880c674059f536346182
+generated: "2020-02-03T10:38:51.180474+01:00"
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
new file mode 100644
index 000000000..0b3e9958b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v3
+name: parent-chart
+version: v0.1.0
+appVersion: v0.1.0
+dependencies:
+ - name: dev
+ repository: "file://envs/dev"
+ version: ">= 0.0.1"
+ condition: dev.enabled,global.dev.enabled
+ tags:
+ - dev
+ import-values:
+ - data
+
+ - name: prod
+ repository: "file://envs/prod"
+ version: ">= 0.0.1"
+ condition: prod.enabled,global.prod.enabled
+ tags:
+ - prod
+ import-values:
+ - data
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz
new file mode 100644
index 000000000..d28e1621c
Binary files /dev/null and b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz
new file mode 100644
index 000000000..a0c5aa84b
Binary files /dev/null and b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz differ
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
new file mode 100644
index 000000000..72427c097
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+name: dev
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
new file mode 100644
index 000000000..38f03484d
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
@@ -0,0 +1,9 @@
+# Dev values parent-chart
+nameOverride: parent-chart-dev
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 1
+ maxReplicas: 3
+ targetCPUUtilizationPercentage: 80
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
new file mode 100644
index 000000000..058ab3942
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+name: prod
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
new file mode 100644
index 000000000..10cc756b2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
@@ -0,0 +1,9 @@
+# Prod values parent-chart
+nameOverride: parent-chart-prod
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 2
+ maxReplicas: 5
+ targetCPUUtilizationPercentage: 90
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
new file mode 100644
index 000000000..976e5a8f1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
@@ -0,0 +1,16 @@
+###################################################################################################
+# parent-chart horizontal pod autoscaler
+###################################################################################################
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ .Release.Name }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1beta1
+ kind: Deployment
+ name: {{ .Release.Name }}
+ minReplicas: {{ required "A valid .Values.resources.autoscaler.minReplicas entry required!" .Values.resources.autoscaler.minReplicas }}
+ maxReplicas: {{ required "A valid .Values.resources.autoscaler.maxReplicas entry required!" .Values.resources.autoscaler.maxReplicas }}
+ targetCPUUtilizationPercentage: {{ required "A valid .Values.resources.autoscaler.targetCPUUtilizationPercentage!" .Values.resources.autoscaler.targetCPUUtilizationPercentage }}
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
new file mode 100644
index 000000000..b812f0a33
--- /dev/null
+++ b/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
@@ -0,0 +1,10 @@
+# Default values for parent-chart.
+nameOverride: parent-chart
+tags:
+ dev: false
+ prod: true
+resources:
+ autoscaler:
+ minReplicas: 0
+ maxReplicas: 0
+ targetCPUUtilizationPercentage: 99
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/joonix/Chart.yaml b/internal/chart/v3/util/testdata/joonix/Chart.yaml
new file mode 100644
index 000000000..1860a3df1
--- /dev/null
+++ b/internal/chart/v3/util/testdata/joonix/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: joonix
+version: 1.2.3
diff --git a/internal/chart/v3/util/testdata/joonix/charts/.gitkeep b/internal/chart/v3/util/testdata/joonix/charts/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/internal/chart/v3/util/testdata/subpop/Chart.yaml b/internal/chart/v3/util/testdata/subpop/Chart.yaml
new file mode 100644
index 000000000..53e9ec502
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/Chart.yaml
@@ -0,0 +1,41 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
+dependencies:
+ - name: subchart1
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart1.enabled
+ tags:
+ - front-end
+ - subchart1
+ import-values:
+ - child: SC1data
+ parent: imported-chart1
+ - child: SC1data
+ parent: overridden-chart1
+ - child: imported-chartA
+ parent: imported-chartA
+ - child: imported-chartA-B
+ parent: imported-chartA-B
+ - child: overridden-chartA-B
+ parent: overridden-chartA-B
+ - child: SCBexported1A
+ parent: .
+ - SCBexported2
+ - SC1exported1
+
+ - name: subchart2
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2.enabled
+ tags:
+ - back-end
+ - subchart2
+
+ - name: subchart2
+ alias: subchart2alias
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2alias.enabled
diff --git a/internal/chart/v3/util/testdata/subpop/README.md b/internal/chart/v3/util/testdata/subpop/README.md
new file mode 100644
index 000000000..e43fbfe9c
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/README.md
@@ -0,0 +1,18 @@
+## Subpop
+
+This chart is for testing the processing of enabled/disabled charts
+via conditions and tags.
+
+Currently there are three levels:
+
+````
+parent
+-1 tags: front-end, subchart1
+--A tags: front-end, subchartA
+--B tags: front-end, subchartB
+-2 tags: back-end, subchart2
+--B tags: back-end, subchartB
+--C tags: back-end, subchartC
+````
+
+Tags and conditions are currently in requirements.yaml files.
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml
new file mode 100644
index 000000000..1539fb97d
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml
@@ -0,0 +1,36 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchart1
+version: 0.1.0
+dependencies:
+ - name: subcharta
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subcharta.enabled
+ tags:
+ - front-end
+ - subcharta
+ import-values:
+ - child: SCAdata
+ parent: imported-chartA
+ - child: SCAdata
+ parent: overridden-chartA
+ - child: SCAdata
+ parent: imported-chartA-B
+
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ import-values:
+ - child: SCBdata
+ parent: imported-chartB
+ - child: SCBdata
+ parent: imported-chartA-B
+ - child: exports.SCBexported2
+ parent: exports.SCBexported2
+ - SCBexported1
+
+ tags:
+ - front-end
+ - subchartb
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
new file mode 100644
index 000000000..2755a821b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subcharta
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
new file mode 100644
index 000000000..f0381ae6a
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
@@ -0,0 +1,17 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchartA
+service:
+ name: apache
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+SCAdata:
+ SCAbool: false
+ SCAfloat: 3.1
+ SCAint: 55
+ SCAstring: "jabba"
+ SCAnested1:
+ SCAnested2: true
+
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..bf12fe8f3
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
new file mode 100644
index 000000000..774fdd75c
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
@@ -0,0 +1,35 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+SCBdata:
+ SCBbool: true
+ SCBfloat: 7.77
+ SCBint: 33
+ SCBstring: "boba"
+
+exports:
+ SCBexported1:
+ SCBexported1A:
+ SCBexported1B: 1965
+
+ SCBexported2:
+ SCBexported2A: "blaster"
+
+global:
+ kolla:
+ nova:
+ api:
+ all:
+ port: 8774
+ metadata:
+ all:
+ port: 8775
+
+
+
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
new file mode 100644
index 000000000..fca77fd4b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: testCRDs
+spec:
+ group: testCRDGroups
+ names:
+ kind: TestCRD
+ listKind: TestCRDList
+ plural: TestCRDs
+ shortNames:
+ - tc
+ singular: authconfig
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
new file mode 100644
index 000000000..4bdf443f6
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
@@ -0,0 +1 @@
+Sample notes for {{ .Chart.Name }}
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml
new file mode 100644
index 000000000..fee94dced
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ kube-version/major: "{{ .Capabilities.KubeVersion.Major }}"
+ kube-version/minor: "{{ .Capabilities.KubeVersion.Minor }}"
+ kube-version/version: "v{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}.0"
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test" }}
+ kube-api-version/test: v1
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
new file mode 100644
index 000000000..91b954e5f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
@@ -0,0 +1,7 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ .Chart.Name }}-role
+rules:
+- resources: ["*"]
+ verbs: ["get","list","watch"]
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
new file mode 100644
index 000000000..5d193f1a6
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Chart.Name }}-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ .Chart.Name }}-role
+subjects:
+- kind: ServiceAccount
+ name: {{ .Chart.Name }}-sa
+ namespace: default
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
new file mode 100644
index 000000000..7126c7d89
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Chart.Name }}-sa
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml
new file mode 100644
index 000000000..a974e316a
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml
@@ -0,0 +1,55 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchart1
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+
+SC1data:
+ SC1bool: true
+ SC1float: 3.14
+ SC1int: 100
+ SC1string: "dollywood"
+ SC1extra1: 11
+
+imported-chartA:
+ SC1extra2: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 3.14
+ SCAint: 100
+ SCAstring: "jabbathehut"
+ SC1extra3: true
+
+imported-chartA-B:
+ SC1extra5: "tiller"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 3.33
+ SCAint: 555
+ SCAstring: "wormwood"
+ SCAextra1: 23
+
+ SCBbool: true
+ SCBfloat: 0.25
+ SCBint: 98
+ SCBstring: "murkwood"
+ SCBextra1: 13
+
+ SC1extra6: 77
+
+SCBexported1A:
+ SC1extra7: true
+
+exports:
+ SC1exported1:
+ global:
+ SC1exported2:
+ all:
+ SC1exported3: "SC1expstr"
\ No newline at end of file
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml
new file mode 100644
index 000000000..e77657040
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchart2
+version: 0.1.0
+dependencies:
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ tags:
+ - back-end
+ - subchartb
+ - name: subchartc
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartc.enabled
+ tags:
+ - back-end
+ - subchartc
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..bf12fe8f3
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..fb3dfc445
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart2-{{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: subchart2-{{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
new file mode 100644
index 000000000..e8c0ef5e5
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartc
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml b/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml b/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml
new file mode 100644
index 000000000..09eb05a96
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml b/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml b/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml
new file mode 100644
index 000000000..4ed3b7ad3
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml
@@ -0,0 +1,26 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+
+# switch-like
+tags:
+ front-end: true
+ back-end: false
diff --git a/internal/chart/v3/util/testdata/subpop/values.yaml b/internal/chart/v3/util/testdata/subpop/values.yaml
new file mode 100644
index 000000000..ba70ed406
--- /dev/null
+++ b/internal/chart/v3/util/testdata/subpop/values.yaml
@@ -0,0 +1,45 @@
+# parent/values.yaml
+
+imported-chart1:
+ SPextra1: "helm rocks"
+
+overridden-chart1:
+ SC1bool: false
+ SC1float: 3.141592
+ SC1int: 99
+ SC1string: "pollywog"
+ SPextra2: 42
+
+
+imported-chartA:
+ SPextra3: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SPextra4: true
+
+imported-chartA-B:
+ SPextra5: "k8s"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SCBbool: false
+ SCBfloat: 1.99
+ SCBint: 77
+ SCBstring: "jango"
+ SPextra6: 111
+
+tags:
+ front-end: true
+ back-end: false
+
+subchart2alias:
+ enabled: false
+
+ensurenull: null
diff --git a/pkg/chart/v2/util/testdata/test-values-invalid.schema.json b/internal/chart/v3/util/testdata/test-values-invalid.schema.json
similarity index 100%
rename from pkg/chart/v2/util/testdata/test-values-invalid.schema.json
rename to internal/chart/v3/util/testdata/test-values-invalid.schema.json
diff --git a/pkg/chart/v2/util/testdata/test-values-negative.yaml b/internal/chart/v3/util/testdata/test-values-negative.yaml
similarity index 100%
rename from pkg/chart/v2/util/testdata/test-values-negative.yaml
rename to internal/chart/v3/util/testdata/test-values-negative.yaml
diff --git a/pkg/chart/v2/util/testdata/test-values.schema.json b/internal/chart/v3/util/testdata/test-values.schema.json
similarity index 100%
rename from pkg/chart/v2/util/testdata/test-values.schema.json
rename to internal/chart/v3/util/testdata/test-values.schema.json
diff --git a/pkg/chart/v2/util/testdata/test-values.yaml b/internal/chart/v3/util/testdata/test-values.yaml
similarity index 100%
rename from pkg/chart/v2/util/testdata/test-values.yaml
rename to internal/chart/v3/util/testdata/test-values.yaml
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md b/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md
new file mode 100644
index 000000000..536bb9792
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md
@@ -0,0 +1,16 @@
+# Three Level Dependent Chart
+
+This chart is for testing the processing of multi-level dependencies.
+
+Consists of the following charts:
+
+- Library Chart
+- App Chart (Uses Library Chart as dependency, 2x: app1/app2)
+- Umbrella Chart (Has all the app charts as dependencies)
+
+The precedence is as follows: `library < app < umbrella`
+
+Catches two use-cases:
+
+- app overwriting library (app2)
+- umbrella overwriting app and library (app1)
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
new file mode 100644
index 000000000..1026f8901
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v3
+name: umbrella
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: app1
+ version: 0.1.0
+ condition: app1.enabled
+- name: app2
+ version: 0.1.0
+ condition: app2.enabled
+- name: app3
+ version: 0.1.0
+ condition: app3.enabled
+- name: app4
+ version: 0.1.0
+ condition: app4.enabled
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
new file mode 100644
index 000000000..5bdf21570
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app1
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
new file mode 100644
index 000000000..1313ce4e9
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app2
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
new file mode 100644
index 000000000..98bd6d24b
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 8080
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
new file mode 100644
index 000000000..1a80533d0
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app3
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
new file mode 100644
index 000000000..b738e2a57
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
@@ -0,0 +1,2 @@
+service:
+ type: ClusterIP
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
new file mode 100644
index 000000000..886b4b1e4
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: app4
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml
new file mode 100644
index 000000000..de0bafa51
--- /dev/null
+++ b/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml
@@ -0,0 +1,14 @@
+app1:
+ enabled: true
+ service:
+ type: ClusterIP
+ port: 3456
+
+app2:
+ enabled: true
+
+app3:
+ enabled: true
+
+app4:
+ enabled: true
diff --git a/internal/chart/v3/util/validate_name.go b/internal/chart/v3/util/validate_name.go
new file mode 100644
index 000000000..6595e085d
--- /dev/null
+++ b/internal/chart/v3/util/validate_name.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+)
+
+// validName is a regular expression for resource names.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+var (
+ // errMissingName indicates that a release (name) was not provided.
+ errMissingName = errors.New("no name provided")
+
+ // errInvalidName indicates that an invalid release name was provided
+ errInvalidName = fmt.Errorf(
+ "invalid release name, must match regex %s and the length must not be longer than 53",
+ validName.String())
+
+ // errInvalidKubernetesName indicates that the name does not meet the Kubernetes
+ // restrictions on metadata names.
+ errInvalidKubernetesName = fmt.Errorf(
+ "invalid metadata name, must match regex %s and the length must not be longer than 253",
+ validName.String())
+)
+
+const (
+ // According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names)
+ // some resource names have a max length of 63 characters while others have a max
+ // length of 253 characters. As we cannot be sure the resources used in a chart, we
+ // therefore need to limit it to 63 chars and reserve 10 chars for additional part to name
+ // of the resource. The reason is that chart maintainers can use release name as part of
+ // the resource name (and some additional chars).
+ maxReleaseNameLen = 53
+ // maxMetadataNameLen is the maximum length Kubernetes allows for any name.
+ maxMetadataNameLen = 253
+)
+
+// ValidateReleaseName performs checks for an entry for a Helm release name
+//
+// For Helm to allow a name, it must be below a certain character count (53) and also match
+// a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+ // This case is preserved for backwards compatibility
+ if name == "" {
+ return errMissingName
+
+ }
+ if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+ return errInvalidName
+ }
+ return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4. Name validation now uses rules defined in
+// pkg/lint/rules.validateMetadataNameFunc()
+func ValidateMetadataName(name string) error {
+ if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) {
+ return errInvalidKubernetesName
+ }
+ return nil
+}
diff --git a/internal/chart/v3/util/validate_name_test.go b/internal/chart/v3/util/validate_name_test.go
new file mode 100644
index 000000000..cfc62a0f7
--- /dev/null
+++ b/internal/chart/v3/util/validate_name_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "testing"
+
+// TestValidateReleaseName is a regression test for ValidateName
+//
+// Kubernetes has strict naming conventions for resource names. This test represents
+// those conventions.
+//
+// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// NOTE: At the time of this writing, the docs above say that names cannot begin with
+// digits. However, `kubectl`'s regular expression explicit allows this, and
+// Kubernetes (at least as of 1.18) also accepts resources whose names begin with digits.
+func TestValidateReleaseName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateReleaseName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": true,
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateMetadataName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
diff --git a/internal/cli/output/color.go b/internal/cli/output/color.go
new file mode 100644
index 000000000..93bbbe56e
--- /dev/null
+++ b/internal/cli/output/color.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package output
+
+import (
+ "github.com/fatih/color"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// ColorizeStatus returns a colorized version of the status string based on the status value
+func ColorizeStatus(status release.Status, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return status.String()
+ }
+
+ switch status {
+ case release.StatusDeployed:
+ return color.GreenString(status.String())
+ case release.StatusFailed:
+ return color.RedString(status.String())
+ case release.StatusPendingInstall, release.StatusPendingUpgrade, release.StatusPendingRollback, release.StatusUninstalling:
+ return color.YellowString(status.String())
+ case release.StatusUnknown:
+ return color.RedString(status.String())
+ default:
+ // For uninstalled, superseded, and any other status
+ return status.String()
+ }
+}
+
+// ColorizeHeader returns a colorized version of a header string
+func ColorizeHeader(header string, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return header
+ }
+
+ // Use bold for headers
+ return color.New(color.Bold).Sprint(header)
+}
+
+// ColorizeNamespace returns a colorized version of a namespace string
+func ColorizeNamespace(namespace string, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return namespace
+ }
+
+ // Use cyan for namespaces
+ return color.CyanString(namespace)
+}
diff --git a/internal/cli/output/color_test.go b/internal/cli/output/color_test.go
new file mode 100644
index 000000000..c84e2c359
--- /dev/null
+++ b/internal/cli/output/color_test.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package output
+
+import (
+ "strings"
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestColorizeStatus(t *testing.T) {
+
+ tests := []struct {
+ name string
+ status release.Status
+ noColor bool
+ envNoColor string
+ wantColor bool // whether we expect color codes in output
+ }{
+ {
+ name: "deployed status with color",
+ status: release.StatusDeployed,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "deployed status without color flag",
+ status: release.StatusDeployed,
+ noColor: true,
+ envNoColor: "",
+ wantColor: false,
+ },
+ {
+ name: "deployed status with NO_COLOR env",
+ status: release.StatusDeployed,
+ noColor: false,
+ envNoColor: "1",
+ wantColor: false,
+ },
+ {
+ name: "failed status with color",
+ status: release.StatusFailed,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "pending install status with color",
+ status: release.StatusPendingInstall,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "unknown status with color",
+ status: release.StatusUnknown,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "superseded status with color",
+ status: release.StatusSuperseded,
+ noColor: false,
+ envNoColor: "",
+ wantColor: false, // superseded doesn't get colored
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeStatus(tt.status, tt.noColor)
+
+ // Check if result contains ANSI escape codes
+ hasColor := strings.Contains(result, "\033[")
+
+ // In test environment, term.IsTerminal will be false, so we won't get color
+ // unless we're testing the logic without terminal detection
+ if hasColor && !tt.wantColor {
+ t.Errorf("ColorizeStatus() returned color when none expected: %q", result)
+ }
+
+ // Always check the status text is present
+ if !strings.Contains(result, tt.status.String()) {
+ t.Errorf("ColorizeStatus() = %q, want to contain %q", result, tt.status.String())
+ }
+ })
+ }
+}
+
+func TestColorizeHeader(t *testing.T) {
+
+ tests := []struct {
+ name string
+ header string
+ noColor bool
+ envNoColor string
+ }{
+ {
+ name: "header with color",
+ header: "NAME",
+ noColor: false,
+ envNoColor: "",
+ },
+ {
+ name: "header without color flag",
+ header: "NAME",
+ noColor: true,
+ envNoColor: "",
+ },
+ {
+ name: "header with NO_COLOR env",
+ header: "NAME",
+ noColor: false,
+ envNoColor: "1",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeHeader(tt.header, tt.noColor)
+
+ // Always check the header text is present
+ if !strings.Contains(result, tt.header) {
+ t.Errorf("ColorizeHeader() = %q, want to contain %q", result, tt.header)
+ }
+ })
+ }
+}
+
+func TestColorizeNamespace(t *testing.T) {
+
+ tests := []struct {
+ name string
+ namespace string
+ noColor bool
+ envNoColor string
+ }{
+ {
+ name: "namespace with color",
+ namespace: "default",
+ noColor: false,
+ envNoColor: "",
+ },
+ {
+ name: "namespace without color flag",
+ namespace: "default",
+ noColor: true,
+ envNoColor: "",
+ },
+ {
+ name: "namespace with NO_COLOR env",
+ namespace: "default",
+ noColor: false,
+ envNoColor: "1",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeNamespace(tt.namespace, tt.noColor)
+
+ // Always check the namespace text is present
+ if !strings.Contains(result, tt.namespace) {
+ t.Errorf("ColorizeNamespace() = %q, want to contain %q", result, tt.namespace)
+ }
+ })
+ }
+}
diff --git a/internal/logging/logging.go b/internal/logging/logging.go
new file mode 100644
index 000000000..2e8208d08
--- /dev/null
+++ b/internal/logging/logging.go
@@ -0,0 +1,87 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logging
+
+import (
+ "context"
+ "log/slog"
+ "os"
+)
+
+// DebugEnabledFunc is a function type that determines if debug logging is enabled
+// We use a function because we want to check the setting at log time, not when the logger is created
+type DebugEnabledFunc func() bool
+
+// DebugCheckHandler checks settings.Debug at log time
+type DebugCheckHandler struct {
+ handler slog.Handler
+ debugEnabled DebugEnabledFunc
+}
+
+// Enabled implements slog.Handler.Enabled
+func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
+ if level == slog.LevelDebug {
+ return h.debugEnabled()
+ }
+ return true // Always log other levels
+}
+
+// Handle implements slog.Handler.Handle
+func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
+ return h.handler.Handle(ctx, r)
+}
+
+// WithAttrs implements slog.Handler.WithAttrs
+func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return &DebugCheckHandler{
+ handler: h.handler.WithAttrs(attrs),
+ debugEnabled: h.debugEnabled,
+ }
+}
+
+// WithGroup implements slog.Handler.WithGroup
+func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
+ return &DebugCheckHandler{
+ handler: h.handler.WithGroup(name),
+ debugEnabled: h.debugEnabled,
+ }
+}
+
+// NewLogger creates a new logger with dynamic debug checking
+func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
+ // Create base handler that removes timestamps
+ baseHandler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
+ // Always use LevelDebug here to allow all messages through
+ // Our custom handler will do the filtering
+ Level: slog.LevelDebug,
+ ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
+ // Remove the time attribute
+ if a.Key == slog.TimeKey {
+ return slog.Attr{}
+ }
+ return a
+ },
+ })
+
+ // Wrap with our dynamic debug-checking handler
+ dynamicHandler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ return slog.New(dynamicHandler)
+}
diff --git a/internal/monocular/client.go b/internal/monocular/client.go
index 88a2564b9..f4ef5d647 100644
--- a/internal/monocular/client.go
+++ b/internal/monocular/client.go
@@ -29,9 +29,6 @@ type Client struct {
// The base URL for requests
BaseURL string
-
- // The internal logger to use
- Log func(string, ...interface{})
}
// New creates a new client
@@ -44,12 +41,9 @@ func New(u string) (*Client, error) {
return &Client{
BaseURL: u,
- Log: nopLogger,
}, nil
}
-var nopLogger = func(_ string, _ ...interface{}) {}
-
// Validate if the base URL for monocular is valid.
func validate(u string) error {
diff --git a/internal/monocular/search.go b/internal/monocular/search.go
index 6912be2ce..fcf04b7a4 100644
--- a/internal/monocular/search.go
+++ b/internal/monocular/search.go
@@ -129,7 +129,7 @@ func (c *Client) Search(term string) ([]SearchResult, error) {
}
defer res.Body.Close()
- if res.StatusCode != 200 {
+ if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to fetch %s : %s", p.String(), res.Status)
}
diff --git a/pkg/plugin/cache/cache.go b/internal/plugin/cache/cache.go
similarity index 96%
rename from pkg/plugin/cache/cache.go
rename to internal/plugin/cache/cache.go
index f3e847374..f3b737477 100644
--- a/pkg/plugin/cache/cache.go
+++ b/internal/plugin/cache/cache.go
@@ -14,7 +14,7 @@ limitations under the License.
*/
// Package cache provides a key generator for vcs urls.
-package cache // import "helm.sh/helm/v4/pkg/plugin/cache"
+package cache // import "helm.sh/helm/v4/internal/plugin/cache"
import (
"net/url"
diff --git a/internal/plugin/config.go b/internal/plugin/config.go
new file mode 100644
index 000000000..e1f491779
--- /dev/null
+++ b/internal/plugin/config.go
@@ -0,0 +1,54 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+
+ "go.yaml.in/yaml/v3"
+)
+
+// Config represents an plugin type specific configuration
+// It is expected to type assert (cast) the a Config to its expected underlying type (schema.ConfigCLIV1, schema.ConfigGetterV1, etc).
+type Config interface {
+ Validate() error
+}
+
+func unmarshaConfig(pluginType string, configData map[string]any) (Config, error) {
+
+ pluginTypeMeta, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type %q", pluginType)
+ }
+
+ // TODO: Avoid (yaml) serialization/deserialization for type conversion here
+
+ data, err := yaml.Marshal(configData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshel config data (plugin type %s): %w", pluginType, err)
+ }
+
+ config := reflect.New(pluginTypeMeta.configType)
+ d := yaml.NewDecoder(bytes.NewReader(data))
+ d.KnownFields(true)
+ if err := d.Decode(config.Interface()); err != nil {
+ return nil, err
+ }
+
+ return config.Interface().(Config), nil
+}
diff --git a/internal/plugin/config_test.go b/internal/plugin/config_test.go
new file mode 100644
index 000000000..c51b77ff0
--- /dev/null
+++ b/internal/plugin/config_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestUnmarshaConfig(t *testing.T) {
+ // Test unmarshalling a CLI plugin config
+ {
+ config, err := unmarshaConfig("cli/v1", map[string]any{
+ "usage": "usage string",
+ "shortHelp": "short help string",
+ "longHelp": "long help string",
+ "ignoreFlags": true,
+ })
+ require.NoError(t, err)
+
+ require.IsType(t, &schema.ConfigCLIV1{}, config)
+ assert.Equal(t, schema.ConfigCLIV1{
+ Usage: "usage string",
+ ShortHelp: "short help string",
+ LongHelp: "long help string",
+ IgnoreFlags: true,
+ }, *(config.(*schema.ConfigCLIV1)))
+ }
+
+ // Test unmarshalling invalid config data
+ {
+ config, err := unmarshaConfig("cli/v1", map[string]any{
+ "invalid field": "foo",
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "field not found")
+ assert.Nil(t, config)
+ }
+}
diff --git a/internal/plugin/descriptor.go b/internal/plugin/descriptor.go
new file mode 100644
index 000000000..ba92b3c55
--- /dev/null
+++ b/internal/plugin/descriptor.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+// Descriptor describes a plugin to find
+type Descriptor struct {
+ // Name is the name of the plugin
+ Name string
+ // Type is the type of the plugin (cli, getter, postrenderer)
+ Type string
+}
diff --git a/internal/plugin/doc.go b/internal/plugin/doc.go
new file mode 100644
index 000000000..39ba6300b
--- /dev/null
+++ b/internal/plugin/doc.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+---
+TODO: move this section to public plugin package
+
+Package plugin provides the implementation of the Helm plugin system.
+
+Conceptually, "plugins" enable extending Helm's functionality external to Helm's core codebase. The plugin system allows
+code to fetch plugins by type, then invoke the plugin with an input as required by that plugin type. The plugin
+returning an output for the caller to consume.
+
+An example of a plugin invocation:
+```
+d := plugin.Descriptor{
+ Type: "example/v1", //
+}
+plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d)
+
+for _, plg := range plgs {
+ input := &plugin.Input{
+ Message: schema.InputMessageExampleV1{ // The type of the input message is defined by the plugin's "type" (example/v1 here)
+ ...
+ },
+ }
+ output, err := plg.Invoke(context.Background(), input)
+ if err != nil {
+ ...
+ }
+
+ // consume the output, using type assertion to convert to the expected output type (as defined by the plugin's "type")
+ outputMessage, ok := output.Message.(schema.OutputMessageExampleV1)
+}
+
+---
+
+Package `plugin` provides the implementation of the Helm plugin system.
+
+Helm plugins are exposed to uses as the "Plugin" type, the basic interface that primarily support the "Invoke" method.
+
+# Plugin Runtimes
+Internally, plugins must be implemented by a "runtime" that is responsible for creating the plugin instance, and dispatching the plugin's invocation to the plugin's implementation.
+For example:
+- forming environment variables and command line args for subprocess execution
+- converting input to JSON and invoking a function in a Wasm runtime
+
+Internally, the code structure is:
+Runtime.CreatePlugin()
+ |
+ | (creates)
+ |
+ \---> PluginRuntime
+ |
+ | (implements)
+ v
+ Plugin.Invoke()
+
+# Plugin Types
+Each plugin implements a specific functionality, denoted by the plugin's "type" e.g. "getter/v1". The "type" includes a version, in order to allow a given types messaging schema and invocation options to evolve.
+
+Specifically, the plugin's "type" specifies the contract for the input and output messages that are expected to be passed to the plugin, and returned from the plugin. The plugin's "type" also defines the options that can be passed to the plugin when invoking it.
+
+# Metadata
+Each plugin must have a `plugin.yaml`, that defines the plugin's metadata. The metadata includes the plugin's name, version, and other information.
+
+For legacy plugins, the type is inferred by which fields are set on the plugin: a downloader plugin is inferred when metadata contains a "downloaders" yaml node, otherwise it is assumed to define a Helm CLI subcommand.
+
+For v1 plugins, the metadata includes explicit apiVersion and type fields. It will also contain type-specific Config, and RuntimeConfig fields.
+
+# Runtime and type cardinality
+From a cardinality perspective, this means there a "few" runtimes, and "many" plugins types. It is also expected that the subprocess runtime will not be extended to support extra plugin types, and deprecated in a future version of Helm.
+
+Future ideas that are intended to be implemented include extending the plugin system to support future Wasm standards. Or allowing Helm SDK user's to inject "plugins" that are actually implemented as native go modules. Or even moving Helm's internal functionality e.g. yaml rendering engine to be used as an "in-built" plugin, along side other plugins that may implement other (non-go template) rendering engines.
+*/
+
+package plugin
diff --git a/internal/plugin/error.go b/internal/plugin/error.go
new file mode 100644
index 000000000..212460cea
--- /dev/null
+++ b/internal/plugin/error.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+// InvokeExecError is returned when a plugin invocation returns a non-zero status/exit code
+// - subprocess plugin: child process exit code
+// - extism plugin: wasm function return code
+type InvokeExecError struct {
+ ExitCode int // Exit code from plugin code execution
+ Err error // Underlying error
+}
+
+// Error implements the error interface
+func (e *InvokeExecError) Error() string {
+ return e.Err.Error()
+}
diff --git a/pkg/plugin/installer/base.go b/internal/plugin/installer/base.go
similarity index 93%
rename from pkg/plugin/installer/base.go
rename to internal/plugin/installer/base.go
index 3738246ee..c21a245a8 100644
--- a/pkg/plugin/installer/base.go
+++ b/internal/plugin/installer/base.go
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
"path/filepath"
diff --git a/pkg/plugin/installer/base_test.go b/internal/plugin/installer/base_test.go
similarity index 88%
rename from pkg/plugin/installer/base_test.go
rename to internal/plugin/installer/base_test.go
index f4dd6d6be..62b77bde5 100644
--- a/pkg/plugin/installer/base_test.go
+++ b/internal/plugin/installer/base_test.go
@@ -11,10 +11,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
- "os"
"testing"
)
@@ -37,12 +36,11 @@ func TestPath(t *testing.T) {
for _, tt := range tests {
- os.Setenv("HELM_PLUGINS", tt.helmPluginsDir)
+ t.Setenv("HELM_PLUGINS", tt.helmPluginsDir)
baseIns := newBase(tt.source)
baseInsPath := baseIns.Path()
if baseInsPath != tt.expectPath {
t.Errorf("expected name %s, got %s", tt.expectPath, baseInsPath)
}
- os.Unsetenv("HELM_PLUGINS")
}
}
diff --git a/pkg/plugin/installer/doc.go b/internal/plugin/installer/doc.go
similarity index 89%
rename from pkg/plugin/installer/doc.go
rename to internal/plugin/installer/doc.go
index b927dbd37..a4cf384bf 100644
--- a/pkg/plugin/installer/doc.go
+++ b/internal/plugin/installer/doc.go
@@ -14,4 +14,4 @@ limitations under the License.
*/
// Package installer provides an interface for installing Helm plugins.
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
diff --git a/pkg/plugin/installer/http_installer.go b/internal/plugin/installer/extractor.go
similarity index 66%
rename from pkg/plugin/installer/http_installer.go
rename to internal/plugin/installer/extractor.go
index b900fa401..407138197 100644
--- a/pkg/plugin/installer/http_installer.go
+++ b/internal/plugin/installer/extractor.go
@@ -13,38 +13,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
"archive/tar"
"bytes"
"compress/gzip"
+ "errors"
+ "fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
+ "slices"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
- "github.com/pkg/errors"
-
- "helm.sh/helm/v4/internal/third_party/dep/fs"
- "helm.sh/helm/v4/pkg/cli"
- "helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/plugin/cache"
)
-// HTTPInstaller installs plugins from an archive served by a web server.
-type HTTPInstaller struct {
- CacheDir string
- PluginName string
- base
- extractor Extractor
- getter getter.Getter
-}
-
// TarGzExtractor extracts gzip compressed tar archives
type TarGzExtractor struct{}
@@ -66,6 +53,9 @@ func mediaTypeToExtension(mt string) (string, bool) {
switch strings.ToLower(mt) {
case "application/gzip", "application/x-gzip", "application/x-tgz", "application/x-gtar":
return ".tgz", true
+ case "application/octet-stream":
+ // Generic binary type - we'll need to check the URL suffix
+ return "", false
default:
return "", false
}
@@ -78,88 +68,7 @@ func NewExtractor(source string) (Extractor, error) {
return extractor, nil
}
}
- return nil, errors.Errorf("no extractor implemented yet for %s", source)
-}
-
-// NewHTTPInstaller creates a new HttpInstaller.
-func NewHTTPInstaller(source string) (*HTTPInstaller, error) {
- key, err := cache.Key(source)
- if err != nil {
- return nil, err
- }
-
- extractor, err := NewExtractor(source)
- if err != nil {
- return nil, err
- }
-
- get, err := getter.All(new(cli.EnvSettings)).ByScheme("http")
- if err != nil {
- return nil, err
- }
-
- i := &HTTPInstaller{
- CacheDir: helmpath.CachePath("plugins", key),
- PluginName: stripPluginName(filepath.Base(source)),
- base: newBase(source),
- extractor: extractor,
- getter: get,
- }
- return i, nil
-}
-
-// helper that relies on some sort of convention for plugin name (plugin-name-)
-func stripPluginName(name string) string {
- var strippedName string
- for suffix := range Extractors {
- if strings.HasSuffix(name, suffix) {
- strippedName = strings.TrimSuffix(name, suffix)
- break
- }
- }
- re := regexp.MustCompile(`(.*)-[0-9]+\..*`)
- return re.ReplaceAllString(strippedName, `$1`)
-}
-
-// Install downloads and extracts the tarball into the cache directory
-// and installs into the plugin directory.
-//
-// Implements Installer.
-func (i *HTTPInstaller) Install() error {
- pluginData, err := i.getter.Get(i.Source)
- if err != nil {
- return err
- }
-
- if err := i.extractor.Extract(pluginData, i.CacheDir); err != nil {
- return errors.Wrap(err, "extracting files from archive")
- }
-
- if !isPlugin(i.CacheDir) {
- return ErrMissingMetadata
- }
-
- src, err := filepath.Abs(i.CacheDir)
- if err != nil {
- return err
- }
-
- debug("copying %s to %s", src, i.Path())
- return fs.CopyDir(src, i.Path())
-}
-
-// Update updates a local repository
-// Not implemented for now since tarball most likely will be packaged by version
-func (i *HTTPInstaller) Update() error {
- return errors.Errorf("method Update() not implemented for HttpInstaller")
-}
-
-// Path is overridden because we want to join on the plugin name not the file name
-func (i HTTPInstaller) Path() string {
- if i.base.Source == "" {
- return ""
- }
- return helmpath.DataPath("plugins", i.PluginName)
+ return nil, fmt.Errorf("no extractor implemented yet for %s", source)
}
// cleanJoin resolves dest as a subpath of root.
@@ -194,10 +103,8 @@ func cleanJoin(root, dest string) (string, error) {
// We want to alert the user that something bad was attempted. Cleaning it
// is not a good practice.
- for _, part := range strings.Split(dest, "/") {
- if part == ".." {
- return "", errors.New("path contains '..', which is illegal")
- }
+ if slices.Contains(strings.Split(dest, "/"), "..") {
+ return "", errors.New("path contains '..', which is illegal")
}
// If a path is absolute, the creator of the TAR is doing something shady.
@@ -247,10 +154,14 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error {
switch header.Typeflag {
case tar.TypeDir:
- if err := os.Mkdir(path, 0755); err != nil {
+ if err := os.MkdirAll(path, 0755); err != nil {
return err
}
case tar.TypeReg:
+ // Ensure parent directory exists
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
return err
@@ -264,8 +175,21 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error {
case tar.TypeXGlobalHeader, tar.TypeXHeader:
continue
default:
- return errors.Errorf("unknown type: %b in %s", header.Typeflag, header.Name)
+ return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name)
}
}
return nil
}
+
+// stripPluginName is a helper that relies on some sort of convention for plugin name (plugin-name-)
+func stripPluginName(name string) string {
+ var strippedName string
+ for suffix := range Extractors {
+ if before, ok := strings.CutSuffix(name, suffix); ok {
+ strippedName = before
+ break
+ }
+ }
+ re := regexp.MustCompile(`(.*)-[0-9]+\..*`)
+ return re.ReplaceAllString(strippedName, `$1`)
+}
diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go
new file mode 100644
index 000000000..bb96314f4
--- /dev/null
+++ b/internal/plugin/installer/http_installer.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "bytes"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/cache"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// HTTPInstaller installs plugins from an archive served by a web server.
+type HTTPInstaller struct {
+ CacheDir string
+ PluginName string
+ base
+ extractor Extractor
+ getter getter.Getter
+ // Cached data to avoid duplicate downloads
+ pluginData []byte
+ provData []byte
+}
+
+// NewHTTPInstaller creates a new HttpInstaller.
+func NewHTTPInstaller(source string) (*HTTPInstaller, error) {
+ key, err := cache.Key(source)
+ if err != nil {
+ return nil, err
+ }
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ return nil, err
+ }
+
+ get, err := getter.All(new(cli.EnvSettings)).ByScheme("http")
+ if err != nil {
+ return nil, err
+ }
+
+ i := &HTTPInstaller{
+ CacheDir: helmpath.CachePath("plugins", key),
+ PluginName: stripPluginName(filepath.Base(source)),
+ base: newBase(source),
+ extractor: extractor,
+ getter: get,
+ }
+ return i, nil
+}
+
+// Install downloads and extracts the tarball into the cache directory
+// and installs into the plugin directory.
+//
+// Implements Installer.
+func (i *HTTPInstaller) Install() error {
+ // Ensure plugin data is cached
+ if i.pluginData == nil {
+ pluginData, err := i.getter.Get(i.Source)
+ if err != nil {
+ return err
+ }
+ i.pluginData = pluginData.Bytes()
+ }
+
+ // Save the original tarball to plugins directory for verification
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Ensure prov data is cached if available
+ if i.provData == nil {
+ // Try to download .prov file if it exists
+ provURL := i.Source + ".prov"
+ if provData, err := i.getter.Get(provURL); err == nil {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ // Save prov file if we have the data
+ if i.provData != nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, i.provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ if err := i.extractor.Extract(bytes.NewBuffer(i.pluginData), i.CacheDir); err != nil {
+ return fmt.Errorf("extracting files from archive: %w", err)
+ }
+
+ // Detect where the plugin.yaml actually is
+ pluginRoot, err := detectPluginRoot(i.CacheDir)
+ if err != nil {
+ return err
+ }
+
+ // Validate plugin structure if needed
+ if err := validatePluginName(pluginRoot, i.PluginName); err != nil {
+ return err
+ }
+
+ src, err := filepath.Abs(pluginRoot)
+ if err != nil {
+ return err
+ }
+
+ slog.Debug("copying", "source", src, "path", i.Path())
+ return fs.CopyDir(src, i.Path())
+}
+
+// Update updates a local repository
+// Not implemented for now since tarball most likely will be packaged by version
+func (i *HTTPInstaller) Update() error {
+ return fmt.Errorf("method Update() not implemented for HttpInstaller")
+}
+
+// Path is overridden because we want to join on the plugin name not the file name
+func (i HTTPInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+ return helmpath.DataPath("plugins", i.PluginName)
+}
+
+// SupportsVerification returns true if the HTTP installer can verify plugins
+func (i *HTTPInstaller) SupportsVerification() bool {
+ // Only support verification for tarball URLs
+ return strings.HasSuffix(i.Source, ".tgz") || strings.HasSuffix(i.Source, ".tar.gz")
+}
+
+// GetVerificationData returns cached plugin and provenance data for verification
+func (i *HTTPInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ if !i.SupportsVerification() {
+ return nil, nil, "", fmt.Errorf("verification not supported for this source")
+ }
+
+ // Download plugin data once and cache it
+ if i.pluginData == nil {
+ data, err := i.getter.Get(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to download plugin: %w", err)
+ }
+ i.pluginData = data.Bytes()
+ }
+
+ // Download prov data once and cache it if available
+ if i.provData == nil {
+ provData, err := i.getter.Get(i.Source + ".prov")
+ if err != nil {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ return i.pluginData, i.provData, filepath.Base(i.Source), nil
+}
diff --git a/pkg/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go
similarity index 52%
rename from pkg/plugin/installer/http_installer_test.go
rename to internal/plugin/installer/http_installer_test.go
index c5af1f2cc..be40b1b90 100644
--- a/pkg/plugin/installer/http_installer_test.go
+++ b/internal/plugin/installer/http_installer_test.go
@@ -13,14 +13,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/base64"
+ "errors"
"fmt"
+ "io/fs"
"net/http"
"net/http/httptest"
"os"
@@ -29,8 +31,6 @@ import (
"syscall"
"testing"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/test/ensure"
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/helmpath"
@@ -49,7 +49,7 @@ func (t *TestHTTPGetter) Get(_ string, _ ...getter.Option) (*bytes.Buffer, error
}
// Fake plugin tarball data
-var fakePluginB64 = "H4sIAKRj51kAA+3UX0vCUBgGcC9jn+Iwuk3Peza3GeyiUlJQkcogCOzgli7dJm4TvYk+a5+k479UqquUCJ/fLs549sLO2TnvWnJa9aXnjwujYdYLovxMhsPcfnHOLdNkOXthM/IVQQYjg2yyLLJ4kXGhLp5j0z3P41tZksqxmspL3B/O+j/XtZu1y8rdYzkOZRCxduKPk53ny6Wwz/GfIIf1As8lxzGJSmoHNLJZphKHG4YpTCE0wVk3DULfpSJ3DMMqkj3P5JfMYLdX1Vr9Ie/5E5cstcdC8K04iGLX5HaJuKpWL17F0TCIBi5pf/0pjtLhun5j3f9v6r7wfnI/H0eNp9d1/5P6Gez0vzo7wsoxfrAZbTny/o9k6J8z/VkO/LPlWdC1iVpbEEcq5nmeJ13LEtmbV0k2r2PrOs9PuuNglC5rL1Y5S/syXRQmutaNw1BGnnp8Wq3UG51WvX1da3bKtZtCN/R09DwAAAAAAAAAAAAAAAAAAADAb30AoMczDwAoAAA="
+var fakePluginB64 = "H4sIAAAAAAAAA+3SQUvDMBgG4Jz7K0LwapdvSxrwJig6mCKC5xHabBaXdDSt4L+3cQ56mV42ZPg+lw+SF5LwZmXf3OV206/rMGEnIgdG6zTJaDmee4y01FOlZpqGHJGZSsb1qS401sfOtpyz0FTup9xv+2dqNep/N/IP6zdHPSMVXCh1sH8yhtGMDBUFFTL1r4iIcXnUWxzwz/sP1rsrLkbfQGTvro11E4ZlmcucRNZHu04py1OO73OVi2Vbb7td9vp7nXevtvsKRpGVjfc2VMP2xf3t4mH5tHi5mz8ub+bPk9JXIvvr5wMAAAAAAAAAAAAAAAAAAAAAnLVPqwHcXQAoAAA="
func TestStripName(t *testing.T) {
if stripPluginName("fake-plugin-0.0.1.tar.gz") != "fake-plugin" {
@@ -150,7 +150,7 @@ func TestHTTPInstallerNonExistentVersion(t *testing.T) {
// inject fake http client responding with error
httpInstaller.getter = &TestHTTPGetter{
- MockError: errors.Errorf("failed to download plugin for some reason"),
+ MockError: fmt.Errorf("failed to download plugin for some reason"),
}
// attempt to install the plugin
@@ -210,11 +210,9 @@ func TestExtract(t *testing.T) {
tempDir := t.TempDir()
- // Set the umask to default open permissions so we can actually test
- oldmask := syscall.Umask(0000)
- defer func() {
- syscall.Umask(oldmask)
- }()
+ // Get current umask to predict expected permissions
+ currentUmask := syscall.Umask(0)
+ syscall.Umask(currentUmask)
// Write a tarball to a buffer for us to extract
var tarbuf bytes.Buffer
@@ -274,24 +272,30 @@ func TestExtract(t *testing.T) {
t.Fatalf("Did not expect error but got error: %v", err)
}
+ // Calculate expected permissions after umask is applied
+ expectedPluginYAMLPerm := os.FileMode(0600 &^ currentUmask)
+ expectedReadmePerm := os.FileMode(0777 &^ currentUmask)
+
pluginYAMLFullPath := filepath.Join(tempDir, "plugin.yaml")
if info, err := os.Stat(pluginYAMLFullPath); err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, fs.ErrNotExist) {
t.Fatalf("Expected %s to exist but doesn't", pluginYAMLFullPath)
}
t.Fatal(err)
- } else if info.Mode().Perm() != 0600 {
- t.Fatalf("Expected %s to have 0600 mode it but has %o", pluginYAMLFullPath, info.Mode().Perm())
+ } else if info.Mode().Perm() != expectedPluginYAMLPerm {
+ t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)",
+ pluginYAMLFullPath, expectedPluginYAMLPerm, info.Mode().Perm(), currentUmask)
}
readmeFullPath := filepath.Join(tempDir, "README.md")
if info, err := os.Stat(readmeFullPath); err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, fs.ErrNotExist) {
t.Fatalf("Expected %s to exist but doesn't", readmeFullPath)
}
t.Fatal(err)
- } else if info.Mode().Perm() != 0777 {
- t.Fatalf("Expected %s to have 0777 mode it but has %o", readmeFullPath, info.Mode().Perm())
+ } else if info.Mode().Perm() != expectedReadmePerm {
+ t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)",
+ readmeFullPath, expectedReadmePerm, info.Mode().Perm(), currentUmask)
}
}
@@ -348,3 +352,250 @@ func TestMediaTypeToExtension(t *testing.T) {
}
}
}
+
+func TestExtractWithNestedDirectories(t *testing.T) {
+ source := "https://repo.localdomain/plugins/nested-plugin-0.0.1.tar.gz"
+ tempDir := t.TempDir()
+
+ // Write a tarball with nested directory structure
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ var files = []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg},
+ {"bin/", "", 0755, tar.TypeDir},
+ {"bin/plugin", "#!/bin/bash\necho plugin", 0755, tar.TypeReg},
+ {"docs/", "", 0755, tar.TypeDir},
+ {"docs/README.md", "readme content", 0644, tar.TypeReg},
+ {"docs/examples/", "", 0755, tar.TypeDir},
+ {"docs/examples/example1.yaml", "example content", 0644, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // First extraction
+ if err = extractor.Extract(&buf, tempDir); err != nil {
+ t.Fatalf("First extraction failed: %v", err)
+ }
+
+ // Verify nested structure was created
+ nestedFile := filepath.Join(tempDir, "docs", "examples", "example1.yaml")
+ if _, err := os.Stat(nestedFile); err != nil {
+ t.Fatalf("Expected nested file %s to exist but got error: %v", nestedFile, err)
+ }
+
+ // Reset buffer for second extraction
+ buf.Reset()
+ gz = gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ // Second extraction to same directory (should not fail)
+ if err = extractor.Extract(&buf, tempDir); err != nil {
+ t.Fatalf("Second extraction to existing directory failed: %v", err)
+ }
+}
+
+func TestExtractWithExistingDirectory(t *testing.T) {
+ source := "https://repo.localdomain/plugins/test-plugin-0.0.1.tar.gz"
+ tempDir := t.TempDir()
+
+ // Pre-create the cache directory structure
+ cacheDir := filepath.Join(tempDir, "cache")
+ if err := os.MkdirAll(filepath.Join(cacheDir, "existing", "dir"), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a file in the existing directory
+ existingFile := filepath.Join(cacheDir, "existing", "file.txt")
+ if err := os.WriteFile(existingFile, []byte("existing content"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write a tarball
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg},
+ {"existing/", "", 0755, tar.TypeDir},
+ {"existing/dir/", "", 0755, tar.TypeDir},
+ {"existing/dir/newfile.txt", "new content", 0644, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Extract to directory with existing content
+ if err = extractor.Extract(&buf, cacheDir); err != nil {
+ t.Fatalf("Extraction to directory with existing content failed: %v", err)
+ }
+
+ // Verify new file was created
+ newFile := filepath.Join(cacheDir, "existing", "dir", "newfile.txt")
+ if _, err := os.Stat(newFile); err != nil {
+ t.Fatalf("Expected new file %s to exist but got error: %v", newFile, err)
+ }
+
+ // Verify existing file is still there
+ if _, err := os.Stat(existingFile); err != nil {
+ t.Fatalf("Expected existing file %s to still exist but got error: %v", existingFile, err)
+ }
+}
+
+func TestExtractPluginInSubdirectory(t *testing.T) {
+ ensure.HelmHome(t)
+ source := "https://repo.localdomain/plugins/subdir-plugin-1.0.0.tar.gz"
+ tempDir := t.TempDir()
+
+ // Create a tarball where plugin files are in a subdirectory
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"my-plugin/", "", 0755, tar.TypeDir},
+ {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test plugin\ncommand: $HELM_PLUGIN_DIR/bin/my-plugin", 0644, tar.TypeReg},
+ {"my-plugin/bin/", "", 0755, tar.TypeDir},
+ {"my-plugin/bin/my-plugin", "#!/bin/bash\necho test", 0755, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ // Test the installer
+ installer := &HTTPInstaller{
+ CacheDir: tempDir,
+ PluginName: "subdir-plugin",
+ base: newBase(source),
+ extractor: &TarGzExtractor{},
+ }
+
+ // Create a mock getter
+ installer.getter = &TestHTTPGetter{
+ MockResponse: &buf,
+ }
+
+ // Ensure the destination directory doesn't exist
+ // (In a real scenario, this is handled by installer.Install() wrapper)
+ destPath := installer.Path()
+ if err := os.RemoveAll(destPath); err != nil {
+ t.Fatalf("Failed to clean destination path: %v", err)
+ }
+
+ // Install should handle the subdirectory correctly
+ if err := installer.Install(); err != nil {
+ t.Fatalf("Failed to install plugin with subdirectory: %v", err)
+ }
+
+ // The plugin should be installed from the subdirectory
+ // Check that detectPluginRoot found the correct location
+ pluginRoot, err := detectPluginRoot(tempDir)
+ if err != nil {
+ t.Fatalf("Failed to detect plugin root: %v", err)
+ }
+
+ expectedRoot := filepath.Join(tempDir, "my-plugin")
+ if pluginRoot != expectedRoot {
+ t.Errorf("Expected plugin root to be %s but got %s", expectedRoot, pluginRoot)
+ }
+}
diff --git a/pkg/plugin/installer/installer.go b/internal/plugin/installer/installer.go
similarity index 53%
rename from pkg/plugin/installer/installer.go
rename to internal/plugin/installer/installer.go
index 5fad58f99..b65dac2f4 100644
--- a/pkg/plugin/installer/installer.go
+++ b/internal/plugin/installer/installer.go
@@ -16,16 +16,15 @@ limitations under the License.
package installer
import (
+ "errors"
"fmt"
- "log"
"net/http"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
- "helm.sh/helm/v4/pkg/plugin"
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/registry"
)
// ErrMissingMetadata indicates that plugin.yaml is missing.
@@ -34,6 +33,14 @@ var ErrMissingMetadata = errors.New("plugin metadata (plugin.yaml) missing")
// Debug enables verbose output.
var Debug bool
+// Options contains options for plugin installation.
+type Options struct {
+ // Verify enables signature verification before installation
+ Verify bool
+ // Keyring is the path to the keyring for verification
+ Keyring string
+}
+
// Installer provides an interface for installing helm client plugins.
type Installer interface {
// Install adds a plugin.
@@ -44,15 +51,80 @@ type Installer interface {
Update() error
}
+// Verifier provides an interface for installers that support verification.
+type Verifier interface {
+ // SupportsVerification returns true if this installer can verify plugins
+ SupportsVerification() bool
+ // GetVerificationData returns plugin and provenance data for verification
+ GetVerificationData() (archiveData, provData []byte, filename string, err error)
+}
+
// Install installs a plugin.
func Install(i Installer) error {
+ _, err := InstallWithOptions(i, Options{})
+ return err
+}
+
+// VerificationResult contains the result of plugin verification
+type VerificationResult struct {
+ SignedBy []string
+ Fingerprint string
+ FileHash string
+}
+
+// InstallWithOptions installs a plugin with options.
+func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) {
+
if err := os.MkdirAll(filepath.Dir(i.Path()), 0755); err != nil {
- return err
+ return nil, err
}
if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) {
- return errors.New("plugin already exists")
+ return nil, errors.New("plugin already exists")
+ }
+
+ var result *VerificationResult
+
+ // If verification is requested, check if installer supports it
+ if opts.Verify {
+ verifier, ok := i.(Verifier)
+ if !ok || !verifier.SupportsVerification() {
+ return nil, fmt.Errorf("--verify is only supported for plugin tarballs (.tgz files)")
+ }
+
+ // Get verification data (works for both memory and file-based installers)
+ archiveData, provData, filename, err := verifier.GetVerificationData()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get verification data: %w", err)
+ }
+
+ // Check if provenance data exists
+ if len(provData) == 0 {
+ // No .prov file found - emit warning but continue installation
+ fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. Plugin is not signed and cannot be verified.\n")
+ } else {
+ // Provenance data exists - verify the plugin
+ verification, err := plugin.VerifyPlugin(archiveData, provData, filename, opts.Keyring)
+ if err != nil {
+ return nil, fmt.Errorf("plugin verification failed: %w", err)
+ }
+
+ // Collect verification info
+ result = &VerificationResult{
+ SignedBy: make([]string, 0),
+ Fingerprint: fmt.Sprintf("%X", verification.SignedBy.PrimaryKey.Fingerprint),
+ FileHash: verification.FileHash,
+ }
+ for name := range verification.SignedBy.Identities {
+ result.SignedBy = append(result.SignedBy, name)
+ }
+ }
}
- return i.Install()
+
+ if err := i.Install(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
}
// Update updates a plugin.
@@ -65,6 +137,10 @@ func Update(i Installer) error {
// NewForSource determines the correct Installer for the given source.
func NewForSource(source, version string) (Installer, error) {
+ // Check if source is an OCI registry reference
+ if strings.HasPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) {
+ return NewOCIInstaller(source)
+ }
// Check if source is a local directory
if isLocalReference(source) {
return NewLocalInstaller(source)
@@ -95,6 +171,15 @@ func isLocalReference(source string) bool {
// HEAD operation to see if the remote resource is a file that we understand.
func isRemoteHTTPArchive(source string) bool {
if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") {
+ // First, check if the URL ends with a known archive suffix
+ // This is more reliable than content-type detection
+ for suffix := range Extractors {
+ if strings.HasSuffix(source, suffix) {
+ return true
+ }
+ }
+
+ // If no suffix match, try HEAD request to check content type
res, err := http.Head(source)
if err != nil {
// If we get an error at the network layer, we can't install it. So
@@ -125,11 +210,3 @@ func isPlugin(dirname string) bool {
_, err := os.Stat(filepath.Join(dirname, plugin.PluginFileName))
return err == nil
}
-
-var logger = log.New(os.Stderr, "[debug] ", log.Lshortfile)
-
-func debug(format string, args ...interface{}) {
- if Debug {
- logger.Output(2, fmt.Sprintf(format, args...))
- }
-}
diff --git a/pkg/plugin/installer/installer_test.go b/internal/plugin/installer/installer_test.go
similarity index 71%
rename from pkg/plugin/installer/installer_test.go
rename to internal/plugin/installer/installer_test.go
index a11464924..dcd76fe9c 100644
--- a/pkg/plugin/installer/installer_test.go
+++ b/internal/plugin/installer/installer_test.go
@@ -26,8 +26,15 @@ func TestIsRemoteHTTPArchive(t *testing.T) {
t.Errorf("Expected non-URL to return false")
}
- if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") {
- t.Errorf("Bad URL should not have succeeded.")
+ // URLs with valid archive extensions are considered valid archives
+ // even if the server is unreachable (optimization to avoid unnecessary HTTP requests)
+ if !isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") {
+ t.Errorf("URL with .tgz extension should be considered a valid archive")
+ }
+
+ // Test with invalid extension and unreachable server
+ if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.notanarchive") {
+ t.Errorf("Bad URL without valid extension should not succeed")
}
if !isRemoteHTTPArchive(source) {
diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go
new file mode 100644
index 000000000..e02261d59
--- /dev/null
+++ b/internal/plugin/installer/local_installer.go
@@ -0,0 +1,219 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// ErrPluginNotAFolder indicates that the plugin path is not a folder.
+var ErrPluginNotAFolder = errors.New("expected plugin to be a folder")
+
+// LocalInstaller installs plugins from the filesystem.
+type LocalInstaller struct {
+ base
+ isArchive bool
+ extractor Extractor
+ pluginData []byte // Cached plugin data
+ provData []byte // Cached provenance data
+}
+
+// NewLocalInstaller creates a new LocalInstaller.
+func NewLocalInstaller(source string) (*LocalInstaller, error) {
+ src, err := filepath.Abs(source)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get absolute path to plugin: %w", err)
+ }
+ i := &LocalInstaller{
+ base: newBase(src),
+ }
+
+ // Check if source is an archive
+ if isLocalArchive(src) {
+ i.isArchive = true
+ extractor, err := NewExtractor(src)
+ if err != nil {
+ return nil, fmt.Errorf("unsupported archive format: %w", err)
+ }
+ i.extractor = extractor
+ }
+
+ return i, nil
+}
+
+// isLocalArchive checks if the file is a supported archive format
+func isLocalArchive(path string) bool {
+ for suffix := range Extractors {
+ if strings.HasSuffix(path, suffix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Install creates a symlink to the plugin directory.
+//
+// Implements Installer.
+func (i *LocalInstaller) Install() error {
+ if i.isArchive {
+ return i.installFromArchive()
+ }
+ return i.installFromDirectory()
+}
+
+// installFromDirectory creates a symlink to the plugin directory
+func (i *LocalInstaller) installFromDirectory() error {
+ stat, err := os.Stat(i.Source)
+ if err != nil {
+ return err
+ }
+ if !stat.IsDir() {
+ return ErrPluginNotAFolder
+ }
+
+ if !isPlugin(i.Source) {
+ return ErrMissingMetadata
+ }
+ slog.Debug("symlinking", "source", i.Source, "path", i.Path())
+ return os.Symlink(i.Source, i.Path())
+}
+
+// installFromArchive extracts and installs a plugin from a tarball
+func (i *LocalInstaller) installFromArchive() error {
+ // Read the archive file
+ data, err := os.ReadFile(i.Source)
+ if err != nil {
+ return fmt.Errorf("failed to read archive: %w", err)
+ }
+
+ // Copy the original tarball to plugins directory for verification
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(data))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, data, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Check for and copy .prov file if it exists
+ provSource := i.Source + ".prov"
+ if provData, err := os.ReadFile(provSource); err == nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ // Create a temporary directory for extraction
+ tempDir, err := os.MkdirTemp("", "helm-plugin-extract-")
+ if err != nil {
+ return fmt.Errorf("failed to create temp directory: %w", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ // Extract the archive
+ buffer := bytes.NewBuffer(data)
+ if err := i.extractor.Extract(buffer, tempDir); err != nil {
+ return fmt.Errorf("failed to extract archive: %w", err)
+ }
+
+ // Plugin directory should be named after the plugin at the archive root
+ pluginName := stripPluginName(filepath.Base(i.Source))
+ pluginDir := filepath.Join(tempDir, pluginName)
+ if _, err = os.Stat(filepath.Join(pluginDir, "plugin.yaml")); err != nil {
+ return fmt.Errorf("plugin.yaml not found in expected directory %s: %w", pluginDir, err)
+ }
+
+ // Copy to the final destination
+ slog.Debug("copying", "source", pluginDir, "path", i.Path())
+ return fs.CopyDir(pluginDir, i.Path())
+}
+
+// Update updates a local repository
+func (i *LocalInstaller) Update() error {
+ slog.Debug("local repository is auto-updated")
+ return nil
+}
+
+// Path is overridden to handle archive plugin names properly
+func (i *LocalInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+
+ pluginName := filepath.Base(i.Source)
+ if i.isArchive {
+ // Strip archive extension to get plugin name
+ pluginName = stripPluginName(pluginName)
+ }
+
+ return helmpath.DataPath("plugins", pluginName)
+}
+
+// SupportsVerification returns true if the local installer can verify plugins
+func (i *LocalInstaller) SupportsVerification() bool {
+ // Only support verification for local tarball files
+ return i.isArchive
+}
+
+// GetVerificationData loads plugin and provenance data from local files for verification
+func (i *LocalInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ if !i.SupportsVerification() {
+ return nil, nil, "", fmt.Errorf("verification not supported for directories")
+ }
+
+ // Read and cache the plugin archive file
+ if i.pluginData == nil {
+ i.pluginData, err = os.ReadFile(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to read plugin file: %w", err)
+ }
+ }
+
+ // Read and cache the provenance file if it exists
+ if i.provData == nil {
+ provFile := i.Source + ".prov"
+ i.provData, err = os.ReadFile(provFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ // If file exists but can't be read (permissions, etc), return error
+ return nil, nil, "", fmt.Errorf("failed to access provenance file %s: %w", provFile, err)
+ }
+ }
+ }
+
+ return i.pluginData, i.provData, filepath.Base(i.Source), nil
+}
diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go
new file mode 100644
index 000000000..189108fdb
--- /dev/null
+++ b/internal/plugin/installer/local_installer_test.go
@@ -0,0 +1,148 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(LocalInstaller)
+
+func TestLocalInstaller(t *testing.T) {
+ ensure.HelmHome(t)
+ // Make a temp dir
+ tdir := t.TempDir()
+ if err := os.WriteFile(filepath.Join(tdir, "plugin.yaml"), []byte{}, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ source := "../testdata/plugdir/good/echo-v1"
+ i, err := NewForSource(source, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+
+ if i.Path() != helmpath.DataPath("plugins", "echo-v1") {
+ t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path())
+ }
+ defer os.RemoveAll(filepath.Dir(helmpath.DataPath())) // helmpath.DataPath is like /tmp/helm013130971/helm
+}
+
+func TestLocalInstallerNotAFolder(t *testing.T) {
+ source := "../testdata/plugdir/good/echo-v1/plugin.yaml"
+ i, err := NewForSource(source, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ err = Install(i)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err != ErrPluginNotAFolder {
+ t.Fatalf("expected error to equal: %q", err)
+ }
+}
+
+func TestLocalInstallerTarball(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a test tarball
+ tempDir := t.TempDir()
+ tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tar.gz")
+
+ // Create tarball content
+ var buf bytes.Buffer
+ gw := gzip.NewWriter(&buf)
+ tw := tar.NewWriter(gw)
+
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ }{
+ {"test-plugin/plugin.yaml", "name: test-plugin\napiVersion: v1\ntype: cli/v1\nruntime: subprocess\nversion: 1.0.0\nconfig:\n shortHelp: test\n longHelp: test\nruntimeConfig:\n platformCommand:\n - command: echo", 0644},
+ {"test-plugin/bin/test-plugin", "#!/bin/bash\necho test", 0755},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := gw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write tarball to file
+ if err := os.WriteFile(tarballPath, buf.Bytes(), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test installation
+ i, err := NewForSource(tarballPath, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Verify it's detected as LocalInstaller
+ localInstaller, ok := i.(*LocalInstaller)
+ if !ok {
+ t.Fatal("expected LocalInstaller")
+ }
+
+ if !localInstaller.isArchive {
+ t.Fatal("expected isArchive to be true")
+ }
+
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedPath := helmpath.DataPath("plugins", "test-plugin")
+ if i.Path() != expectedPath {
+ t.Fatalf("expected path %q, got %q", expectedPath, i.Path())
+ }
+
+ // Verify plugin was installed
+ if _, err := os.Stat(i.Path()); err != nil {
+ t.Fatalf("plugin not found at %s: %v", i.Path(), err)
+ }
+}
diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go
new file mode 100644
index 000000000..afbb42ca5
--- /dev/null
+++ b/internal/plugin/installer/oci_installer.go
@@ -0,0 +1,301 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/cache"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// Ensure OCIInstaller implements Verifier
+var _ Verifier = (*OCIInstaller)(nil)
+
+// OCIInstaller installs plugins from OCI registries
+type OCIInstaller struct {
+ CacheDir string
+ PluginName string
+ base
+ settings *cli.EnvSettings
+ getter getter.Getter
+ // Cached data to avoid duplicate downloads
+ pluginData []byte
+ provData []byte
+}
+
+// NewOCIInstaller creates a new OCIInstaller with optional getter options
+func NewOCIInstaller(source string, options ...getter.Option) (*OCIInstaller, error) {
+ // Extract plugin name from OCI reference using robust registry parsing
+ pluginName, err := registry.GetPluginName(source)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := cache.Key(source)
+ if err != nil {
+ return nil, err
+ }
+
+ settings := cli.New()
+
+ // Always add plugin artifact type and any provided options
+ pluginOptions := append([]getter.Option{getter.WithArtifactType("plugin")}, options...)
+ getterProvider, err := getter.NewOCIGetter(pluginOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ i := &OCIInstaller{
+ CacheDir: helmpath.CachePath("plugins", key),
+ PluginName: pluginName,
+ base: newBase(source),
+ settings: settings,
+ getter: getterProvider,
+ }
+ return i, nil
+}
+
+// Install downloads and installs a plugin from OCI registry
+// Implements Installer.
+func (i *OCIInstaller) Install() error {
+ slog.Debug("pulling OCI plugin", "source", i.Source)
+
+ // Ensure plugin data is cached
+ if i.pluginData == nil {
+ pluginData, err := i.getter.Get(i.Source)
+ if err != nil {
+ return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err)
+ }
+ i.pluginData = pluginData.Bytes()
+ }
+
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Ensure prov data is cached if available
+ if i.provData == nil {
+ // Try to download .prov file if it exists
+ provSource := i.Source + ".prov"
+ if provData, err := i.getter.Get(provSource); err == nil {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ // Save prov file if we have the data
+ if i.provData != nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, i.provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ // Check if this is a gzip compressed file
+ if len(i.pluginData) < 2 || i.pluginData[0] != 0x1f || i.pluginData[1] != 0x8b {
+ return fmt.Errorf("plugin data is not a gzip compressed archive")
+ }
+
+ // Create cache directory
+ if err := os.MkdirAll(i.CacheDir, 0755); err != nil {
+ return fmt.Errorf("failed to create cache directory: %w", err)
+ }
+
+ // Extract as gzipped tar
+ if err := extractTarGz(bytes.NewReader(i.pluginData), i.CacheDir); err != nil {
+ return fmt.Errorf("failed to extract plugin: %w", err)
+ }
+
+ // Verify plugin.yaml exists - check root and subdirectories
+ pluginDir := i.CacheDir
+ if !isPlugin(pluginDir) {
+ // Check if plugin.yaml is in a subdirectory
+ entries, err := os.ReadDir(i.CacheDir)
+ if err != nil {
+ return err
+ }
+
+ foundPluginDir := ""
+ for _, entry := range entries {
+ if entry.IsDir() {
+ subDir := filepath.Join(i.CacheDir, entry.Name())
+ if isPlugin(subDir) {
+ foundPluginDir = subDir
+ break
+ }
+ }
+ }
+
+ if foundPluginDir == "" {
+ return ErrMissingMetadata
+ }
+
+ // Use the subdirectory as the plugin directory
+ pluginDir = foundPluginDir
+ }
+
+ // Copy from cache to final destination
+ src, err := filepath.Abs(pluginDir)
+ if err != nil {
+ return err
+ }
+
+ slog.Debug("copying", "source", src, "path", i.Path())
+ return fs.CopyDir(src, i.Path())
+}
+
+// Update updates a plugin by reinstalling it
+func (i *OCIInstaller) Update() error {
+ // For OCI, update means removing the old version and installing the new one
+ if err := os.RemoveAll(i.Path()); err != nil {
+ return err
+ }
+ return i.Install()
+}
+
+// Path is where the plugin will be installed
+func (i OCIInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+ return filepath.Join(i.settings.PluginsDirectory, i.PluginName)
+}
+
+// extractTarGz extracts a gzipped tar archive to a directory
+func extractTarGz(r io.Reader, targetDir string) error {
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return err
+ }
+ defer gzr.Close()
+
+ return extractTar(gzr, targetDir)
+}
+
+// extractTar extracts a tar archive to a directory
+func extractTar(r io.Reader, targetDir string) error {
+ tarReader := tar.NewReader(r)
+
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ path, err := cleanJoin(targetDir, header.Name)
+ if err != nil {
+ return err
+ }
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ case tar.TypeReg:
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+
+ outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+ if _, err := io.Copy(outFile, tarReader); err != nil {
+ return err
+ }
+ case tar.TypeXGlobalHeader, tar.TypeXHeader:
+ // Skip these
+ continue
+ default:
+ return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name)
+ }
+ }
+
+ return nil
+}
+
+// SupportsVerification returns true since OCI plugins can be verified
+func (i *OCIInstaller) SupportsVerification() bool {
+ return true
+}
+
+// GetVerificationData downloads and caches plugin and provenance data from OCI registry for verification
+func (i *OCIInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ slog.Debug("getting verification data for OCI plugin", "source", i.Source)
+
+ // Download plugin data once and cache it
+ if i.pluginData == nil {
+ pluginDataBuffer, err := i.getter.Get(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err)
+ }
+ i.pluginData = pluginDataBuffer.Bytes()
+ }
+
+ // Download prov data once and cache it if available
+ if i.provData == nil {
+ provSource := i.Source + ".prov"
+ // Calling getter.Get again is reasonable because: 1. The OCI registry client already optimizes the underlying network calls
+ // 2. Both calls use the same underlying manifest and memory store 3. The second .prov call is very fast since the data is already pulled
+ provDataBuffer, err := i.getter.Get(provSource)
+ if err != nil {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ i.provData = provDataBuffer.Bytes()
+ }
+ }
+
+ // Extract metadata to get the filename
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename = fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+
+ slog.Debug("got verification data for OCI plugin", "filename", filename)
+ return i.pluginData, i.provData, filename, nil
+}
diff --git a/internal/plugin/installer/oci_installer_test.go b/internal/plugin/installer/oci_installer_test.go
new file mode 100644
index 000000000..1280cf97d
--- /dev/null
+++ b/internal/plugin/installer/oci_installer_test.go
@@ -0,0 +1,806 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(OCIInstaller)
+
+// createTestPluginTarGz creates a test plugin tar.gz with plugin.yaml
+func createTestPluginTarGz(t *testing.T, pluginName string) []byte {
+ t.Helper()
+
+ var buf bytes.Buffer
+ gzWriter := gzip.NewWriter(&buf)
+ tarWriter := tar.NewWriter(gzWriter)
+
+ // Add plugin.yaml
+ pluginYAML := fmt.Sprintf(`name: %s
+version: "1.0.0"
+description: "Test plugin for OCI installer"
+command: "$HELM_PLUGIN_DIR/bin/%s"
+`, pluginName, pluginName)
+ header := &tar.Header{
+ Name: "plugin.yaml",
+ Mode: 0644,
+ Size: int64(len(pluginYAML)),
+ Typeflag: tar.TypeReg,
+ }
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tarWriter.Write([]byte(pluginYAML)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add bin directory
+ dirHeader := &tar.Header{
+ Name: "bin/",
+ Mode: 0755,
+ Typeflag: tar.TypeDir,
+ }
+ if err := tarWriter.WriteHeader(dirHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add executable
+ execContent := fmt.Sprintf("#!/bin/sh\necho '%s test plugin'", pluginName)
+ execHeader := &tar.Header{
+ Name: fmt.Sprintf("bin/%s", pluginName),
+ Mode: 0755,
+ Size: int64(len(execContent)),
+ Typeflag: tar.TypeReg,
+ }
+ if err := tarWriter.WriteHeader(execHeader); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tarWriter.Write([]byte(execContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+ gzWriter.Close()
+
+ return buf.Bytes()
+}
+
+// mockOCIRegistryWithArtifactType creates a mock OCI registry server using the new artifact type approach
+func mockOCIRegistryWithArtifactType(t *testing.T, pluginName string) (*httptest.Server, string) {
+ t.Helper()
+
+ pluginData := createTestPluginTarGz(t, pluginName)
+ layerDigest := fmt.Sprintf("sha256:%x", sha256Sum(pluginData))
+
+ // Create empty config data (as per OCI v1.1+ spec)
+ configData := []byte("{}")
+ configDigest := fmt.Sprintf("sha256:%x", sha256Sum(configData))
+
+ // Create manifest with artifact type
+ manifest := ocispec.Manifest{
+ MediaType: ocispec.MediaTypeImageManifest,
+ ArtifactType: "application/vnd.helm.plugin.v1+json", // Using artifact type
+ Config: ocispec.Descriptor{
+ MediaType: "application/vnd.oci.empty.v1+json", // Empty config
+ Digest: digest.Digest(configDigest),
+ Size: int64(len(configData)),
+ },
+ Layers: []ocispec.Descriptor{
+ {
+ MediaType: "application/vnd.oci.image.layer.v1.tar",
+ Digest: digest.Digest(layerDigest),
+ Size: int64(len(pluginData)),
+ Annotations: map[string]string{
+ ocispec.AnnotationTitle: pluginName + "-1.0.0.tgz", // Layer named with version
+ },
+ },
+ },
+ }
+
+ manifestData, err := json.Marshal(manifest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ manifestDigest := fmt.Sprintf("sha256:%x", sha256Sum(manifestData))
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/v2/") && !strings.Contains(r.URL.Path, "/manifests/") && !strings.Contains(r.URL.Path, "/blobs/"):
+ // API version check
+ w.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("{}"))
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/manifests/") && strings.Contains(r.URL.Path, pluginName):
+ // Return manifest
+ w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest)
+ w.Header().Set("Docker-Content-Digest", manifestDigest)
+ w.WriteHeader(http.StatusOK)
+ w.Write(manifestData)
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+layerDigest):
+ // Return layer data
+ w.Header().Set("Content-Type", "application/vnd.oci.image.layer.v1.tar")
+ w.WriteHeader(http.StatusOK)
+ w.Write(pluginData)
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+configDigest):
+ // Return config data
+ w.Header().Set("Content-Type", "application/vnd.oci.empty.v1+json")
+ w.WriteHeader(http.StatusOK)
+ w.Write(configData)
+
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ // Parse server URL to get host:port format for OCI reference
+ serverURL, err := url.Parse(server.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ registryHost := serverURL.Host
+
+ return server, registryHost
+}
+
+// sha256Sum calculates SHA256 sum of data
+func sha256Sum(data []byte) []byte {
+ h := sha256.New()
+ h.Write(data)
+ return h.Sum(nil)
+}
+
+func TestNewOCIInstaller(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ expectName string
+ expectError bool
+ }{
+ {
+ name: "valid OCI reference with tag",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference with digest",
+ source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference without tag",
+ source: "oci://ghcr.io/user/plugin-name",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference with multiple path segments",
+ source: "oci://registry.example.com/org/team/plugin-name:latest",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "invalid OCI reference - no path",
+ source: "oci://registry.example.com",
+ expectName: "",
+ expectError: true,
+ },
+ {
+ name: "valid OCI reference - single path segment",
+ source: "oci://registry.example.com/plugin",
+ expectName: "plugin",
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ installer, err := NewOCIInstaller(tt.source)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ return
+ }
+
+ // Check all fields thoroughly
+ if installer.PluginName != tt.expectName {
+ t.Errorf("expected plugin name %s, got %s", tt.expectName, installer.PluginName)
+ }
+
+ if installer.Source != tt.source {
+ t.Errorf("expected source %s, got %s", tt.source, installer.Source)
+ }
+
+ if installer.CacheDir == "" {
+ t.Error("expected non-empty cache directory")
+ }
+
+ if !strings.Contains(installer.CacheDir, "plugins") {
+ t.Errorf("expected cache directory to contain 'plugins', got %s", installer.CacheDir)
+ }
+
+ if installer.settings == nil {
+ t.Error("expected settings to be initialized")
+ }
+
+ // Check that Path() method works
+ expectedPath := helmpath.DataPath("plugins", tt.expectName)
+ if installer.Path() != expectedPath {
+ t.Errorf("expected path %s, got %s", expectedPath, installer.Path())
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Path(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ pluginName string
+ expectPath string
+ }{
+ {
+ name: "valid plugin name",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ pluginName: "plugin-name",
+ expectPath: helmpath.DataPath("plugins", "plugin-name"),
+ },
+ {
+ name: "empty source",
+ source: "",
+ pluginName: "",
+ expectPath: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ installer := &OCIInstaller{
+ PluginName: tt.pluginName,
+ base: newBase(tt.source),
+ settings: cli.New(),
+ }
+
+ path := installer.Path()
+ if path != tt.expectPath {
+ t.Errorf("expected path %s, got %s", tt.expectPath, path)
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Install(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-basic"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ // Test OCI reference
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+
+ // Test with plain HTTP (since test server uses HTTP)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // The OCI installer uses helmpath.DataPath, which is isolated by ensure.HelmHome(t)
+ actualPath := installer.Path()
+ t.Logf("Installer will use path: %s", actualPath)
+
+ // Install the plugin
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed to the correct location
+ if !isPlugin(actualPath) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath)
+ }
+
+ // Debug: list what was actually created
+ if entries, err := os.ReadDir(actualPath); err != nil {
+ t.Fatalf("Could not read plugin directory %s: %v", actualPath, err)
+ } else {
+ t.Logf("Plugin directory %s contains:", actualPath)
+ for _, entry := range entries {
+ t.Logf(" - %s", entry.Name())
+ }
+ }
+
+ // Verify the plugin.yaml file exists and is valid
+ pluginFile := filepath.Join(actualPath, "plugin.yaml")
+ if _, err := os.Stat(pluginFile); err != nil {
+ t.Errorf("Expected plugin.yaml to exist, got error: %v", err)
+ }
+}
+
+func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) {
+ testCases := []struct {
+ name string
+ pluginName string
+ options []getter.Option
+ wantErr bool
+ }{
+ {
+ name: "plain HTTP",
+ pluginName: "example-cli-plain-http",
+ options: []getter.Option{getter.WithPlainHTTP(true)},
+ wantErr: false,
+ },
+ {
+ name: "insecure skip TLS verify",
+ pluginName: "example-cli-insecure",
+ options: []getter.Option{getter.WithPlainHTTP(true), getter.WithInsecureSkipVerifyTLS(true)},
+ wantErr: false,
+ },
+ {
+ name: "with timeout",
+ pluginName: "example-cli-timeout",
+ options: []getter.Option{getter.WithPlainHTTP(true), getter.WithTimeout(30 * time.Second)},
+ wantErr: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Set up isolated test environment for each subtest
+ ensure.HelmHome(t)
+
+ server, registryHost := mockOCIRegistryWithArtifactType(t, tc.pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, tc.pluginName)
+
+ installer, err := NewOCIInstaller(source, tc.options...)
+ if err != nil {
+ if !tc.wantErr {
+ t.Fatalf("Expected no error creating installer, got %v", err)
+ }
+ return
+ }
+
+ // The installer now uses our isolated test directory
+ actualPath := installer.Path()
+
+ // Install the plugin
+ err = Install(installer)
+ if tc.wantErr {
+ if err == nil {
+ t.Errorf("Expected installation to fail, but it succeeded")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Expected installation to succeed, got error: %v", err)
+ } else {
+ // Verify plugin was installed to the actual path
+ if !isPlugin(actualPath) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath)
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Install_AlreadyExists(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-exists"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // First install should succeed
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected first installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path())
+ }
+
+ // Second install should fail with "plugin already exists"
+ err = Install(installer)
+ if err == nil {
+ t.Error("Expected error when installing plugin that already exists")
+ } else if !strings.Contains(err.Error(), "plugin already exists") {
+ t.Errorf("Expected 'plugin already exists' error, got: %v", err)
+ }
+}
+
+func TestOCIInstaller_Update(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-update"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // Test update when plugin does not exist - should fail
+ err = Update(installer)
+ if err == nil {
+ t.Error("Expected error when updating plugin that does not exist")
+ } else if !strings.Contains(err.Error(), "plugin does not exist") {
+ t.Errorf("Expected 'plugin does not exist' error, got: %v", err)
+ }
+
+ // Install plugin first
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path())
+ }
+
+ // Test update when plugin exists - should succeed
+ // For OCI, Update() removes old version and reinstalls
+ if err := Update(installer); err != nil {
+ t.Errorf("Expected update to succeed, got error: %v", err)
+ }
+
+ // Verify plugin is still installed after update
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml after update", installer.Path())
+ }
+}
+
+func TestOCIInstaller_Install_ComponentExtraction(t *testing.T) {
+ // Test that we can extract a plugin archive properly
+ // This tests the extraction logic that Install() uses
+ tempDir := t.TempDir()
+ pluginName := "test-plugin-extract"
+
+ pluginData := createTestPluginTarGz(t, pluginName)
+
+ // Test extraction
+ err := extractTarGz(bytes.NewReader(pluginData), tempDir)
+ if err != nil {
+ t.Fatalf("Failed to extract plugin: %v", err)
+ }
+
+ // Verify plugin.yaml exists
+ pluginYAMLPath := filepath.Join(tempDir, "plugin.yaml")
+ if _, err := os.Stat(pluginYAMLPath); os.IsNotExist(err) {
+ t.Errorf("plugin.yaml not found after extraction")
+ }
+
+ // Verify bin directory exists
+ binPath := filepath.Join(tempDir, "bin")
+ if _, err := os.Stat(binPath); os.IsNotExist(err) {
+ t.Errorf("bin directory not found after extraction")
+ }
+
+ // Verify executable exists and has correct permissions
+ execPath := filepath.Join(tempDir, "bin", pluginName)
+ if info, err := os.Stat(execPath); err != nil {
+ t.Errorf("executable not found: %v", err)
+ } else if info.Mode()&0111 == 0 {
+ t.Errorf("file is not executable")
+ }
+
+ // Verify this would be recognized as a plugin
+ if !isPlugin(tempDir) {
+ t.Errorf("extracted directory is not a valid plugin")
+ }
+}
+
+func TestExtractTarGz(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a test tar.gz file
+ var buf bytes.Buffer
+ gzWriter := gzip.NewWriter(&buf)
+ tarWriter := tar.NewWriter(gzWriter)
+
+ // Add a test file to the archive
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add a test directory
+ dirHeader := &tar.Header{
+ Name: "test-dir/",
+ Mode: 0755,
+ Typeflag: tar.TypeDir,
+ }
+
+ if err := tarWriter.WriteHeader(dirHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+ gzWriter.Close()
+
+ // Test extraction
+ err := extractTarGz(bytes.NewReader(buf.Bytes()), tempDir)
+ if err != nil {
+ t.Errorf("extractTarGz failed: %v", err)
+ }
+
+ // Verify extracted file
+ extractedFile := filepath.Join(tempDir, "test-file.txt")
+ content, err := os.ReadFile(extractedFile)
+ if err != nil {
+ t.Errorf("failed to read extracted file: %v", err)
+ }
+
+ if string(content) != testContent {
+ t.Errorf("expected content %s, got %s", testContent, string(content))
+ }
+
+ // Verify extracted directory
+ extractedDir := filepath.Join(tempDir, "test-dir")
+ if _, err := os.Stat(extractedDir); os.IsNotExist(err) {
+ t.Errorf("extracted directory does not exist: %s", extractedDir)
+ }
+}
+
+func TestExtractTarGz_InvalidGzip(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Test with invalid gzip data
+ invalidGzipData := []byte("not gzip data")
+ err := extractTarGz(bytes.NewReader(invalidGzipData), tempDir)
+ if err == nil {
+ t.Error("expected error for invalid gzip data")
+ }
+}
+
+func TestExtractTar_UnknownFileType(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a test tar file
+ var buf bytes.Buffer
+ tarWriter := tar.NewWriter(&buf)
+
+ // Add a test file
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test unknown file type
+ unknownHeader := &tar.Header{
+ Name: "unknown-type",
+ Mode: 0644,
+ Typeflag: tar.TypeSymlink, // Use a type that's not handled
+ }
+
+ if err := tarWriter.WriteHeader(unknownHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+
+ // Test extraction - should fail due to unknown type
+ err := extractTar(bytes.NewReader(buf.Bytes()), tempDir)
+ if err == nil {
+ t.Error("expected error for unknown tar file type")
+ }
+
+ if !strings.Contains(err.Error(), "unknown type") {
+ t.Errorf("expected 'unknown type' error, got: %v", err)
+ }
+}
+
+func TestExtractTar_SuccessfulExtraction(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Since we can't easily create extended headers with Go's tar package,
+ // we'll test the logic that skips them by creating a simple tar with regular files
+ // and then testing that the extraction works correctly.
+
+ // Create a test tar file
+ var buf bytes.Buffer
+ tarWriter := tar.NewWriter(&buf)
+
+ // Add a regular file
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+
+ // Test extraction
+ err := extractTar(bytes.NewReader(buf.Bytes()), tempDir)
+ if err != nil {
+ t.Errorf("extractTar failed: %v", err)
+ }
+
+ // Verify the regular file was extracted
+ extractedFile := filepath.Join(tempDir, "test-file.txt")
+ content, err := os.ReadFile(extractedFile)
+ if err != nil {
+ t.Errorf("failed to read extracted file: %v", err)
+ }
+
+ if string(content) != testContent {
+ t.Errorf("expected content %s, got %s", testContent, string(content))
+ }
+}
+
+func TestOCIInstaller_Install_PlainHTTPOption(t *testing.T) {
+ // Test that PlainHTTP option is properly passed to getter
+ source := "oci://example.com/test-plugin:v1.0.0"
+
+ // Test with PlainHTTP=false (default)
+ installer1, err := NewOCIInstaller(source)
+ if err != nil {
+ t.Fatalf("failed to create installer: %v", err)
+ }
+ if installer1.getter == nil {
+ t.Error("getter should be initialized")
+ }
+
+ // Test with PlainHTTP=true
+ installer2, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("failed to create installer with PlainHTTP=true: %v", err)
+ }
+ if installer2.getter == nil {
+ t.Error("getter should be initialized with PlainHTTP=true")
+ }
+
+ // Both installers should have the same basic properties
+ if installer1.PluginName != installer2.PluginName {
+ t.Error("plugin names should match")
+ }
+ if installer1.Source != installer2.Source {
+ t.Error("sources should match")
+ }
+
+ // Test with multiple options
+ installer3, err := NewOCIInstaller(source,
+ getter.WithPlainHTTP(true),
+ getter.WithBasicAuth("user", "pass"),
+ )
+ if err != nil {
+ t.Fatalf("failed to create installer with multiple options: %v", err)
+ }
+ if installer3.getter == nil {
+ t.Error("getter should be initialized with multiple options")
+ }
+}
+
+func TestOCIInstaller_Install_ValidationErrors(t *testing.T) {
+ tests := []struct {
+ name string
+ layerData []byte
+ expectError bool
+ errorMsg string
+ }{
+ {
+ name: "non-gzip layer",
+ layerData: []byte("not gzip data"),
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ {
+ name: "empty layer",
+ layerData: []byte{},
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ {
+ name: "single byte layer",
+ layerData: []byte{0x1f},
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Test the gzip validation logic that's used in the Install method
+ if len(tt.layerData) < 2 || tt.layerData[0] != 0x1f || tt.layerData[1] != 0x8b {
+ // This matches the validation in the Install method
+ if !tt.expectError {
+ t.Error("expected valid gzip data")
+ }
+ if !strings.Contains(tt.errorMsg, "is not a gzip compressed archive") {
+ t.Errorf("expected error message to contain 'is not a gzip compressed archive'")
+ }
+ }
+ })
+ }
+}
diff --git a/internal/plugin/installer/plugin_structure.go b/internal/plugin/installer/plugin_structure.go
new file mode 100644
index 000000000..10647141e
--- /dev/null
+++ b/internal/plugin/installer/plugin_structure.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+)
+
+// detectPluginRoot searches for plugin.yaml in the extracted directory
+// and returns the path to the directory containing it.
+// This handles cases where the tarball contains the plugin in a subdirectory.
+func detectPluginRoot(extractDir string) (string, error) {
+ // First check if plugin.yaml is at the root
+ if _, err := os.Stat(filepath.Join(extractDir, plugin.PluginFileName)); err == nil {
+ return extractDir, nil
+ }
+
+ // Otherwise, look for plugin.yaml in subdirectories (only one level deep)
+ entries, err := os.ReadDir(extractDir)
+ if err != nil {
+ return "", err
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ subdir := filepath.Join(extractDir, entry.Name())
+ if _, err := os.Stat(filepath.Join(subdir, plugin.PluginFileName)); err == nil {
+ return subdir, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("plugin.yaml not found in %s or its immediate subdirectories", extractDir)
+}
+
+// validatePluginName checks if the plugin directory name matches the plugin name
+// from plugin.yaml when the plugin is in a subdirectory.
+func validatePluginName(pluginRoot string, expectedName string) error {
+ // Only validate if plugin is in a subdirectory
+ dirName := filepath.Base(pluginRoot)
+ if dirName == expectedName {
+ return nil
+ }
+
+ // Load plugin.yaml to get the actual name
+ p, err := plugin.LoadDir(pluginRoot)
+ if err != nil {
+ return fmt.Errorf("failed to load plugin from %s: %w", pluginRoot, err)
+ }
+
+ m := p.Metadata()
+ actualName := m.Name
+
+ // For now, just log a warning if names don't match
+ // In the future, we might want to enforce this more strictly
+ if actualName != dirName && actualName != strings.TrimSuffix(expectedName, filepath.Ext(expectedName)) {
+ // This is just informational - not an error
+ return nil
+ }
+
+ return nil
+}
diff --git a/internal/plugin/installer/plugin_structure_test.go b/internal/plugin/installer/plugin_structure_test.go
new file mode 100644
index 000000000..c8766ce59
--- /dev/null
+++ b/internal/plugin/installer/plugin_structure_test.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestDetectPluginRoot(t *testing.T) {
+ tests := []struct {
+ name string
+ setup func(dir string) error
+ expectRoot string
+ expectError bool
+ }{
+ {
+ name: "plugin.yaml at root",
+ setup: func(dir string) error {
+ return os.WriteFile(filepath.Join(dir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: ".",
+ expectError: false,
+ },
+ {
+ name: "plugin.yaml in subdirectory",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "my-plugin")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: "my-plugin",
+ expectError: false,
+ },
+ {
+ name: "no plugin.yaml",
+ setup: func(dir string) error {
+ return os.WriteFile(filepath.Join(dir, "README.md"), []byte("test"), 0644)
+ },
+ expectRoot: "",
+ expectError: true,
+ },
+ {
+ name: "plugin.yaml in nested subdirectory (should not find)",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "outer", "inner")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: "",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dir := t.TempDir()
+ if err := tt.setup(dir); err != nil {
+ t.Fatalf("Setup failed: %v", err)
+ }
+
+ root, err := detectPluginRoot(dir)
+ if tt.expectError {
+ if err == nil {
+ t.Error("Expected error but got none")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ expectedPath := dir
+ if tt.expectRoot != "." {
+ expectedPath = filepath.Join(dir, tt.expectRoot)
+ }
+ if root != expectedPath {
+ t.Errorf("Expected root %s but got %s", expectedPath, root)
+ }
+ }
+ })
+ }
+}
+
+func TestValidatePluginName(t *testing.T) {
+ tests := []struct {
+ name string
+ setup func(dir string) error
+ pluginRoot string
+ expectedName string
+ expectError bool
+ }{
+ {
+ name: "matching directory and plugin name",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "my-plugin")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ yaml := `name: my-plugin
+version: 1.0.0
+usage: test
+description: test`
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644)
+ },
+ pluginRoot: "my-plugin",
+ expectedName: "my-plugin",
+ expectError: false,
+ },
+ {
+ name: "different directory and plugin name",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "wrong-name")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ yaml := `name: my-plugin
+version: 1.0.0
+usage: test
+description: test`
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644)
+ },
+ pluginRoot: "wrong-name",
+ expectedName: "wrong-name",
+ expectError: false, // Currently we don't error on mismatch
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dir := t.TempDir()
+ if err := tt.setup(dir); err != nil {
+ t.Fatalf("Setup failed: %v", err)
+ }
+
+ pluginRoot := filepath.Join(dir, tt.pluginRoot)
+ err := validatePluginName(pluginRoot, tt.expectedName)
+ if tt.expectError {
+ if err == nil {
+ t.Error("Expected error but got none")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/plugin/installer/vcs_installer.go b/internal/plugin/installer/vcs_installer.go
similarity index 82%
rename from pkg/plugin/installer/vcs_installer.go
rename to internal/plugin/installer/vcs_installer.go
index 3967e46cd..3601ec7a8 100644
--- a/pkg/plugin/installer/vcs_installer.go
+++ b/internal/plugin/installer/vcs_installer.go
@@ -13,19 +13,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
+ "errors"
+ "fmt"
+ stdfs "io/fs"
+ "log/slog"
"os"
"sort"
"github.com/Masterminds/semver/v3"
"github.com/Masterminds/vcs"
- "github.com/pkg/errors"
+ "helm.sh/helm/v4/internal/plugin/cache"
"helm.sh/helm/v4/internal/third_party/dep/fs"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/plugin/cache"
)
// VCSInstaller installs plugins from remote a repository.
@@ -88,13 +91,13 @@ func (i *VCSInstaller) Install() error {
return ErrMissingMetadata
}
- debug("copying %s to %s", i.Repo.LocalPath(), i.Path())
+ slog.Debug("copying files", "source", i.Repo.LocalPath(), "destination", i.Path())
return fs.CopyDir(i.Repo.LocalPath(), i.Path())
}
// Update updates a remote repository
func (i *VCSInstaller) Update() error {
- debug("updating %s", i.Repo.Remote())
+ slog.Debug("updating", "source", i.Repo.Remote())
if i.Repo.IsDirty() {
return errors.New("plugin repo was modified")
}
@@ -128,7 +131,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
if err != nil {
return "", err
}
- debug("found refs: %s", refs)
+ slog.Debug("found refs", "refs", refs)
// Convert and filter the list to semver.Version instances
semvers := getSemVers(refs)
@@ -139,27 +142,27 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
if constraint.Check(v) {
// If the constraint passes get the original reference
ver := v.Original()
- debug("setting to %s", ver)
+ slog.Debug("setting to version", "version", ver)
return ver, nil
}
}
- return "", errors.Errorf("requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote())
+ return "", fmt.Errorf("requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote())
}
// setVersion attempts to checkout the version
func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error {
- debug("setting version to %q", i.Version)
+ slog.Debug("setting version", "version", i.Version)
return repo.UpdateVersion(ref)
}
// sync will clone or update a remote repo.
func (i *VCSInstaller) sync(repo vcs.Repo) error {
- if _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) {
- debug("cloning %s to %s", repo.Remote(), repo.LocalPath())
+ if _, err := os.Stat(repo.LocalPath()); errors.Is(err, stdfs.ErrNotExist) {
+ slog.Debug("cloning", "source", repo.Remote(), "destination", repo.LocalPath())
return repo.Get()
}
- debug("updating %s", repo.Remote())
+ slog.Debug("updating", "source", repo.Remote(), "destination", repo.LocalPath())
return repo.Update()
}
diff --git a/pkg/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go
similarity index 89%
rename from pkg/plugin/installer/vcs_installer_test.go
rename to internal/plugin/installer/vcs_installer_test.go
index fbb5d354e..d542a0f75 100644
--- a/pkg/plugin/installer/vcs_installer_test.go
+++ b/internal/plugin/installer/vcs_installer_test.go
@@ -13,12 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
import (
"fmt"
"os"
"path/filepath"
+ "strings"
"testing"
"github.com/Masterminds/vcs"
@@ -56,7 +57,7 @@ func TestVCSInstaller(t *testing.T) {
}
source := "https://github.com/adamreese/helm-env"
- testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo")
+ testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo-v1")
repo := &testRepo{
local: testRepoPath,
tags: []string{"0.1.0", "0.1.1"},
@@ -82,8 +83,9 @@ func TestVCSInstaller(t *testing.T) {
if repo.current != "0.1.1" {
t.Fatalf("expected version '0.1.1', got %q", repo.current)
}
- if i.Path() != helmpath.DataPath("plugins", "helm-env") {
- t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path())
+ expectedPath := helmpath.DataPath("plugins", "helm-env")
+ if i.Path() != expectedPath {
+ t.Fatalf("expected path %q, got %q", expectedPath, i.Path())
}
// Install again to test plugin exists error
@@ -119,6 +121,8 @@ func TestVCSInstallerNonExistentVersion(t *testing.T) {
if err := Install(i); err == nil {
t.Fatalf("expected error for version does not exists, got none")
+ } else if strings.Contains(err.Error(), "Could not resolve host: github.com") {
+ t.Skip("Unable to run test without Internet access")
} else if err.Error() != fmt.Sprintf("requested version %q does not exist for plugin %q", version, source) {
t.Fatalf("expected error for version does not exists, got (%v)", err)
}
@@ -146,7 +150,11 @@ func TestVCSInstallerUpdate(t *testing.T) {
// Install plugin before update
if err := Install(i); err != nil {
- t.Fatal(err)
+ if strings.Contains(err.Error(), "Could not resolve host: github.com") {
+ t.Skip("Unable to run test without Internet access")
+ } else {
+ t.Fatal(err)
+ }
}
// Test FindSource method for positive result
diff --git a/internal/plugin/installer/verification_test.go b/internal/plugin/installer/verification_test.go
new file mode 100644
index 000000000..22f0a8308
--- /dev/null
+++ b/internal/plugin/installer/verification_test.go
@@ -0,0 +1,421 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+func TestInstallWithOptions_VerifyMissingProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball without .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Capture stderr to check warning message
+ oldStderr := os.Stderr
+ r, w, _ := os.Pipe()
+ os.Stderr = w
+
+ // Install with verification enabled (should warn but succeed)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"})
+
+ // Restore stderr and read captured output
+ w.Close()
+ os.Stderr = oldStderr
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ output := buf.String()
+
+ // Should succeed with nil result (no verification performed)
+ if err != nil {
+ t.Fatalf("Expected installation to succeed despite missing .prov file, got error: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when .prov file is missing, got: %+v", result)
+ }
+
+ // Should contain warning message
+ expectedWarning := "WARNING: No provenance file found for plugin"
+ if !strings.Contains(output, expectedWarning) {
+ t.Errorf("Expected warning message '%s' in output, got: %s", expectedWarning, output)
+ }
+
+ // Plugin should be installed
+ if _, err := os.Stat(installer.Path()); os.IsNotExist(err) {
+ t.Errorf("Plugin should be installed at %s", installer.Path())
+ }
+}
+
+func TestInstallWithOptions_VerifyWithValidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball with valid .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+
+ provFile := pluginTgz + ".prov"
+ createProvFile(t, provFile, pluginTgz, "")
+ defer os.Remove(provFile)
+
+ // Create keyring with test key (empty for testing)
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification enabled
+ // This will fail signature verification but pass hash validation
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail due to invalid signature (empty keyring) but we test that it gets past the hash check
+ if err == nil {
+ t.Fatalf("Expected installation to fail with empty keyring")
+ }
+ if !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected plugin verification failed error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+
+ // Plugin should not be installed due to verification failure
+ if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) {
+ t.Errorf("Plugin should not be installed when verification fails")
+ }
+}
+
+func TestInstallWithOptions_VerifyWithInvalidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball with invalid .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ provFile := pluginTgz + ".prov"
+ createProvFileInvalidFormat(t, provFile)
+ defer os.Remove(provFile)
+
+ // Create keyring with test key
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification enabled (should fail)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with verification error
+ if err == nil {
+ t.Fatalf("Expected installation with invalid .prov file to fail")
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+
+ // Should contain verification failure message
+ expectedError := "plugin verification failed"
+ if !strings.Contains(err.Error(), expectedError) {
+ t.Errorf("Expected error message '%s', got: %s", expectedError, err.Error())
+ }
+
+ // Plugin should not be installed
+ if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) {
+ t.Errorf("Plugin should not be installed when verification fails")
+ }
+}
+
+func TestInstallWithOptions_NoVerifyRequested(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball without .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install without verification (should succeed without any verification)
+ result, err := InstallWithOptions(installer, Options{Verify: false})
+
+ // Should succeed with no verification
+ if err != nil {
+ t.Fatalf("Expected installation without verification to succeed, got error: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification is disabled, got: %+v", result)
+ }
+
+ // Plugin should be installed
+ if _, err := os.Stat(installer.Path()); os.IsNotExist(err) {
+ t.Errorf("Plugin should be installed at %s", installer.Path())
+ }
+}
+
+func TestInstallWithOptions_VerifyDirectoryNotSupported(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a directory-based plugin (not an archive)
+ pluginDir := createTestPluginDir(t)
+
+ // Create local installer for directory
+ installer, err := NewLocalInstaller(pluginDir)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail (directories don't support verification)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"})
+
+ // Should fail with verification not supported error
+ if err == nil {
+ t.Fatalf("Expected installation to fail with verification not supported error")
+ }
+ if !strings.Contains(err.Error(), "--verify is only supported for plugin tarballs") {
+ t.Errorf("Expected verification not supported error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+func TestInstallWithOptions_VerifyMismatchedProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create plugin tarball
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ provFile := pluginTgz + ".prov"
+ // Create provenance file with wrong hash (for a different file)
+ createProvFile(t, provFile, pluginTgz, "sha256:wronghash")
+ defer os.Remove(provFile)
+
+ // Create keyring with test key
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail due to hash mismatch
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with verification error
+ if err == nil {
+ t.Fatalf("Expected installation to fail with hash mismatch")
+ }
+ if !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected plugin verification failed error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+func TestInstallWithOptions_VerifyProvenanceAccessError(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create plugin tarball
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create a .prov file but make it inaccessible (simulate permission error)
+ provFile := pluginTgz + ".prov"
+ if err := os.WriteFile(provFile, []byte("test"), 0000); err != nil {
+ t.Fatalf("Failed to create inaccessible provenance file: %v", err)
+ }
+ defer os.Remove(provFile)
+
+ // Create keyring
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail due to access error
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with access error (either at stat level or during verification)
+ if err == nil {
+ t.Fatalf("Expected installation to fail with provenance file access error")
+ }
+ // The error could be either "failed to access provenance file" or "plugin verification failed"
+ // depending on when the permission error occurs
+ if !strings.Contains(err.Error(), "failed to access provenance file") &&
+ !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected provenance file access or verification error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+// Helper functions for test setup
+
+func createTestPluginDir(t *testing.T) string {
+ t.Helper()
+
+ // Create temporary directory with plugin structure
+ tmpDir := t.TempDir()
+ pluginDir := filepath.Join(tmpDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatalf("Failed to create plugin directory: %v", err)
+ }
+
+ // Create plugin.yaml using the standardized v1 format
+ pluginYaml := `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYaml), 0644); err != nil {
+ t.Fatalf("Failed to create plugin.yaml: %v", err)
+ }
+
+ return pluginDir
+}
+
+func createTarballFromPluginDir(t *testing.T, pluginDir string) string {
+ t.Helper()
+
+ // Create tarball using the plugin package helper
+ tmpDir := filepath.Dir(pluginDir)
+ tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz")
+ tarFile, err := os.Create(tgzPath)
+ if err != nil {
+ t.Fatalf("Failed to create tarball file: %v", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ t.Fatalf("Failed to create tarball: %v", err)
+ }
+
+ return tgzPath
+}
+
+func createProvFile(t *testing.T, provFile, pluginTgz, hash string) {
+ t.Helper()
+
+ var hashStr string
+ if hash == "" {
+ // Calculate actual hash of the tarball for realistic testing
+ data, err := os.ReadFile(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to read tarball for hashing: %v", err)
+ }
+ hashSum := sha256.Sum256(data)
+ hashStr = fmt.Sprintf("sha256:%x", hashSum)
+ } else {
+ // Use provided hash (could be wrong for testing)
+ hashStr = hash
+ }
+
+ // Create properly formatted provenance file with specified hash
+ provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+name: test-plugin
+version: 1.0.0
+description: Test plugin for verification
+files:
+ test-plugin-1.0.0.tgz: %s
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQEcBAEBCAAGBQJktest...
+-----END PGP SIGNATURE-----
+`, hashStr)
+ if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil {
+ t.Fatalf("Failed to create provenance file: %v", err)
+ }
+}
+
+func createProvFileInvalidFormat(t *testing.T, provFile string) {
+ t.Helper()
+
+ // Create an invalid provenance file (not PGP signed format)
+ invalidProv := "This is not a valid PGP signed message"
+ if err := os.WriteFile(provFile, []byte(invalidProv), 0644); err != nil {
+ t.Fatalf("Failed to create invalid provenance file: %v", err)
+ }
+}
+
+func createTestKeyring(t *testing.T) string {
+ t.Helper()
+
+ // Create a temporary keyring file
+ tmpDir := t.TempDir()
+ keyringPath := filepath.Join(tmpDir, "pubring.gpg")
+
+ // Create empty keyring for testing
+ if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil {
+ t.Fatalf("Failed to create test keyring: %v", err)
+ }
+
+ return keyringPath
+}
diff --git a/internal/plugin/loader.go b/internal/plugin/loader.go
new file mode 100644
index 000000000..a58a84126
--- /dev/null
+++ b/internal/plugin/loader.go
@@ -0,0 +1,266 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ extism "github.com/extism/go-sdk"
+ "github.com/tetratelabs/wazero"
+ "go.yaml.in/yaml/v3"
+
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+func peekAPIVersion(r io.Reader) (string, error) {
+ type apiVersion struct {
+ APIVersion string `yaml:"apiVersion"`
+ }
+
+ var v apiVersion
+ d := yaml.NewDecoder(r)
+ if err := d.Decode(&v); err != nil {
+ return "", err
+ }
+
+ return v.APIVersion, nil
+}
+
+func loadMetadataLegacy(metadataData []byte) (*Metadata, error) {
+
+ var ml MetadataLegacy
+ d := yaml.NewDecoder(bytes.NewReader(metadataData))
+ if err := d.Decode(&ml); err != nil {
+ return nil, err
+ }
+
+ if err := ml.Validate(); err != nil {
+ return nil, err
+ }
+
+ m := fromMetadataLegacy(ml)
+ if err := m.Validate(); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func loadMetadataV1(metadataData []byte) (*Metadata, error) {
+
+ var mv1 MetadataV1
+ d := yaml.NewDecoder(bytes.NewReader(metadataData))
+ if err := d.Decode(&mv1); err != nil {
+ return nil, err
+ }
+
+ if err := mv1.Validate(); err != nil {
+ return nil, err
+ }
+
+ m, err := fromMetadataV1(mv1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert MetadataV1 to Metadata: %w", err)
+ }
+
+ if err := m.Validate(); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func loadMetadata(metadataData []byte) (*Metadata, error) {
+ apiVersion, err := peekAPIVersion(bytes.NewReader(metadataData))
+ if err != nil {
+ return nil, fmt.Errorf("failed to peek %s API version: %w", PluginFileName, err)
+ }
+
+ switch apiVersion {
+ case "": // legacy
+ return loadMetadataLegacy(metadataData)
+ case "v1":
+ return loadMetadataV1(metadataData)
+ }
+
+ return nil, fmt.Errorf("invalid plugin apiVersion: %q", apiVersion)
+}
+
+type prototypePluginManager struct {
+ runtimes map[string]Runtime
+}
+
+func newPrototypePluginManager() (*prototypePluginManager, error) {
+
+ cc, err := wazero.NewCompilationCacheWithDir(helmpath.CachePath("wazero-build"))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create wazero compilation cache: %w", err)
+ }
+
+ return &prototypePluginManager{
+ runtimes: map[string]Runtime{
+ "subprocess": &RuntimeSubprocess{},
+ "extism/v1": &RuntimeExtismV1{
+ HostFunctions: map[string]extism.HostFunction{},
+ CompilationCache: cc,
+ },
+ },
+ }, nil
+}
+
+func (pm *prototypePluginManager) RegisterRuntime(runtimeName string, runtime Runtime) {
+ pm.runtimes[runtimeName] = runtime
+}
+
+func (pm *prototypePluginManager) CreatePlugin(pluginPath string, metadata *Metadata) (Plugin, error) {
+ rt, ok := pm.runtimes[metadata.Runtime]
+ if !ok {
+ return nil, fmt.Errorf("unsupported plugin runtime type: %q", metadata.Runtime)
+ }
+
+ return rt.CreatePlugin(pluginPath, metadata)
+}
+
+// LoadDir loads a plugin from the given directory.
+func LoadDir(dirname string) (Plugin, error) {
+ pluginfile := filepath.Join(dirname, PluginFileName)
+ metadataData, err := os.ReadFile(pluginfile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read plugin at %q: %w", pluginfile, err)
+ }
+
+ m, err := loadMetadata(metadataData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load plugin %q: %w", dirname, err)
+ }
+
+ pm, err := newPrototypePluginManager()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create plugin manager: %w", err)
+ }
+ return pm.CreatePlugin(dirname, m)
+}
+
+// LoadAll loads all plugins found beneath the base directory.
+//
+// This scans only one directory level.
+func LoadAll(basedir string) ([]Plugin, error) {
+ var plugins []Plugin
+ // We want basedir/*/plugin.yaml
+ scanpath := filepath.Join(basedir, "*", PluginFileName)
+ matches, err := filepath.Glob(scanpath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to search for plugins in %q: %w", scanpath, err)
+ }
+
+ // empty dir should load
+ if len(matches) == 0 {
+ return plugins, nil
+ }
+
+ for _, yamlFile := range matches {
+ dir := filepath.Dir(yamlFile)
+ p, err := LoadDir(dir)
+ if err != nil {
+ return plugins, err
+ }
+ plugins = append(plugins, p)
+ }
+ return plugins, detectDuplicates(plugins)
+}
+
+// findFunc is a function that finds plugins in a directory
+type findFunc func(pluginsDir string) ([]Plugin, error)
+
+// filterFunc is a function that filters plugins
+type filterFunc func(Plugin) bool
+
+// FindPlugins returns a list of plugins that match the descriptor
+func FindPlugins(pluginsDirs []string, descriptor Descriptor) ([]Plugin, error) {
+ return findPlugins(pluginsDirs, LoadAll, makeDescriptorFilter(descriptor))
+}
+
+// findPlugins is the internal implementation that uses the find and filter functions
+func findPlugins(pluginsDirs []string, findFn findFunc, filterFn filterFunc) ([]Plugin, error) {
+ var found []Plugin
+ for _, pluginsDir := range pluginsDirs {
+ ps, err := findFn(pluginsDir)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for _, p := range ps {
+ if filterFn(p) {
+ found = append(found, p)
+ }
+ }
+
+ }
+
+ return found, nil
+}
+
+// makeDescriptorFilter creates a filter function from a descriptor
+// Additional plugin filter criteria we wish to support can be added here
+func makeDescriptorFilter(descriptor Descriptor) filterFunc {
+ return func(p Plugin) bool {
+ // If name is specified, it must match
+ if descriptor.Name != "" && p.Metadata().Name != descriptor.Name {
+ return false
+
+ }
+ // If type is specified, it must match
+ if descriptor.Type != "" && p.Metadata().Type != descriptor.Type {
+ return false
+ }
+ return true
+ }
+}
+
+// FindPlugin returns a single plugin that matches the descriptor
+func FindPlugin(dirs []string, descriptor Descriptor) (Plugin, error) {
+ plugins, err := FindPlugins(dirs, descriptor)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(plugins) > 0 {
+ return plugins[0], nil
+ }
+
+ return nil, fmt.Errorf("plugin: %+v not found", descriptor)
+}
+
+func detectDuplicates(plugs []Plugin) error {
+ names := map[string]string{}
+
+ for _, plug := range plugs {
+ if oldpath, ok := names[plug.Metadata().Name]; ok {
+ return fmt.Errorf(
+ "two plugins claim the name %q at %q and %q",
+ plug.Metadata().Name,
+ oldpath,
+ plug.Dir(),
+ )
+ }
+ names[plug.Metadata().Name] = plug.Dir()
+ }
+
+ return nil
+}
diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go
new file mode 100644
index 000000000..47c214910
--- /dev/null
+++ b/internal/plugin/loader_test.go
@@ -0,0 +1,270 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestPeekAPIVersion(t *testing.T) {
+ testCases := map[string]struct {
+ data []byte
+ expected string
+ }{
+ "v1": {
+ data: []byte(`---
+apiVersion: v1
+name: "test-plugin"
+`),
+ expected: "v1",
+ },
+ "legacy": { // No apiVersion field
+ data: []byte(`---
+name: "test-plugin"
+`),
+ expected: "",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ version, err := peekAPIVersion(bytes.NewReader(tc.data))
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected, version)
+ })
+ }
+
+ // invalid yaml
+ {
+ data := []byte(`bad yaml`)
+ _, err := peekAPIVersion(bytes.NewReader(data))
+ assert.Error(t, err)
+ }
+}
+
+func TestLoadDir(t *testing.T) {
+
+ makeMetadata := func(apiVersion string) Metadata {
+ usage := "hello [params]..."
+ if apiVersion == "legacy" {
+ usage = "" // Legacy plugins don't have Usage field for command syntax
+ }
+ return Metadata{
+ APIVersion: apiVersion,
+ Name: fmt.Sprintf("hello-%s", apiVersion),
+ Version: "0.1.0",
+ Type: "cli/v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: usage,
+ ShortHelp: "echo hello message",
+ LongHelp: "description",
+ IgnoreFlags: true,
+ },
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}},
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
+ },
+ },
+ expandHookArgs: apiVersion == "legacy",
+ },
+ }
+ }
+
+ testCases := map[string]struct {
+ dirname string
+ apiVersion string
+ expect Metadata
+ }{
+ "legacy": {
+ dirname: "testdata/plugdir/good/hello-legacy",
+ apiVersion: "legacy",
+ expect: makeMetadata("legacy"),
+ },
+ "v1": {
+ dirname: "testdata/plugdir/good/hello-v1",
+ apiVersion: "v1",
+ expect: makeMetadata("v1"),
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ plug, err := LoadDir(tc.dirname)
+ require.NoError(t, err, "error loading plugin from %s", tc.dirname)
+
+ assert.Equal(t, tc.dirname, plug.Dir())
+ assert.EqualValues(t, tc.expect, plug.Metadata())
+ })
+ }
+}
+
+func TestLoadDirDuplicateEntries(t *testing.T) {
+ testCases := map[string]string{
+ "legacy": "testdata/plugdir/bad/duplicate-entries-legacy",
+ "v1": "testdata/plugdir/bad/duplicate-entries-v1",
+ }
+ for name, dirname := range testCases {
+ t.Run(name, func(t *testing.T) {
+ _, err := LoadDir(dirname)
+ assert.Error(t, err)
+ })
+ }
+}
+
+func TestLoadDirGetter(t *testing.T) {
+ dirname := "testdata/plugdir/good/getter"
+
+ expect := Metadata{
+ Name: "getter",
+ Version: "1.2.3",
+ Type: "getter/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigGetterV1{
+ Protocols: []string{"myprotocol", "myprotocols"},
+ },
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ ProtocolCommands: []SubprocessProtocolCommand{
+ {
+ Protocols: []string{"myprotocol", "myprotocols"},
+ PlatformCommand: []PlatformCommand{{Command: "echo getter"}},
+ },
+ },
+ },
+ }
+
+ plug, err := LoadDir(dirname)
+ require.NoError(t, err)
+ assert.Equal(t, dirname, plug.Dir())
+ assert.Equal(t, expect, plug.Metadata())
+}
+
+func TestPostRenderer(t *testing.T) {
+ dirname := "testdata/plugdir/good/postrenderer-v1"
+
+ expect := Metadata{
+ Name: "postrenderer-v1",
+ Version: "1.2.3",
+ Type: "postrenderer/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigPostRendererV1{},
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {
+ Command: "${HELM_PLUGIN_DIR}/sed-test.sh",
+ },
+ },
+ },
+ }
+
+ plug, err := LoadDir(dirname)
+ require.NoError(t, err)
+ assert.Equal(t, dirname, plug.Dir())
+ assert.Equal(t, expect, plug.Metadata())
+}
+
+func TestDetectDuplicates(t *testing.T) {
+ plugs := []Plugin{
+ mockSubprocessCLIPlugin(t, "foo"),
+ mockSubprocessCLIPlugin(t, "bar"),
+ }
+ if err := detectDuplicates(plugs); err != nil {
+ t.Error("no duplicates in the first set")
+ }
+ plugs = append(plugs, mockSubprocessCLIPlugin(t, "foo"))
+ if err := detectDuplicates(plugs); err == nil {
+ t.Error("duplicates in the second set")
+ }
+}
+
+func TestLoadAll(t *testing.T) {
+ // Verify that empty dir loads:
+ {
+ plugs, err := LoadAll("testdata")
+ require.NoError(t, err)
+ assert.Len(t, plugs, 0)
+ }
+
+ basedir := "testdata/plugdir/good"
+ plugs, err := LoadAll(basedir)
+ require.NoError(t, err)
+ require.NotEmpty(t, plugs, "expected plugins to be loaded from %s", basedir)
+
+ plugsMap := map[string]Plugin{}
+ for _, p := range plugs {
+ plugsMap[p.Metadata().Name] = p
+ }
+
+ assert.Len(t, plugsMap, 7)
+ assert.Contains(t, plugsMap, "downloader")
+ assert.Contains(t, plugsMap, "echo-legacy")
+ assert.Contains(t, plugsMap, "echo-v1")
+ assert.Contains(t, plugsMap, "getter")
+ assert.Contains(t, plugsMap, "hello-legacy")
+ assert.Contains(t, plugsMap, "hello-v1")
+ assert.Contains(t, plugsMap, "postrenderer-v1")
+}
+
+func TestFindPlugins(t *testing.T) {
+ cases := []struct {
+ name string
+ plugdirs string
+ expected int
+ }{
+ {
+ name: "plugdirs is empty",
+ plugdirs: "",
+ expected: 0,
+ },
+ {
+ name: "plugdirs isn't dir",
+ plugdirs: "./plugin_test.go",
+ expected: 0,
+ },
+ {
+ name: "plugdirs doesn't have plugin",
+ plugdirs: ".",
+ expected: 0,
+ },
+ {
+ name: "normal",
+ plugdirs: "./testdata/plugdir/good",
+ expected: 7,
+ },
+ }
+ for _, c := range cases {
+ t.Run(t.Name(), func(t *testing.T) {
+ plugin, err := LoadAll(c.plugdirs)
+ require.NoError(t, err)
+ assert.Len(t, plugin, c.expected, "expected %d plugins, got %d", c.expected, len(plugin))
+ })
+ }
+}
diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go
new file mode 100644
index 000000000..111c0599f
--- /dev/null
+++ b/internal/plugin/metadata.go
@@ -0,0 +1,217 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "errors"
+ "fmt"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+// Metadata of a plugin, converted from the "on-disk" legacy or v1 plugin.yaml
+// Specifically, Config and RuntimeConfig are converted to their respective types based on the plugin type and runtime
+type Metadata struct {
+ // APIVersion specifies the plugin API version
+ APIVersion string
+
+ // Name is the name of the plugin
+ Name string
+
+ // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1)
+ Type string
+
+ // Runtime specifies the runtime type (subprocess, wasm)
+ Runtime string
+
+ // Version is the SemVer 2 version of the plugin.
+ Version string
+
+ // SourceURL is the URL where this plugin can be found
+ SourceURL string
+
+ // Config contains the type-specific configuration for this plugin
+ Config Config
+
+ // RuntimeConfig contains the runtime-specific configuration
+ RuntimeConfig RuntimeConfig
+}
+
+func (m Metadata) Validate() error {
+ var errs []error
+
+ if !validPluginName.MatchString(m.Name) {
+ errs = append(errs, fmt.Errorf("invalid name"))
+ }
+
+ if m.APIVersion == "" {
+ errs = append(errs, fmt.Errorf("empty APIVersion"))
+ }
+
+ if m.Type == "" {
+ errs = append(errs, fmt.Errorf("empty type field"))
+ }
+
+ if m.Runtime == "" {
+ errs = append(errs, fmt.Errorf("empty runtime field"))
+ }
+
+ if m.Config == nil {
+ errs = append(errs, fmt.Errorf("missing config field"))
+ }
+
+ if m.RuntimeConfig == nil {
+ errs = append(errs, fmt.Errorf("missing runtimeConfig field"))
+ }
+
+ // Validate the config itself
+ if m.Config != nil {
+ if err := m.Config.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("config validation failed: %w", err))
+ }
+ }
+
+ // Validate the runtime config itself
+ if m.RuntimeConfig != nil {
+ if err := m.RuntimeConfig.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("runtime config validation failed: %w", err))
+ }
+ }
+
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+
+ return nil
+}
+
+func fromMetadataLegacy(m MetadataLegacy) *Metadata {
+ pluginType := "cli/v1"
+
+ if len(m.Downloaders) > 0 {
+ pluginType = "getter/v1"
+ }
+
+ return &Metadata{
+ APIVersion: "legacy",
+ Name: m.Name,
+ Version: m.Version,
+ Type: pluginType,
+ Runtime: "subprocess",
+ Config: buildLegacyConfig(m, pluginType),
+ RuntimeConfig: buildLegacyRuntimeConfig(m),
+ }
+}
+
+func buildLegacyConfig(m MetadataLegacy, pluginType string) Config {
+ switch pluginType {
+ case "getter/v1":
+ var protocols []string
+ for _, d := range m.Downloaders {
+ protocols = append(protocols, d.Protocols...)
+ }
+ return &schema.ConfigGetterV1{
+ Protocols: protocols,
+ }
+ case "cli/v1":
+ return &schema.ConfigCLIV1{
+ Usage: "", // Legacy plugins don't have Usage field for command syntax
+ ShortHelp: m.Usage, // Map legacy usage to shortHelp
+ LongHelp: m.Description, // Map legacy description to longHelp
+ IgnoreFlags: m.IgnoreFlags,
+ }
+ default:
+ return nil
+ }
+}
+
+func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig {
+ var protocolCommands []SubprocessProtocolCommand
+ if len(m.Downloaders) > 0 {
+ protocolCommands =
+ make([]SubprocessProtocolCommand, 0, len(m.Downloaders))
+ for _, d := range m.Downloaders {
+ protocolCommands = append(protocolCommands, SubprocessProtocolCommand{
+ Protocols: d.Protocols,
+ PlatformCommand: []PlatformCommand{{Command: d.Command}},
+ })
+ }
+ }
+
+ platformCommand := m.PlatformCommand
+ if len(platformCommand) == 0 && len(m.Command) > 0 {
+ platformCommand = []PlatformCommand{{Command: m.Command}}
+ }
+
+ platformHooks := m.PlatformHooks
+ expandHookArgs := true
+ if len(platformHooks) == 0 && len(m.Hooks) > 0 {
+ platformHooks = make(PlatformHooks, len(m.Hooks))
+ for hookName, hookCommand := range m.Hooks {
+ platformHooks[hookName] = []PlatformCommand{{Command: "sh", Args: []string{"-c", hookCommand}}}
+ expandHookArgs = false
+ }
+ }
+ return &RuntimeConfigSubprocess{
+ PlatformCommand: platformCommand,
+ PlatformHooks: platformHooks,
+ ProtocolCommands: protocolCommands,
+ expandHookArgs: expandHookArgs,
+ }
+}
+
+func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) {
+
+ config, err := unmarshaConfig(mv1.Type, mv1.Config)
+ if err != nil {
+ return nil, err
+ }
+
+ runtimeConfig, err := convertMetdataRuntimeConfig(mv1.Runtime, mv1.RuntimeConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Metadata{
+ APIVersion: mv1.APIVersion,
+ Name: mv1.Name,
+ Type: mv1.Type,
+ Runtime: mv1.Runtime,
+ Version: mv1.Version,
+ SourceURL: mv1.SourceURL,
+ Config: config,
+ RuntimeConfig: runtimeConfig,
+ }, nil
+}
+
+func convertMetdataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) {
+ var runtimeConfig RuntimeConfig
+ var err error
+
+ switch runtimeType {
+ case "subprocess":
+ runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigSubprocess](runtimeConfigRaw)
+ case "extism/v1":
+ runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigExtismV1](runtimeConfigRaw)
+ default:
+ return nil, fmt.Errorf("unsupported plugin runtime type: %q", runtimeType)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal runtimeConfig for %s runtime: %w", runtimeType, err)
+ }
+ return runtimeConfig, nil
+}
diff --git a/internal/plugin/metadata_legacy.go b/internal/plugin/metadata_legacy.go
new file mode 100644
index 000000000..a7b245dc0
--- /dev/null
+++ b/internal/plugin/metadata_legacy.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// Downloaders represents the plugins capability if it can retrieve
+// charts from special sources
+type Downloaders struct {
+ // Protocols are the list of schemes from the charts URL.
+ Protocols []string `yaml:"protocols"`
+ // Command is the executable path with which the plugin performs
+ // the actual download for the corresponding Protocols
+ Command string `yaml:"command"`
+}
+
+// MetadataLegacy is the legacy plugin.yaml format
+type MetadataLegacy struct {
+ // Name is the name of the plugin
+ Name string `yaml:"name"`
+
+ // Version is a SemVer 2 version of the plugin.
+ Version string `yaml:"version"`
+
+ // Usage is the single-line usage text shown in help
+ Usage string `yaml:"usage"`
+
+ // Description is a long description shown in places like `helm help`
+ Description string `yaml:"description"`
+
+ // PlatformCommand is the plugin command, with a platform selector and support for args.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+
+ // Command is the plugin command, as a single string.
+ // DEPRECATED: Use PlatformCommand instead. Removed in subprocess/v1 plugins.
+ Command string `yaml:"command"`
+
+ // IgnoreFlags ignores any flags passed in from Helm
+ IgnoreFlags bool `yaml:"ignoreFlags"`
+
+ // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+ PlatformHooks PlatformHooks `yaml:"platformHooks"`
+
+ // Hooks are commands that will run on plugin events, as a single string.
+ // DEPRECATED: Use PlatformHooks instead. Removed in subprocess/v1 plugins.
+ Hooks Hooks `yaml:"hooks"`
+
+ // Downloaders field is used if the plugin supply downloader mechanism
+ // for special protocols.
+ Downloaders []Downloaders `yaml:"downloaders"`
+}
+
+func (m *MetadataLegacy) Validate() error {
+ if !validPluginName.MatchString(m.Name) {
+ return fmt.Errorf("invalid plugin name")
+ }
+ m.Usage = sanitizeString(m.Usage)
+
+ if len(m.PlatformCommand) > 0 && len(m.Command) > 0 {
+ return fmt.Errorf("both platformCommand and command are set")
+ }
+
+ if len(m.PlatformHooks) > 0 && len(m.Hooks) > 0 {
+ return fmt.Errorf("both platformHooks and hooks are set")
+ }
+
+ // Validate downloader plugins
+ for i, downloader := range m.Downloaders {
+ if downloader.Command == "" {
+ return fmt.Errorf("downloader %d has empty command", i)
+ }
+ if len(downloader.Protocols) == 0 {
+ return fmt.Errorf("downloader %d has no protocols", i)
+ }
+ for j, protocol := range downloader.Protocols {
+ if protocol == "" {
+ return fmt.Errorf("downloader %d has empty protocol at index %d", i, j)
+ }
+ }
+ }
+
+ return nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ if unicode.IsPrint(r) {
+ return r
+ }
+ return -1
+ }, str)
+}
diff --git a/internal/plugin/metadata_test.go b/internal/plugin/metadata_test.go
new file mode 100644
index 000000000..28bc4cf51
--- /dev/null
+++ b/internal/plugin/metadata_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestValidatePluginData(t *testing.T) {
+
+ // A mock plugin with no commands
+ mockNoCommand := mockSubprocessCLIPlugin(t, "foo")
+ mockNoCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{},
+ PlatformHooks: map[string][]PlatformCommand{},
+ }
+
+ // A mock plugin with legacy commands
+ mockLegacyCommand := mockSubprocessCLIPlugin(t, "foo")
+ mockLegacyCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {
+ Command: "echo \"mock plugin\"",
+ },
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ PlatformCommand{
+ Command: "echo installing...",
+ },
+ },
+ },
+ }
+
+ for i, item := range []struct {
+ pass bool
+ plug Plugin
+ errString string
+ }{
+ {true, mockSubprocessCLIPlugin(t, "abcdefghijklmnopqrstuvwxyz0123456789_-ABC"), ""},
+ {true, mockSubprocessCLIPlugin(t, "foo-bar-FOO-BAR_1234"), ""},
+ {false, mockSubprocessCLIPlugin(t, "foo -bar"), "invalid name"},
+ {false, mockSubprocessCLIPlugin(t, "$foo -bar"), "invalid name"}, // Test leading chars
+ {false, mockSubprocessCLIPlugin(t, "foo -bar "), "invalid name"}, // Test trailing chars
+ {false, mockSubprocessCLIPlugin(t, "foo\nbar"), "invalid name"}, // Test newline
+ {true, mockNoCommand, ""}, // Test no command metadata works
+ {true, mockLegacyCommand, ""}, // Test legacy command metadata works
+ } {
+ err := item.plug.Metadata().Validate()
+ if item.pass && err != nil {
+ t.Errorf("failed to validate case %d: %s", i, err)
+ } else if !item.pass && err == nil {
+ t.Errorf("expected case %d to fail", i)
+ }
+ if !item.pass && err.Error() != item.errString {
+ t.Errorf("index [%d]: expected the following error: %s, but got: %s", i, item.errString, err.Error())
+ }
+ }
+}
+
+func TestMetadataValidateMultipleErrors(t *testing.T) {
+ // Create metadata with multiple validation issues
+ metadata := Metadata{
+ Name: "invalid name with spaces", // Invalid name
+ APIVersion: "", // Empty API version
+ Type: "", // Empty type
+ Runtime: "", // Empty runtime
+ Config: nil, // Missing config
+ RuntimeConfig: nil, // Missing runtime config
+ }
+
+ err := metadata.Validate()
+ if err == nil {
+ t.Fatal("expected validation to fail with multiple errors")
+ }
+
+ errStr := err.Error()
+
+ // Check that all expected errors are present in the joined error
+ expectedErrors := []string{
+ "invalid name",
+ "empty APIVersion",
+ "empty type field",
+ "empty runtime field",
+ "missing config field",
+ "missing runtimeConfig field",
+ }
+
+ for _, expectedErr := range expectedErrors {
+ if !strings.Contains(errStr, expectedErr) {
+ t.Errorf("expected error to contain %q, but got: %v", expectedErr, errStr)
+ }
+ }
+
+ // Verify that the error contains the correct number of error messages
+ errorCount := 0
+ for _, expectedErr := range expectedErrors {
+ if strings.Contains(errStr, expectedErr) {
+ errorCount++
+ }
+ }
+
+ if errorCount < len(expectedErrors) {
+ t.Errorf("expected %d errors, but only found %d in: %v", len(expectedErrors), errorCount, errStr)
+ }
+}
diff --git a/internal/plugin/metadata_v1.go b/internal/plugin/metadata_v1.go
new file mode 100644
index 000000000..81dbc2e20
--- /dev/null
+++ b/internal/plugin/metadata_v1.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+)
+
+// MetadataV1 is the APIVersion V1 plugin.yaml format
+type MetadataV1 struct {
+ // APIVersion specifies the plugin API version
+ APIVersion string `yaml:"apiVersion"`
+
+ // Name is the name of the plugin
+ Name string `yaml:"name"`
+
+ // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1)
+ Type string `yaml:"type"`
+
+ // Runtime specifies the runtime type (subprocess, wasm)
+ Runtime string `yaml:"runtime"`
+
+ // Version is a SemVer 2 version of the plugin.
+ Version string `yaml:"version"`
+
+ // SourceURL is the URL where this plugin can be found
+ SourceURL string `yaml:"sourceURL,omitempty"`
+
+ // Config contains the type-specific configuration for this plugin
+ Config map[string]any `yaml:"config"`
+
+ // RuntimeConfig contains the runtime-specific configuration
+ RuntimeConfig map[string]any `yaml:"runtimeConfig"`
+}
+
+func (m *MetadataV1) Validate() error {
+ if !validPluginName.MatchString(m.Name) {
+ return fmt.Errorf("invalid plugin `name`")
+ }
+
+ if m.APIVersion != "v1" {
+ return fmt.Errorf("invalid `apiVersion`: %q", m.APIVersion)
+ }
+
+ if m.Type == "" {
+ return fmt.Errorf("`type` missing")
+ }
+
+ if m.Runtime == "" {
+ return fmt.Errorf("`runtime` missing")
+ }
+
+ return nil
+}
diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go
new file mode 100644
index 000000000..132b1739e
--- /dev/null
+++ b/internal/plugin/plugin.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v4/internal/plugin"
+
+import (
+ "context"
+ "io"
+ "regexp"
+)
+
+const PluginFileName = "plugin.yaml"
+
+// Plugin defines a plugin instance. The client (Helm codebase) facing type that can be used to introspect and invoke a plugin
+type Plugin interface {
+ // Dir return the plugin directory (as an absolute path) on the filesystem
+ Dir() string
+
+ // Metadata describes the plugin's type, version, etc.
+ // (This metadata type is the converted and plugin version independented in-memory representation of the plugin.yaml file)
+ Metadata() Metadata
+
+ // Invoke takes the given input, and dispatches the contents to plugin instance
+ // The input is expected to be a JSON-serializable object, which the plugin will interpret according to its type
+ // The plugin is expected to return a JSON-serializable object, which the invoker
+ // will interpret according to the plugin's type
+ //
+ // Invoke can be thought of as a request/response mechanism. Similar to e.g. http.RoundTripper
+ //
+ // If plugin's execution fails with a non-zero "return code" (this is plugin runtime implementation specific)
+ // an InvokeExecError is returned
+ Invoke(ctx context.Context, input *Input) (*Output, error)
+}
+
+// PluginHook allows plugins to implement hooks that are invoked on plugin management events (install, upgrade, etc)
+type PluginHook interface { //nolint:revive
+ InvokeHook(event string) error
+}
+
+// Input defines the input message and parameters to be passed to the plugin
+type Input struct {
+ // Message represents the type-elided value to be passed to the plugin.
+ // The plugin is expected to interpret the message according to its type
+ // The message object must be JSON-serializable
+ Message any
+
+ // Optional: Reader to be consumed plugin's "stdin"
+ Stdin io.Reader
+
+ // Optional: Writers to consume the plugin's "stdout" and "stderr"
+ Stdout, Stderr io.Writer
+
+ // Optional: Env represents the environment as a list of "key=value" strings
+ // see os.Environ
+ Env []string
+}
+
+// Output defines the output message and parameters the passed from the plugin
+type Output struct {
+ // Message represents the type-elided value returned from the plugin
+ // The invoker is expected to interpret the message according to the plugin's type
+ // The message object must be JSON-serializable
+ Message any
+}
+
+// validPluginName is a regular expression that validates plugin names.
+//
+// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
+var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go
new file mode 100644
index 000000000..b6c2245ff
--- /dev/null
+++ b/internal/plugin/plugin_test.go
@@ -0,0 +1,62 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginRuntime {
+ t.Helper()
+
+ rc := RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}},
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}},
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
+ },
+ },
+ }
+
+ pluginDir := t.TempDir()
+
+ return &SubprocessPluginRuntime{
+ metadata: Metadata{
+ Name: pluginName,
+ Version: "v0.1.2",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: "Mock plugin",
+ ShortHelp: "Mock plugin",
+ LongHelp: "Mock plugin for testing",
+ IgnoreFlags: false,
+ },
+ RuntimeConfig: &rc,
+ },
+ pluginDir: pluginDir, // NOTE: dir is empty (ie. plugin.yaml is not present)
+ RuntimeConfig: rc,
+ }
+}
diff --git a/internal/plugin/plugin_type_registry.go b/internal/plugin/plugin_type_registry.go
new file mode 100644
index 000000000..da6546c47
--- /dev/null
+++ b/internal/plugin/plugin_type_registry.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file contains a "registry" of supported plugin types.
+
+It enables "dyanmic" operations on the go type associated with a given plugin type (see: `helm.sh/helm/v4/internal/plugin/schema` package)
+
+Examples:
+
+```
+
+ // Create a new instance of the output message type for a given plugin type:
+
+ pluginType := "cli/v1" // for example
+ ptm, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return fmt.Errorf("unknown plugin type %q", pluginType)
+ }
+
+ outputMessageType := reflect.Zero(ptm.outputType).Interface()
+
+```
+
+```
+// Create a new instance of the config type for a given plugin type
+
+ pluginType := "cli/v1" // for example
+ ptm, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return nil
+ }
+
+ config := reflect.New(ptm.configType).Interface().(Config) // `config` is variable of type `Config`, with
+
+ // validate
+ err := config.Validate()
+ if err != nil { // handle error }
+
+ // assert to concrete type if needed
+ cliConfig := config.(*schema.ConfigCLIV1)
+
+```
+*/
+
+package plugin
+
+import (
+ "reflect"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+type pluginTypeMeta struct {
+ pluginType string
+ inputType reflect.Type
+ outputType reflect.Type
+ configType reflect.Type
+}
+
+var pluginTypes = []pluginTypeMeta{
+ {
+ pluginType: "test/v1",
+ inputType: reflect.TypeOf(schema.InputMessageTestV1{}),
+ outputType: reflect.TypeOf(schema.OutputMessageTestV1{}),
+ configType: reflect.TypeOf(schema.ConfigTestV1{}),
+ },
+ {
+ pluginType: "cli/v1",
+ inputType: reflect.TypeOf(schema.InputMessageCLIV1{}),
+ outputType: reflect.TypeOf(schema.OutputMessageCLIV1{}),
+ configType: reflect.TypeOf(schema.ConfigCLIV1{}),
+ },
+ {
+ pluginType: "getter/v1",
+ inputType: reflect.TypeOf(schema.InputMessageGetterV1{}),
+ outputType: reflect.TypeOf(schema.OutputMessageGetterV1{}),
+ configType: reflect.TypeOf(schema.ConfigGetterV1{}),
+ },
+ {
+ pluginType: "postrenderer/v1",
+ inputType: reflect.TypeOf(schema.InputMessagePostRendererV1{}),
+ outputType: reflect.TypeOf(schema.OutputMessagePostRendererV1{}),
+ configType: reflect.TypeOf(schema.ConfigPostRendererV1{}),
+ },
+}
+
+var pluginTypesIndex = func() map[string]*pluginTypeMeta {
+ result := make(map[string]*pluginTypeMeta, len(pluginTypes))
+ for _, m := range pluginTypes {
+ result[m.pluginType] = &m
+ }
+ return result
+}()
diff --git a/internal/plugin/plugin_type_registry_test.go b/internal/plugin/plugin_type_registry_test.go
new file mode 100644
index 000000000..22f26262d
--- /dev/null
+++ b/internal/plugin/plugin_type_registry_test.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestMakeOutputMessage(t *testing.T) {
+ ptm := pluginTypesIndex["getter/v1"]
+ outputType := reflect.Zero(ptm.outputType).Interface()
+ assert.IsType(t, schema.OutputMessageGetterV1{}, outputType)
+
+}
+
+func TestMakeConfig(t *testing.T) {
+ ptm := pluginTypesIndex["getter/v1"]
+ config := reflect.New(ptm.configType).Interface().(Config)
+ assert.IsType(t, &schema.ConfigGetterV1{}, config)
+}
diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go
new file mode 100644
index 000000000..b2ff0b7ca
--- /dev/null
+++ b/internal/plugin/runtime.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "strings"
+
+ "go.yaml.in/yaml/v3"
+)
+
+// Runtime represents a plugin runtime (subprocess, extism, etc) ie. how a plugin should be executed
+// Runtime is responsible for instantiating plugins that implement the runtime
+// TODO: could call this something more like "PluginRuntimeCreator"?
+type Runtime interface {
+ // CreatePlugin creates a plugin instance from the given metadata
+ CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error)
+
+ // TODO: move config unmarshalling to the runtime?
+ // UnmarshalConfig(runtimeConfigRaw map[string]any) (RuntimeConfig, error)
+}
+
+// RuntimeConfig represents the assertable type for a plugin's runtime configuration.
+// It is expected to type assert (cast) the a RuntimeConfig to its expected type
+type RuntimeConfig interface {
+ Validate() error
+}
+
+func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (RuntimeConfig, error) {
+ data, err := yaml.Marshal(runtimeData)
+ if err != nil {
+ return nil, err
+ }
+
+ var config T
+ if err := yaml.Unmarshal(data, &config); err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
+
+// parseEnv takes a list of "KEY=value" environment variable strings
+// and transforms the result into a map[KEY]=value
+//
+// - empty input strings are ignored
+// - input strings with no value are stored as empty strings
+// - duplicate keys overwrite earlier values
+func parseEnv(env []string) map[string]string {
+ result := make(map[string]string, len(env))
+ for _, envVar := range env {
+ parts := strings.SplitN(envVar, "=", 2)
+ if len(parts) > 0 && parts[0] != "" {
+ key := parts[0]
+ var value string
+ if len(parts) > 1 {
+ value = parts[1]
+ }
+ result[key] = value
+ }
+ }
+ return result
+}
+
+func formatEnv(env map[string]string) []string {
+ result := make([]string, 0, len(env))
+ for key, value := range env {
+ result = append(result, fmt.Sprintf("%s=%s", key, value))
+ }
+ return result
+}
diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go
new file mode 100644
index 000000000..b5cc79a6f
--- /dev/null
+++ b/internal/plugin/runtime_extismv1.go
@@ -0,0 +1,292 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "reflect"
+
+ extism "github.com/extism/go-sdk"
+ "github.com/tetratelabs/wazero"
+)
+
+const ExtismV1WasmBinaryFilename = "plugin.wasm"
+
+// RuntimeConfigExtismV1Memory exposes the Wasm/Extism memory options for the plugin
+type RuntimeConfigExtismV1Memory struct {
+ // The max amount of pages the plugin can allocate
+ // One page is 64Kib. e.g. 16 pages would require 1MiB.
+ // Default is 4 pages (256KiB)
+ MaxPages uint32 `yaml:"maxPages,omitempty"`
+
+ // The max size of an Extism HTTP response in bytes
+ // Default is 4096 bytes (4KiB)
+ MaxHTTPResponseBytes int64 `yaml:"maxHttpResponseBytes,omitempty"`
+
+ // The max size of all Extism vars in bytes
+ // Default is 4096 bytes (4KiB)
+ MaxVarBytes int64 `yaml:"maxVarBytes,omitempty"`
+}
+
+// RuntimeConfigExtismV1FileSystem exposes filesystem options for the configuration
+// TODO: should Helm expose AllowedPaths?
+type RuntimeConfigExtismV1FileSystem struct {
+ // If specified, a temporary directory will be created and mapped to /tmp in the plugin's filesystem.
+ // Data written to the directory will be visible on the host filesystem.
+ // The directory will be removed when the plugin invocation completes.
+ CreateTempDir bool `yaml:"createTempDir,omitempty"`
+}
+
+// RuntimeConfigExtismV1 defines the user-configurable options the plugin's Extism runtime
+// The format loosely follows the Extism Manifest format: https://extism.org/docs/concepts/manifest/
+type RuntimeConfigExtismV1 struct {
+ // Describes the limits on the memory the plugin may be allocated.
+ Memory RuntimeConfigExtismV1Memory `yaml:"memory"`
+
+ // The "config" key is a free-form map that can be passed to the plugin.
+ // The plugin must interpret arbitrary data this map may contain
+ Config map[string]string `yaml:"config,omitempty"`
+
+ // An optional set of hosts this plugin can communicate with.
+ // This only has an effect if the plugin makes HTTP requests.
+ // If not specified, then no hosts are allowed.
+ AllowedHosts []string `yaml:"allowedHosts,omitempty"`
+
+ FileSystem RuntimeConfigExtismV1FileSystem `yaml:"fileSystem,omitempty"`
+
+ // The timeout in milliseconds for the plugin to execute
+ Timeout uint64 `yaml:"timeout,omitempty"`
+
+ // HostFunction names exposed in Helm the plugin may access
+ // see: https://extism.org/docs/concepts/host-functions/
+ HostFunctions []string `yaml:"hostFunctions,omitempty"`
+
+ // The name of entry function name to call in the plugin
+ // Defaults to "helm_plugin_main".
+ EntryFuncName string `yaml:"entryFuncName,omitempty"`
+}
+
+var _ RuntimeConfig = (*RuntimeConfigExtismV1)(nil)
+
+func (r *RuntimeConfigExtismV1) Validate() error {
+ // TODO
+ return nil
+}
+
+type RuntimeExtismV1 struct {
+ HostFunctions map[string]extism.HostFunction
+ CompilationCache wazero.CompilationCache
+}
+
+var _ Runtime = (*RuntimeExtismV1)(nil)
+
+func (r *RuntimeExtismV1) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) {
+
+ rc, ok := metadata.RuntimeConfig.(*RuntimeConfigExtismV1)
+ if !ok {
+ return nil, fmt.Errorf("invalid extism/v1 plugin runtime config type: %T", metadata.RuntimeConfig)
+ }
+
+ wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename)
+ if _, err := os.Stat(wasmFile); err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("wasm binary missing for extism/v1 plugin: %q", wasmFile)
+ }
+ return nil, fmt.Errorf("failed to stat extism/v1 plugin wasm binary %q: %w", wasmFile, err)
+ }
+
+ return &ExtismV1PluginRuntime{
+ metadata: *metadata,
+ dir: pluginDir,
+ rc: rc,
+ r: r,
+ }, nil
+}
+
+type ExtismV1PluginRuntime struct {
+ metadata Metadata
+ dir string
+ rc *RuntimeConfigExtismV1
+ r *RuntimeExtismV1
+}
+
+var _ Plugin = (*ExtismV1PluginRuntime)(nil)
+
+func (p *ExtismV1PluginRuntime) Metadata() Metadata {
+ return p.metadata
+}
+
+func (p *ExtismV1PluginRuntime) Dir() string {
+ return p.dir
+}
+
+func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Output, error) {
+
+ var tmpDir string
+ if p.rc.FileSystem.CreateTempDir {
+ tmpDirInner, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*")
+ slog.Debug("created plugin temp dir", slog.String("dir", tmpDirInner), slog.String("plugin", p.metadata.Name))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temp dir for extism compilation cache: %w", err)
+ }
+ defer func() {
+ if err := os.RemoveAll(tmpDir); err != nil {
+ slog.Warn("failed to remove plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name), slog.String("error", err.Error()))
+ }
+ }()
+
+ tmpDir = tmpDirInner
+ }
+
+ manifest, err := buildManifest(p.dir, tmpDir, p.rc)
+ if err != nil {
+ return nil, err
+ }
+
+ config := buildPluginConfig(input, p.r)
+
+ hostFunctions, err := buildHostFunctions(p.r.HostFunctions, p.rc)
+ if err != nil {
+ return nil, err
+ }
+
+ pe, err := extism.NewPlugin(ctx, manifest, config, hostFunctions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create existing plugin: %w", err)
+ }
+
+ pe.SetLogger(func(logLevel extism.LogLevel, s string) {
+ slog.Debug(s, slog.String("level", logLevel.String()), slog.String("plugin", p.metadata.Name))
+ })
+
+ inputData, err := json.Marshal(input.Message)
+ if err != nil {
+ return nil, fmt.Errorf("failed to json marshal plugin input message: %T: %w", input.Message, err)
+ }
+
+ slog.Debug("plugin input", slog.String("plugin", p.metadata.Name), slog.String("inputData", string(inputData)))
+
+ entryFuncName := p.rc.EntryFuncName
+ if entryFuncName == "" {
+ entryFuncName = "helm_plugin_main"
+ }
+
+ exitCode, outputData, err := pe.Call(entryFuncName, inputData)
+ if err != nil {
+ return nil, fmt.Errorf("plugin error: %w", err)
+ }
+
+ if exitCode != 0 {
+ return nil, &InvokeExecError{
+ ExitCode: int(exitCode),
+ }
+ }
+
+ slog.Debug("plugin output", slog.String("plugin", p.metadata.Name), slog.Int("exitCode", int(exitCode)), slog.String("outputData", string(outputData)))
+
+ outputMessage := reflect.New(pluginTypesIndex[p.metadata.Type].outputType)
+ if err := json.Unmarshal(outputData, outputMessage.Interface()); err != nil {
+ return nil, fmt.Errorf("failed to json marshal plugin output message: %T: %w", outputMessage, err)
+ }
+
+ output := &Output{
+ Message: outputMessage.Elem().Interface(),
+ }
+
+ return output, nil
+}
+
+func buildManifest(pluginDir string, tmpDir string, rc *RuntimeConfigExtismV1) (extism.Manifest, error) {
+ wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename)
+
+ allowedHosts := rc.AllowedHosts
+ if allowedHosts == nil {
+ allowedHosts = []string{}
+ }
+
+ allowedPaths := map[string]string{}
+ if tmpDir != "" {
+ allowedPaths[tmpDir] = "/tmp"
+ }
+
+ return extism.Manifest{
+ Wasm: []extism.Wasm{
+ extism.WasmFile{
+ Path: wasmFile,
+ Name: wasmFile,
+ },
+ },
+ Memory: &extism.ManifestMemory{
+ MaxPages: rc.Memory.MaxPages,
+ MaxHttpResponseBytes: rc.Memory.MaxHTTPResponseBytes,
+ MaxVarBytes: rc.Memory.MaxVarBytes,
+ },
+ Config: rc.Config,
+ AllowedHosts: allowedHosts,
+ AllowedPaths: allowedPaths,
+ Timeout: rc.Timeout,
+ }, nil
+}
+
+func buildPluginConfig(input *Input, r *RuntimeExtismV1) extism.PluginConfig {
+ mc := wazero.NewModuleConfig().
+ WithSysWalltime()
+ if input.Stdin != nil {
+ mc = mc.WithStdin(input.Stdin)
+ }
+ if input.Stdout != nil {
+ mc = mc.WithStdout(input.Stdout)
+ }
+ if input.Stderr != nil {
+ mc = mc.WithStderr(input.Stderr)
+ }
+ if len(input.Env) > 0 {
+ env := parseEnv(input.Env)
+ for k, v := range env {
+ mc = mc.WithEnv(k, v)
+ }
+ }
+
+ config := extism.PluginConfig{
+ ModuleConfig: mc,
+ RuntimeConfig: wazero.NewRuntimeConfigCompiler().
+ WithCloseOnContextDone(true).
+ WithCompilationCache(r.CompilationCache),
+ EnableWasi: true,
+ EnableHttpResponseHeaders: true,
+ }
+
+ return config
+}
+
+func buildHostFunctions(hostFunctions map[string]extism.HostFunction, rc *RuntimeConfigExtismV1) ([]extism.HostFunction, error) {
+ result := make([]extism.HostFunction, len(rc.HostFunctions))
+ for _, fnName := range rc.HostFunctions {
+ fn, ok := hostFunctions[fnName]
+ if !ok {
+ return nil, fmt.Errorf("plugin requested host function %q not found", fnName)
+ }
+
+ result = append(result, fn)
+ }
+
+ return result, nil
+}
diff --git a/internal/plugin/runtime_extismv1_test.go b/internal/plugin/runtime_extismv1_test.go
new file mode 100644
index 000000000..8d9c55195
--- /dev/null
+++ b/internal/plugin/runtime_extismv1_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ extism "github.com/extism/go-sdk"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type pluginRaw struct {
+ Metadata Metadata
+ Dir string
+}
+
+func buildLoadExtismPlugin(t *testing.T, dir string) pluginRaw {
+ t.Helper()
+
+ pluginFile := filepath.Join(dir, PluginFileName)
+
+ metadataData, err := os.ReadFile(pluginFile)
+ require.NoError(t, err)
+
+ m, err := loadMetadata(metadataData)
+ require.NoError(t, err)
+ require.Equal(t, "extism/v1", m.Runtime, "expected plugin runtime to be extism/v1")
+
+ cmd := exec.Command("make", "-C", dir)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ require.NoError(t, cmd.Run(), "failed to build plugin in %q", dir)
+
+ return pluginRaw{
+ Metadata: *m,
+ Dir: dir,
+ }
+}
+
+func TestRuntimeConfigExtismV1Validate(t *testing.T) {
+ rc := RuntimeConfigExtismV1{}
+ err := rc.Validate()
+ assert.NoError(t, err, "expected no error for empty RuntimeConfigExtismV1")
+}
+
+func TestRuntimeExtismV1InvokePlugin(t *testing.T) {
+ r := RuntimeExtismV1{}
+
+ pr := buildLoadExtismPlugin(t, "testdata/src/extismv1-test")
+ require.Equal(t, "test/v1", pr.Metadata.Type)
+
+ p, err := r.CreatePlugin(pr.Dir, &pr.Metadata)
+
+ assert.NoError(t, err, "expected no error creating plugin")
+ assert.NotNil(t, p, "expected plugin to be created")
+
+ output, err := p.Invoke(t.Context(), &Input{
+ Message: schema.InputMessageTestV1{
+ Name: "Phippy",
+ },
+ })
+ require.Nil(t, err)
+
+ msg := output.Message.(schema.OutputMessageTestV1)
+ assert.Equal(t, "Hello, Phippy! (6)", msg.Greeting)
+}
+
+func TestBuildManifest(t *testing.T) {
+ rc := &RuntimeConfigExtismV1{
+ Memory: RuntimeConfigExtismV1Memory{
+ MaxPages: 8,
+ MaxHTTPResponseBytes: 81920,
+ MaxVarBytes: 8192,
+ },
+ FileSystem: RuntimeConfigExtismV1FileSystem{
+ CreateTempDir: true,
+ },
+ Config: map[string]string{"CONFIG_KEY": "config_value"},
+ AllowedHosts: []string{"example.com", "api.example.com"},
+ Timeout: 5000,
+ }
+
+ expected := extism.Manifest{
+ Wasm: []extism.Wasm{
+ extism.WasmFile{
+ Path: "/path/to/plugin/plugin.wasm",
+ Name: "/path/to/plugin/plugin.wasm",
+ },
+ },
+ Memory: &extism.ManifestMemory{
+ MaxPages: 8,
+ MaxHttpResponseBytes: 81920,
+ MaxVarBytes: 8192,
+ },
+ Config: map[string]string{"CONFIG_KEY": "config_value"},
+ AllowedHosts: []string{"example.com", "api.example.com"},
+ AllowedPaths: map[string]string{"/tmp/foo": "/tmp"},
+ Timeout: 5000,
+ }
+
+ manifest, err := buildManifest("/path/to/plugin", "/tmp/foo", rc)
+ require.NoError(t, err)
+ assert.Equal(t, expected, manifest)
+}
diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go
new file mode 100644
index 000000000..5e6676a00
--- /dev/null
+++ b/internal/plugin/runtime_subprocess.go
@@ -0,0 +1,278 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "log/slog"
+ "maps"
+ "os"
+ "os/exec"
+ "slices"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+// SubprocessProtocolCommand maps a given protocol to the getter command used to retrieve artifacts for that protcol
+type SubprocessProtocolCommand struct {
+ // Protocols are the list of schemes from the charts URL.
+ Protocols []string `yaml:"protocols"`
+ // PlatformCommand is the platform based command which the plugin performs
+ // to download for the corresponding getter Protocols.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+}
+
+// RuntimeConfigSubprocess implements RuntimeConfig for RuntimeSubprocess
+type RuntimeConfigSubprocess struct {
+ // PlatformCommand is a list containing a plugin command, with a platform selector and support for args.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+ // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+ PlatformHooks PlatformHooks `yaml:"platformHooks"`
+ // ProtocolCommands allows the plugin to specify protocol specific commands
+ //
+ // Obsolete/deprecated: This is a compatibility hangover from the old plugin downloader mechanism, which was extended
+ // to support multiple protocols in a given plugin. The command supplied in PlatformCommand should implement protocol
+ // specific logic by inspecting the download URL
+ ProtocolCommands []SubprocessProtocolCommand `yaml:"protocolCommands,omitempty"`
+
+ expandHookArgs bool
+}
+
+var _ RuntimeConfig = (*RuntimeConfigSubprocess)(nil)
+
+func (r *RuntimeConfigSubprocess) GetType() string { return "subprocess" }
+
+func (r *RuntimeConfigSubprocess) Validate() error {
+ return nil
+}
+
+type RuntimeSubprocess struct {
+ EnvVars map[string]string
+}
+
+var _ Runtime = (*RuntimeSubprocess)(nil)
+
+// CreatePlugin implementation for Runtime
+func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) {
+ return &SubprocessPluginRuntime{
+ metadata: *metadata,
+ pluginDir: pluginDir,
+ RuntimeConfig: *(metadata.RuntimeConfig.(*RuntimeConfigSubprocess)),
+ EnvVars: maps.Clone(r.EnvVars),
+ }, nil
+}
+
+// SubprocessPluginRuntime implements the Plugin interface for subprocess execution
+type SubprocessPluginRuntime struct {
+ metadata Metadata
+ pluginDir string
+ RuntimeConfig RuntimeConfigSubprocess
+ EnvVars map[string]string
+}
+
+var _ Plugin = (*SubprocessPluginRuntime)(nil)
+
+func (r *SubprocessPluginRuntime) Dir() string {
+ return r.pluginDir
+}
+
+func (r *SubprocessPluginRuntime) Metadata() Metadata {
+ return r.metadata
+}
+
+func (r *SubprocessPluginRuntime) Invoke(_ context.Context, input *Input) (*Output, error) {
+ switch input.Message.(type) {
+ case schema.InputMessageCLIV1:
+ return r.runCLI(input)
+ case schema.InputMessageGetterV1:
+ return r.runGetter(input)
+ case schema.InputMessagePostRendererV1:
+ return r.runPostrenderer(input)
+ default:
+ return nil, fmt.Errorf("unsupported subprocess plugin type %q", r.metadata.Type)
+ }
+}
+
+// InvokeWithEnv executes a plugin command with custom environment and I/O streams
+// This method allows execution with different command/args than the plugin's default
+func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env []string, stdin io.Reader, stdout, stderr io.Writer) error {
+ mainCmdExp := os.ExpandEnv(main)
+ cmd := exec.Command(mainCmdExp, argv...)
+ cmd.Env = slices.Clone(os.Environ())
+ cmd.Env = append(
+ cmd.Env,
+ fmt.Sprintf("HELM_PLUGIN_NAME=%s", r.metadata.Name),
+ fmt.Sprintf("HELM_PLUGIN_DIR=%s", r.pluginDir))
+ cmd.Env = append(cmd.Env, env...)
+
+ cmd.Stdin = stdin
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SubprocessPluginRuntime) InvokeHook(event string) error {
+ cmds := r.RuntimeConfig.PlatformHooks[event]
+
+ if len(cmds) == 0 {
+ return nil
+ }
+
+ env := parseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ main, argv, err := PrepareCommands(cmds, r.RuntimeConfig.expandHookArgs, []string{}, env)
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.Command(main, argv...)
+ cmd.Env = formatEnv(env)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+
+ slog.Debug("executing plugin hook command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := cmd.Run(); err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ os.Stderr.Write(eerr.Stderr)
+ return fmt.Errorf("plugin %s hook for %q exited with error", event, r.metadata.Name)
+ }
+ return err
+ }
+ return nil
+}
+
+// TODO decide the best way to handle this code
+// right now we implement status and error return in 3 slightly different ways in this file
+// then replace the other three with a call to this func
+func executeCmd(prog *exec.Cmd, pluginName string) error {
+ if err := prog.Run(); err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ slog.Debug(
+ "plugin execution failed",
+ slog.String("pluginName", pluginName),
+ slog.String("error", err.Error()),
+ slog.Int("exitCode", eerr.ExitCode()),
+ slog.String("stderr", string(bytes.TrimSpace(eerr.Stderr))))
+ return &InvokeExecError{
+ Err: fmt.Errorf("plugin %q exited with error", pluginName),
+ ExitCode: eerr.ExitCode(),
+ }
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) {
+ if _, ok := input.Message.(schema.InputMessageCLIV1); !ok {
+ return nil, fmt.Errorf("plugin %q input message does not implement InputMessageCLIV1", r.metadata.Name)
+ }
+
+ extraArgs := input.Message.(schema.InputMessageCLIV1).ExtraArgs
+
+ cmds := r.RuntimeConfig.PlatformCommand
+
+ env := parseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(parseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ command, args, err := PrepareCommands(cmds, true, extraArgs, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare plugin command: %w", err)
+ }
+
+ cmd := exec.Command(command, args...)
+ cmd.Env = formatEnv(env)
+
+ cmd.Stdin = input.Stdin
+ cmd.Stdout = input.Stdout
+ cmd.Stderr = input.Stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessageCLIV1{},
+ }, nil
+}
+
+func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) {
+ if _, ok := input.Message.(schema.InputMessagePostRendererV1); !ok {
+ return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name)
+ }
+
+ env := parseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(parseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ msg := input.Message.(schema.InputMessagePostRendererV1)
+ cmds := r.RuntimeConfig.PlatformCommand
+ command, args, err := PrepareCommands(cmds, true, msg.ExtraArgs, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare plugin command: %w", err)
+ }
+
+ cmd := exec.Command(
+ command,
+ args...)
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ defer stdin.Close()
+ io.Copy(stdin, msg.Manifests)
+ }()
+
+ postRendered := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+
+ cmd.Env = formatEnv(env)
+ cmd.Stdout = postRendered
+ cmd.Stderr = stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessagePostRendererV1{
+ Manifests: postRendered,
+ },
+ }, nil
+}
diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go
new file mode 100644
index 000000000..6a41b149f
--- /dev/null
+++ b/internal/plugin/runtime_subprocess_getter.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "log/slog"
+ "maps"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "slices"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func getProtocolCommand(commands []SubprocessProtocolCommand, protocol string) *SubprocessProtocolCommand {
+ for _, c := range commands {
+ if slices.Contains(c.Protocols, protocol) {
+ return &c
+ }
+ }
+
+ return nil
+}
+
+// TODO can we replace a lot of this func with RuntimeSubprocess.invokeWithEnv?
+func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) {
+ msg, ok := (input.Message).(schema.InputMessageGetterV1)
+ if !ok {
+ return nil, fmt.Errorf("expected input type schema.InputMessageGetterV1, got %T", input)
+ }
+
+ tmpDir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("helm-plugin-%s-", r.metadata.Name))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temporary directory: %w", err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ d := getProtocolCommand(r.RuntimeConfig.ProtocolCommands, msg.Protocol)
+ if d == nil {
+ return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol)
+ }
+
+ env := parseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(parseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+ env["HELM_PLUGIN_USERNAME"] = msg.Options.Username
+ env["HELM_PLUGIN_PASSWORD"] = msg.Options.Password
+ env["HELM_PLUGIN_PASS_CREDENTIALS_ALL"] = fmt.Sprintf("%t", msg.Options.PassCredentialsAll)
+
+ command, args, err := PrepareCommands(d.PlatformCommand, false, []string{}, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare commands for protocol %q: %w", msg.Protocol, err)
+ }
+
+ args = append(
+ args,
+ msg.Options.CertFile,
+ msg.Options.KeyFile,
+ msg.Options.CAFile,
+ msg.Href)
+
+ buf := bytes.Buffer{} // subprocess getters are expected to write content to stdout
+
+ pluginCommand := filepath.Join(r.pluginDir, command)
+ cmd := exec.Command(
+ pluginCommand,
+ args...)
+ cmd.Env = formatEnv(env)
+ cmd.Stdout = &buf
+ cmd.Stderr = os.Stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessageGetterV1{
+ Data: buf.Bytes(),
+ },
+ }, nil
+}
diff --git a/pkg/plugin/hooks.go b/internal/plugin/runtime_subprocess_hooks.go
similarity index 94%
rename from pkg/plugin/hooks.go
rename to internal/plugin/runtime_subprocess_hooks.go
index 10dc8580e..7b4ff5a38 100644
--- a/pkg/plugin/hooks.go
+++ b/internal/plugin/runtime_subprocess_hooks.go
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package plugin // import "helm.sh/helm/v4/pkg/plugin"
+package plugin // import "helm.sh/helm/v4/internal/plugin"
// Types of hooks
const (
diff --git a/internal/plugin/runtime_subprocess_test.go b/internal/plugin/runtime_subprocess_test.go
new file mode 100644
index 000000000..243f4ad7c
--- /dev/null
+++ b/internal/plugin/runtime_subprocess_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.yaml.in/yaml/v3"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func mockSubprocessCLIPluginErrorExit(t *testing.T, pluginName string, exitCode uint8) *SubprocessPluginRuntime {
+ t.Helper()
+
+ rc := RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {Command: "sh", Args: []string{"-c", fmt.Sprintf("echo \"mock plugin $@\"; exit %d", exitCode)}},
+ },
+ }
+
+ pluginDir := t.TempDir()
+
+ md := Metadata{
+ Name: pluginName,
+ Version: "v0.1.2",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: "Mock plugin",
+ ShortHelp: "Mock plugin",
+ LongHelp: "Mock plugin for testing",
+ IgnoreFlags: false,
+ },
+ RuntimeConfig: &rc,
+ }
+
+ data, err := yaml.Marshal(md)
+ require.NoError(t, err)
+ os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), data, 0o644)
+
+ return &SubprocessPluginRuntime{
+ metadata: md,
+ pluginDir: pluginDir,
+ RuntimeConfig: rc,
+ }
+}
+
+func TestSubprocessPluginRuntime(t *testing.T) {
+ p := mockSubprocessCLIPluginErrorExit(t, "foo", 56)
+
+ output, err := p.Invoke(t.Context(), &Input{
+ Message: schema.InputMessageCLIV1{
+ ExtraArgs: []string{"arg1", "arg2"},
+ //Env: []string{"FOO=bar"},
+ },
+ })
+
+ require.Error(t, err)
+ ieerr, ok := err.(*InvokeExecError)
+ require.True(t, ok, "expected InvokeExecError, got %T", err)
+ assert.Equal(t, 56, ieerr.ExitCode)
+
+ assert.Nil(t, output)
+}
diff --git a/internal/plugin/runtime_test.go b/internal/plugin/runtime_test.go
new file mode 100644
index 000000000..f8fe481c1
--- /dev/null
+++ b/internal/plugin/runtime_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseEnv(t *testing.T) {
+ type testCase struct {
+ env []string
+ expected map[string]string
+ }
+
+ testCases := map[string]testCase{
+ "empty": {
+ env: []string{},
+ expected: map[string]string{},
+ },
+ "single": {
+ env: []string{"KEY=value"},
+ expected: map[string]string{"KEY": "value"},
+ },
+ "multiple": {
+ env: []string{"KEY1=value1", "KEY2=value2"},
+ expected: map[string]string{"KEY1": "value1", "KEY2": "value2"},
+ },
+ "no_value": {
+ env: []string{"KEY1=value1", "KEY2="},
+ expected: map[string]string{"KEY1": "value1", "KEY2": ""},
+ },
+ "duplicate_keys": {
+ env: []string{"KEY=value1", "KEY=value2"},
+ expected: map[string]string{"KEY": "value2"}, // last value should overwrite
+ },
+ "empty_strings": {
+ env: []string{"", "KEY=value", ""},
+ expected: map[string]string{"KEY": "value"},
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ result := parseEnv(tc.env)
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func TestFormatEnv(t *testing.T) {
+ type testCase struct {
+ env map[string]string
+ expected []string
+ }
+
+ testCases := map[string]testCase{
+ "empty": {
+ env: map[string]string{},
+ expected: []string{},
+ },
+ "single": {
+ env: map[string]string{"KEY": "value"},
+ expected: []string{"KEY=value"},
+ },
+ "multiple": {
+ env: map[string]string{"KEY1": "value1", "KEY2": "value2"},
+ expected: []string{"KEY1=value1", "KEY2=value2"},
+ },
+ "empty_key": {
+ env: map[string]string{"": "value1", "KEY2": "value2"},
+ expected: []string{"=value1", "KEY2=value2"},
+ },
+ "empty_value": {
+ env: map[string]string{"KEY1": "value1", "KEY2": "", "KEY3": "value3"},
+ expected: []string{"KEY1=value1", "KEY2=", "KEY3=value3"},
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ result := formatEnv(tc.env)
+ assert.ElementsMatch(t, tc.expected, result)
+ })
+ }
+}
diff --git a/internal/plugin/schema/cli.go b/internal/plugin/schema/cli.go
new file mode 100644
index 000000000..702b27e45
--- /dev/null
+++ b/internal/plugin/schema/cli.go
@@ -0,0 +1,48 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+import (
+ "bytes"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+type InputMessageCLIV1 struct {
+ ExtraArgs []string `json:"extraArgs"`
+ Settings *cli.EnvSettings `json:"settings"`
+}
+
+type OutputMessageCLIV1 struct {
+ Data *bytes.Buffer `json:"data"`
+}
+
+// ConfigCLIV1 represents the configuration for CLI plugins
+type ConfigCLIV1 struct {
+ // Usage is the single-line usage text shown in help
+ // For recommended syntax, see [spf13/cobra.command.Command] Use field comment:
+ // https://pkg.go.dev/github.com/spf13/cobra#Command
+ Usage string `yaml:"usage"`
+ // ShortHelp is the short description shown in the 'helm help' output
+ ShortHelp string `yaml:"shortHelp"`
+ // LongHelp is the long message shown in the 'helm help ' output
+ LongHelp string `yaml:"longHelp"`
+ // IgnoreFlags ignores any flags passed in from Helm
+ IgnoreFlags bool `yaml:"ignoreFlags"`
+}
+
+func (c *ConfigCLIV1) Validate() error {
+ // Config validation for CLI plugins
+ return nil
+}
diff --git a/internal/plugin/schema/doc.go b/internal/plugin/schema/doc.go
new file mode 100644
index 000000000..4b3fe5d49
--- /dev/null
+++ b/internal/plugin/schema/doc.go
@@ -0,0 +1,18 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+
+ */
+
+package schema
diff --git a/internal/plugin/schema/getter.go b/internal/plugin/schema/getter.go
new file mode 100644
index 000000000..2c5e81df1
--- /dev/null
+++ b/internal/plugin/schema/getter.go
@@ -0,0 +1,66 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+import (
+ "fmt"
+ "time"
+)
+
+// TODO: can we generate these plugin input/output messages?
+
+type GetterOptionsV1 struct {
+ URL string
+ CertFile string
+ KeyFile string
+ CAFile string
+ UNTar bool
+ InsecureSkipVerifyTLS bool
+ PlainHTTP bool
+ AcceptHeader string
+ Username string
+ Password string
+ PassCredentialsAll bool
+ UserAgent string
+ Version string
+ Timeout time.Duration
+}
+
+type InputMessageGetterV1 struct {
+ Href string `json:"href"`
+ Protocol string `json:"protocol"`
+ Options GetterOptionsV1 `json:"options"`
+}
+
+type OutputMessageGetterV1 struct {
+ Data []byte `json:"data"`
+}
+
+// ConfigGetterV1 represents the configuration for download plugins
+type ConfigGetterV1 struct {
+ // Protocols are the list of URL schemes supported by this downloader
+ Protocols []string `yaml:"protocols"`
+}
+
+func (c *ConfigGetterV1) Validate() error {
+ if len(c.Protocols) == 0 {
+ return fmt.Errorf("getter has no protocols")
+ }
+ for i, protocol := range c.Protocols {
+ if protocol == "" {
+ return fmt.Errorf("getter has empty protocol at index %d", i)
+ }
+ }
+ return nil
+}
diff --git a/internal/plugin/schema/postrenderer.go b/internal/plugin/schema/postrenderer.go
new file mode 100644
index 000000000..ef51a8a61
--- /dev/null
+++ b/internal/plugin/schema/postrenderer.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+ "bytes"
+)
+
+// InputMessagePostRendererV1 implements Input.Message
+type InputMessagePostRendererV1 struct {
+ Manifests *bytes.Buffer `json:"manifests"`
+ // from CLI --post-renderer-args
+ ExtraArgs []string `json:"extraArgs"`
+}
+
+type OutputMessagePostRendererV1 struct {
+ Manifests *bytes.Buffer `json:"manifests"`
+}
+
+type ConfigPostRendererV1 struct{}
+
+func (c *ConfigPostRendererV1) Validate() error {
+ return nil
+}
diff --git a/internal/plugin/schema/test.go b/internal/plugin/schema/test.go
new file mode 100644
index 000000000..97efa0fde
--- /dev/null
+++ b/internal/plugin/schema/test.go
@@ -0,0 +1,28 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+type InputMessageTestV1 struct {
+ Name string
+}
+
+type OutputMessageTestV1 struct {
+ Greeting string
+}
+
+type ConfigTestV1 struct{}
+
+func (c *ConfigTestV1) Validate() error {
+ return nil
+}
diff --git a/internal/plugin/sign.go b/internal/plugin/sign.go
new file mode 100644
index 000000000..6b8aafd3e
--- /dev/null
+++ b/internal/plugin/sign.go
@@ -0,0 +1,156 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// SignPlugin signs a plugin using the SHA256 hash of the tarball data.
+//
+// This is used when packaging and signing a plugin from tarball data.
+// It creates a signature that includes the tarball hash and plugin metadata,
+// allowing verification of the original tarball later.
+func SignPlugin(tarballData []byte, filename string, signer *provenance.Signatory) (string, error) {
+ // Extract plugin metadata from tarball data
+ pluginMeta, err := ExtractTgzPluginMetadata(bytes.NewReader(tarballData))
+ if err != nil {
+ return "", fmt.Errorf("failed to extract plugin metadata: %w", err)
+ }
+
+ // Marshal plugin metadata to YAML bytes
+ metadataBytes, err := yaml.Marshal(pluginMeta)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal plugin metadata: %w", err)
+ }
+
+ // Use the generic provenance signing function
+ return signer.ClearSign(tarballData, filename, metadataBytes)
+}
+
+// ExtractTgzPluginMetadata extracts plugin metadata from a gzipped tarball reader
+func ExtractTgzPluginMetadata(r io.Reader) (*Metadata, error) {
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ defer gzr.Close()
+
+ tr := tar.NewReader(gzr)
+ for {
+ header, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Look for plugin.yaml file
+ if filepath.Base(header.Name) == "plugin.yaml" {
+ data, err := io.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse the plugin metadata
+ metadata, err := loadMetadata(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return metadata, nil
+ }
+ }
+
+ return nil, errors.New("plugin.yaml not found in tarball")
+}
+
+// parsePluginMessageBlock parses a signed message block to extract plugin metadata and checksums
+func parsePluginMessageBlock(data []byte) (*Metadata, *provenance.SumCollection, error) {
+ sc := &provenance.SumCollection{}
+
+ // We only need the checksums for verification, not the full metadata
+ if err := provenance.ParseMessageBlock(data, nil, sc); err != nil {
+ return nil, sc, err
+ }
+ return nil, sc, nil
+}
+
+// CreatePluginTarball creates a gzipped tarball from a plugin directory
+func CreatePluginTarball(sourceDir, pluginName string, w io.Writer) error {
+ gzw := gzip.NewWriter(w)
+ defer gzw.Close()
+
+ tw := tar.NewWriter(gzw)
+ defer tw.Close()
+
+ // Use the plugin name as the base directory in the tarball
+ baseDir := pluginName
+
+ // Walk the directory tree
+ return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Create header
+ header, err := tar.FileInfoHeader(info, "")
+ if err != nil {
+ return err
+ }
+
+ // Update the name to be relative to the source directory
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // Include the base directory name in the tarball
+ header.Name = filepath.Join(baseDir, relPath)
+
+ // Write header
+ if err := tw.WriteHeader(header); err != nil {
+ return err
+ }
+
+ // If it's a regular file, write its content
+ if info.Mode().IsRegular() {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ if _, err := io.Copy(tw, file); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+}
diff --git a/internal/plugin/sign_test.go b/internal/plugin/sign_test.go
new file mode 100644
index 000000000..fce2dbeb3
--- /dev/null
+++ b/internal/plugin/sign_test.go
@@ -0,0 +1,98 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+func TestSignPlugin(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ pluginYAML := `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a tarball
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Create a test key for signing
+ keyring := "../../pkg/cmd/testdata/helm-test-key.secret"
+ signer, err := provenance.NewFromKeyring(keyring, "helm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := signer.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte(""), nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatalf("failed to read tarball: %v", err)
+ }
+
+ // Sign the plugin tarball
+ sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ t.Fatalf("failed to sign plugin: %v", err)
+ }
+
+ // Verify the signature contains the expected content
+ if !strings.Contains(sig, "-----BEGIN PGP SIGNED MESSAGE-----") {
+ t.Error("signature does not contain PGP header")
+ }
+
+ // Verify the tarball hash is in the signature
+ expectedHash, err := provenance.DigestFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The signature should contain the tarball hash
+ if !strings.Contains(sig, "sha256:"+expectedHash) {
+ t.Errorf("signature does not contain expected tarball hash: sha256:%s", expectedHash)
+ }
+}
diff --git a/internal/plugin/signing_info.go b/internal/plugin/signing_info.go
new file mode 100644
index 000000000..43d01c893
--- /dev/null
+++ b/internal/plugin/signing_info.go
@@ -0,0 +1,178 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/crypto/openpgp/clearsign" //nolint
+
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// SigningInfo contains information about a plugin's signing status
+type SigningInfo struct {
+ // Status can be:
+ // - "local dev": Plugin is a symlink (development mode)
+ // - "unsigned": No provenance file found
+ // - "invalid provenance": Provenance file is malformed
+ // - "mismatched provenance": Provenance file does not match the installed tarball
+ // - "signed": Valid signature exists for the installed tarball
+ Status string
+ IsSigned bool // True if plugin has a valid signature (even if not verified against keyring)
+}
+
+// GetPluginSigningInfo returns signing information for an installed plugin
+func GetPluginSigningInfo(metadata Metadata) (*SigningInfo, error) {
+ pluginName := metadata.Name
+ pluginDir := helmpath.DataPath("plugins", pluginName)
+
+ // Check if plugin directory exists
+ fi, err := os.Lstat(pluginDir)
+ if err != nil {
+ return nil, fmt.Errorf("plugin %s not found: %w", pluginName, err)
+ }
+
+ // Check if it's a symlink (local development)
+ if fi.Mode()&os.ModeSymlink != 0 {
+ return &SigningInfo{
+ Status: "local dev",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Find the exact tarball file for this plugin
+ pluginsDir := helmpath.DataPath("plugins")
+ tarballPath := filepath.Join(pluginsDir, fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version))
+ if _, err := os.Stat(tarballPath); err != nil {
+ return &SigningInfo{
+ Status: "unsigned",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Check for .prov file associated with the tarball
+ provFile := tarballPath + ".prov"
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &SigningInfo{
+ Status: "unsigned",
+ IsSigned: false,
+ }, nil
+ }
+ return nil, fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ // Parse the provenance file to check validity
+ block, _ := clearsign.Decode(provData)
+ if block == nil {
+ return &SigningInfo{
+ Status: "invalid provenance",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Check if provenance matches the actual tarball
+ blockContent := string(block.Plaintext)
+ if !validateProvenanceHash(blockContent, tarballPath) {
+ return &SigningInfo{
+ Status: "mismatched provenance",
+ IsSigned: false,
+ }, nil
+ }
+
+ // We have a provenance file that is valid for this plugin
+ // Without a keyring, we can't verify the signature, but we know:
+ // 1. A .prov file exists
+ // 2. It's a valid clearsigned document (cryptographically signed)
+ // 3. The provenance contains valid checksums
+ return &SigningInfo{
+ Status: "signed",
+ IsSigned: true,
+ }, nil
+}
+
+func validateProvenanceHash(blockContent string, tarballPath string) bool {
+ // Parse provenance to get the expected hash
+ _, sums, err := parsePluginMessageBlock([]byte(blockContent))
+ if err != nil {
+ return false
+ }
+
+ // Must have file checksums
+ if len(sums.Files) == 0 {
+ return false
+ }
+
+ // Calculate actual hash of the tarball
+ actualHash, err := calculateFileHash(tarballPath)
+ if err != nil {
+ return false
+ }
+
+ // Check if the actual hash matches the expected hash in the provenance
+ for filename, expectedHash := range sums.Files {
+ if strings.Contains(filename, filepath.Base(tarballPath)) && expectedHash == actualHash {
+ return true
+ }
+ }
+
+ return false
+}
+
+// calculateFileHash calculates the SHA256 hash of a file
+func calculateFileHash(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ hasher := sha256.New()
+ if _, err := io.Copy(hasher, file); err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("sha256:%x", hasher.Sum(nil)), nil
+}
+
+// GetSigningInfoForPlugins returns signing info for multiple plugins
+func GetSigningInfoForPlugins(plugins []Plugin) map[string]*SigningInfo {
+ result := make(map[string]*SigningInfo)
+
+ for _, p := range plugins {
+ m := p.Metadata()
+
+ info, err := GetPluginSigningInfo(m)
+ if err != nil {
+ // If there's an error, treat as unsigned
+ result[m.Name] = &SigningInfo{
+ Status: "unknown",
+ IsSigned: false,
+ }
+ } else {
+ result[m.Name] = info
+ }
+ }
+
+ return result
+}
diff --git a/internal/plugin/subprocess_commands.go b/internal/plugin/subprocess_commands.go
new file mode 100644
index 000000000..e21ec2bab
--- /dev/null
+++ b/internal/plugin/subprocess_commands.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// PlatformCommand represents a command for a particular operating system and architecture
+type PlatformCommand struct {
+ OperatingSystem string `yaml:"os"`
+ Architecture string `yaml:"arch"`
+ Command string `yaml:"command"`
+ Args []string `yaml:"args"`
+}
+
+// Returns command and args strings based on the following rules in priority order:
+// - From the PlatformCommand where OS and Arch match the current platform
+// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified
+// - From the PlatformCommand where OS is empty/unspecified and Arch matches the current platform
+// - From the PlatformCommand where OS and Arch are both empty/unspecified
+// - Return nil, nil
+func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) {
+ var command, args []string
+ found := false
+ foundOs := false
+
+ eq := strings.EqualFold
+ for _, c := range cmds {
+ if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) {
+ // Return early for an exact match
+ return strings.Split(c.Command, " "), c.Args
+ }
+
+ if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 {
+ // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match
+ continue
+ }
+
+ if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) {
+ // First OS match with empty arch, can only be overridden by a direct match
+ command = strings.Split(c.Command, " ")
+ args = c.Args
+ found = true
+ foundOs = true
+ } else if !found {
+ // First empty match, can be overridden by a direct match or an OS match
+ command = strings.Split(c.Command, " ")
+ args = c.Args
+ found = true
+ }
+ }
+
+ return command, args
+}
+
+// PrepareCommands takes a []Plugin.PlatformCommand
+// and prepares the command and arguments for execution.
+//
+// It merges extraArgs into any arguments supplied in the plugin. It
+// returns the main command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string, env map[string]string) (string, []string, error) {
+ cmdParts, args := getPlatformCommand(cmds)
+ if len(cmdParts) == 0 || cmdParts[0] == "" {
+ return "", nil, fmt.Errorf("no plugin command is applicable")
+ }
+
+ main := os.Expand(cmdParts[0], func(key string) string {
+ return env[key]
+ })
+ baseArgs := []string{}
+ if len(cmdParts) > 1 {
+ for _, cmdPart := range cmdParts[1:] {
+ if expandArgs {
+ baseArgs = append(baseArgs, os.ExpandEnv(cmdPart))
+ } else {
+ baseArgs = append(baseArgs, cmdPart)
+ }
+ }
+ }
+
+ for _, arg := range args {
+ if expandArgs {
+ baseArgs = append(baseArgs, os.ExpandEnv(arg))
+ } else {
+ baseArgs = append(baseArgs, arg)
+ }
+ }
+
+ if len(extraArgs) > 0 {
+ baseArgs = append(baseArgs, extraArgs...)
+ }
+
+ return main, baseArgs, nil
+}
diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go
new file mode 100644
index 000000000..c1eba7a55
--- /dev/null
+++ b/internal/plugin/subprocess_commands_test.go
@@ -0,0 +1,268 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPrepareCommand(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ platformCommand := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(platformCommand, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandExtraArgs(t *testing.T) {
+
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+ platformCommand := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ extraArgs := []string{"--debug", "--foo", "bar"}
+
+ type testCaseExpected struct {
+ cmdMain string
+ args []string
+ }
+
+ testCases := map[string]struct {
+ ignoreFlags bool
+ expected testCaseExpected
+ }{
+ "ignoreFlags false": {
+ ignoreFlags: false,
+ expected: testCaseExpected{
+ cmdMain: cmdMain,
+ args: []string{"-c", "echo \"test\"", "--debug", "--foo", "bar"},
+ },
+ },
+ "ignoreFlags true": {
+ ignoreFlags: true,
+ expected: testCaseExpected{
+ cmdMain: cmdMain,
+ args: []string{"-c", "echo \"test\""},
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ // extra args are expected when ignoreFlags is unset or false
+ testExtraArgs := extraArgs
+ if tc.ignoreFlags {
+ testExtraArgs = []string{}
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(platformCommand, true, testExtraArgs, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, tc.expected.cmdMain, cmd, "Expected command to match")
+ assert.Equal(t, tc.expected.args, args, "Expected args to match")
+ })
+ }
+}
+
+func TestPrepareCommands(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsExtraArgs(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+ extraArgs := []string{"--debug", "--foo", "bar"}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ expectedArgs := append(cmdArgs, extraArgs...)
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, extraArgs, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, expectedArgs) {
+ t.Fatalf("Expected %v, got %v", expectedArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoArch(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoOsNoArch(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: "", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoMatch(t *testing.T) {
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ }
+
+ env := map[string]string{}
+ if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil {
+ t.Fatalf("Expected error to be returned")
+ }
+}
+
+func TestPrepareCommandsNoCommands(t *testing.T) {
+ cmds := []PlatformCommand{}
+
+ env := map[string]string{}
+ if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil {
+ t.Fatalf("Expected error to be returned")
+ }
+}
+
+func TestPrepareCommandsExpand(t *testing.T) {
+ t.Setenv("TEST", "test")
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"${TEST}\""}
+ cmds := []PlatformCommand{
+ {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
+ }
+
+ expectedArgs := []string{"-c", "echo \"test\""}
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, expectedArgs) {
+ t.Fatalf("Expected %v, got %v", expectedArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoExpand(t *testing.T) {
+ t.Setenv("TEST", "test")
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"${TEST}\""}
+ cmds := []PlatformCommand{
+ {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, false, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
diff --git a/pkg/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml
similarity index 100%
rename from pkg/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml
rename to internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml
diff --git a/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml
new file mode 100644
index 000000000..344141121
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml
@@ -0,0 +1,19 @@
+name: "duplicate-entries"
+version: "0.1.0"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "test duplicate entries"
+ longHelp: |-
+ description
+ ignoreFlags: true
+runtimeConfig:
+ platformCommand:
+ - command: "echo hello"
+ platformHooks:
+ install:
+ - command: "echo installing..."
+ platformHooks:
+ install:
+ - command: "echo installing something different"
diff --git a/pkg/plugin/testdata/plugdir/good/downloader/plugin.yaml b/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml
similarity index 98%
rename from pkg/plugin/testdata/plugdir/good/downloader/plugin.yaml
rename to internal/plugin/testdata/plugdir/good/downloader/plugin.yaml
index c0b90379b..4e85f1f79 100644
--- a/pkg/plugin/testdata/plugdir/good/downloader/plugin.yaml
+++ b/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml
@@ -1,3 +1,4 @@
+---
name: "downloader"
version: "1.2.3"
usage: "usage"
diff --git a/pkg/plugin/testdata/plugdir/good/echo/plugin.yaml b/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml
similarity index 85%
rename from pkg/plugin/testdata/plugdir/good/echo/plugin.yaml
rename to internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml
index 8baa35b6d..ef84a4d8f 100644
--- a/pkg/plugin/testdata/plugdir/good/echo/plugin.yaml
+++ b/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml
@@ -1,4 +1,5 @@
-name: "echo"
+---
+name: "echo-legacy"
version: "1.2.3"
usage: "echo something"
description: |-
diff --git a/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml
new file mode 100644
index 000000000..8bbef9c0f
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml
@@ -0,0 +1,15 @@
+---
+name: "echo-v1"
+version: "1.2.3"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo something"
+ longHelp: |-
+ This is a testing fixture.
+ ignoreFlags: false
+runtimeConfig:
+ command: "echo Hello"
+ hooks:
+ install: "echo Installing"
diff --git a/internal/plugin/testdata/plugdir/good/getter/plugin.yaml b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml
new file mode 100644
index 000000000..7bdee9bde
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml
@@ -0,0 +1,17 @@
+---
+name: "getter"
+version: "1.2.3"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ protocols:
+ - "myprotocol"
+ - "myprotocols"
+runtimeConfig:
+ protocolCommands:
+ - platformCommand:
+ - command: "echo getter"
+ protocols:
+ - "myprotocol"
+ - "myprotocols"
diff --git a/pkg/plugin/testdata/plugdir/good/hello/hello.ps1 b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1
similarity index 100%
rename from pkg/plugin/testdata/plugdir/good/hello/hello.ps1
rename to internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1
diff --git a/pkg/plugin/testdata/plugdir/good/hello/hello.sh b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh
similarity index 100%
rename from pkg/plugin/testdata/plugdir/good/hello/hello.sh
rename to internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh
diff --git a/pkg/plugin/testdata/plugdir/good/hello/plugin.yaml b/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml
similarity index 84%
rename from pkg/plugin/testdata/plugdir/good/hello/plugin.yaml
rename to internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml
index 71dc88259..bf37e0626 100644
--- a/pkg/plugin/testdata/plugdir/good/hello/plugin.yaml
+++ b/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml
@@ -1,25 +1,22 @@
-name: "hello"
+---
+name: "hello-legacy"
version: "0.1.0"
-usage: "usage"
+usage: "echo hello message"
description: |-
description
platformCommand:
- os: linux
- arch:
command: "sh"
args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"]
- os: windows
- arch:
command: "pwsh"
args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"]
ignoreFlags: true
platformHooks:
install:
- os: linux
- arch: ""
command: "sh"
args: ["-c", 'echo "installing..."']
- os: windows
- arch: ""
command: "pwsh"
args: ["-c", 'echo "installing..."']
diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 b/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1
new file mode 100644
index 000000000..bee61f27d
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1
@@ -0,0 +1,3 @@
+#!/usr/bin/env pwsh
+
+Write-Host "Hello, world!"
diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh
new file mode 100755
index 000000000..dcfd58876
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo "Hello from a Helm plugin"
+
+echo "PARAMS"
+echo $*
+
+$HELM_BIN ls --all
+
diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml
new file mode 100644
index 000000000..044a3476d
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml
@@ -0,0 +1,32 @@
+---
+name: "hello-v1"
+version: "0.1.0"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ usage: hello [params]...
+ shortHelp: "echo hello message"
+ longHelp: |-
+ description
+ ignoreFlags: true
+runtimeConfig:
+ platformCommand:
+ - os: linux
+ arch:
+ command: "sh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"]
+ - os: windows
+ arch:
+ command: "pwsh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"]
+ platformHooks:
+ install:
+ - os: linux
+ arch: ""
+ command: "sh"
+ args: ["-c", 'echo "installing..."']
+ - os: windows
+ arch: ""
+ command: "pwsh"
+ args: ["-c", 'echo "installing..."']
diff --git a/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..30f1599b4
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml
@@ -0,0 +1,8 @@
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+apiVersion: v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh b/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/internal/plugin/testdata/src/extismv1-test/.gitignore b/internal/plugin/testdata/src/extismv1-test/.gitignore
new file mode 100644
index 000000000..ef7d91fbb
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/.gitignore
@@ -0,0 +1 @@
+plugin.wasm
diff --git a/internal/plugin/testdata/src/extismv1-test/Makefile b/internal/plugin/testdata/src/extismv1-test/Makefile
new file mode 100644
index 000000000..24da1f371
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/Makefile
@@ -0,0 +1,12 @@
+
+.DEFAULT: build
+.PHONY: build test vet
+
+.PHONY: plugin.wasm
+plugin.wasm:
+ GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm .
+
+build: plugin.wasm
+
+vet:
+ GOOS=wasip1 GOARCH=wasm go vet ./...
diff --git a/internal/plugin/testdata/src/extismv1-test/go.mod b/internal/plugin/testdata/src/extismv1-test/go.mod
new file mode 100644
index 000000000..baed75fab
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/go.mod
@@ -0,0 +1,5 @@
+module helm.sh/helm/v4/internal/plugin/src/extismv1-test
+
+go 1.25.0
+
+require github.com/extism/go-pdk v1.1.3
diff --git a/internal/plugin/testdata/src/extismv1-test/go.sum b/internal/plugin/testdata/src/extismv1-test/go.sum
new file mode 100644
index 000000000..c15d38292
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/go.sum
@@ -0,0 +1,2 @@
+github.com/extism/go-pdk v1.1.3 h1:hfViMPWrqjN6u67cIYRALZTZLk/enSPpNKa+rZ9X2SQ=
+github.com/extism/go-pdk v1.1.3/go.mod h1:Gz+LIU/YCKnKXhgge8yo5Yu1F/lbv7KtKFkiCSzW/P4=
diff --git a/internal/plugin/testdata/src/extismv1-test/main.go b/internal/plugin/testdata/src/extismv1-test/main.go
new file mode 100644
index 000000000..31c739a5b
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/main.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ _ "embed"
+ "fmt"
+ "os"
+
+ pdk "github.com/extism/go-pdk"
+)
+
+type InputMessageTestV1 struct {
+ Name string
+}
+
+type OutputMessageTestV1 struct {
+ Greeting string
+}
+
+type ConfigTestV1 struct{}
+
+func runGetterPluginImpl(input InputMessageTestV1) (*OutputMessageTestV1, error) {
+ name := input.Name
+
+ greeting := fmt.Sprintf("Hello, %s! (%d)", name, len(name))
+ err := os.WriteFile("/tmp/greeting.txt", []byte(greeting), 0o600)
+ if err != nil {
+ return nil, fmt.Errorf("failed to write temp file: %w", err)
+ }
+ return &OutputMessageTestV1{
+ Greeting: greeting,
+ }, nil
+}
+
+func RunGetterPlugin() error {
+ var input InputMessageTestV1
+ if err := pdk.InputJSON(&input); err != nil {
+ return fmt.Errorf("failed to parse input json: %w", err)
+ }
+
+ pdk.Log(pdk.LogDebug, fmt.Sprintf("Received input: %+v", input))
+ output, err := runGetterPluginImpl(input)
+ if err != nil {
+ pdk.Log(pdk.LogError, fmt.Sprintf("failed: %s", err.Error()))
+ return err
+ }
+
+ pdk.Log(pdk.LogDebug, fmt.Sprintf("Sending output: %+v", output))
+ if err := pdk.OutputJSON(output); err != nil {
+ return fmt.Errorf("failed to write output json: %w", err)
+ }
+
+ return nil
+}
+
+//go:wasmexport helm_plugin_main
+func HelmPlugin() uint32 {
+ pdk.Log(pdk.LogDebug, "running example-extism-getter plugin")
+
+ if err := RunGetterPlugin(); err != nil {
+ pdk.Log(pdk.LogError, err.Error())
+ pdk.SetError(err)
+ return 1
+ }
+
+ return 0
+}
+
+func main() {}
diff --git a/internal/plugin/testdata/src/extismv1-test/plugin.yaml b/internal/plugin/testdata/src/extismv1-test/plugin.yaml
new file mode 100644
index 000000000..fea1e3f66
--- /dev/null
+++ b/internal/plugin/testdata/src/extismv1-test/plugin.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+type: test/v1
+name: extismv1-test
+version: 0.1.0
+runtime: extism/v1
+runtimeConfig:
+ fileSystem:
+ createTempDir: true
\ No newline at end of file
diff --git a/internal/plugin/verify.go b/internal/plugin/verify.go
new file mode 100644
index 000000000..760a56e67
--- /dev/null
+++ b/internal/plugin/verify.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// VerifyPlugin verifies plugin data against a signature using data in memory.
+func VerifyPlugin(archiveData, provData []byte, filename, keyring string) (*provenance.Verification, error) {
+ // Create signatory from keyring
+ sig, err := provenance.NewFromKeyring(keyring, "")
+ if err != nil {
+ return nil, err
+ }
+
+ // Use the new VerifyData method directly
+ return sig.Verify(archiveData, provData, filename)
+}
+
+// isTarball checks if a file has a tarball extension
+func IsTarball(filename string) bool {
+ return filepath.Ext(filename) == ".gz" || filepath.Ext(filename) == ".tgz"
+}
diff --git a/internal/plugin/verify_test.go b/internal/plugin/verify_test.go
new file mode 100644
index 000000000..9c907788f
--- /dev/null
+++ b/internal/plugin/verify_test.go
@@ -0,0 +1,214 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+const testKeyFile = "../../pkg/cmd/testdata/helm-test-key.secret"
+const testPubFile = "../../pkg/cmd/testdata/helm-test-key.pub"
+
+const testPluginYAML = `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+
+func TestVerifyPlugin(t *testing.T) {
+ // Create a test plugin and sign it
+ tempDir := t.TempDir()
+
+ // Create plugin directory
+ pluginDir := filepath.Join(tempDir, "verify-test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create tarball
+ tarballPath := filepath.Join(tempDir, "verify-test-plugin.tar.gz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Sign the plugin with source directory
+ signer, err := provenance.NewFromKeyring(testKeyFile, "helm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := signer.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte(""), nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write the signature to .prov file
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the files for verification
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now verify the plugin
+ verification, err := VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile)
+ if err != nil {
+ t.Fatalf("Failed to verify plugin: %v", err)
+ }
+
+ // Check verification results
+ if verification.SignedBy == nil {
+ t.Error("SignedBy is nil")
+ }
+
+ if verification.FileName != "verify-test-plugin.tar.gz" {
+ t.Errorf("Expected filename 'verify-test-plugin.tar.gz', got %s", verification.FileName)
+ }
+
+ if verification.FileHash == "" {
+ t.Error("FileHash is empty")
+ }
+}
+
+func TestVerifyPluginBadSignature(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a plugin tarball
+ pluginDir := filepath.Join(tempDir, "bad-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ tarballPath := filepath.Join(tempDir, "bad-plugin.tar.gz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Create a bad signature (just some text)
+ badSig := `-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+This is not a real signature
+-----BEGIN PGP SIGNATURE-----
+
+InvalidSignatureData
+
+-----END PGP SIGNATURE-----`
+
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(badSig), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the files
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to verify - should fail
+ _, err = VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile)
+ if err == nil {
+ t.Error("Expected verification to fail with bad signature")
+ }
+}
+
+func TestVerifyPluginMissingProvenance(t *testing.T) {
+ tempDir := t.TempDir()
+ tarballPath := filepath.Join(tempDir, "no-prov.tar.gz")
+
+ // Create a minimal tarball
+ if err := os.WriteFile(tarballPath, []byte("dummy"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to verify with empty provenance data
+ _, err = VerifyPlugin(archiveData, nil, filepath.Base(tarballPath), testPubFile)
+ if err == nil {
+ t.Error("Expected verification to fail with empty provenance data")
+ }
+}
+
+func TestVerifyPluginMalformedData(t *testing.T) {
+ // Test with malformed tarball data - should fail
+ malformedData := []byte("not a tarball")
+ provData := []byte("fake provenance")
+
+ _, err := VerifyPlugin(malformedData, provData, "malformed.tar.gz", testPubFile)
+ if err == nil {
+ t.Error("Expected malformed data verification to fail, but it succeeded")
+ }
+}
diff --git a/internal/resolver/resolver.go b/internal/resolver/resolver.go
index 42c9de3b7..3efe94f10 100644
--- a/internal/resolver/resolver.go
+++ b/internal/resolver/resolver.go
@@ -18,21 +18,22 @@ package resolver
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"strings"
"time"
"github.com/Masterminds/semver/v3"
- "github.com/pkg/errors"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/provenance"
"helm.sh/helm/v4/pkg/registry"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// Resolver resolves dependencies from semantic version ranges to a particular version.
@@ -60,7 +61,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
for i, d := range reqs {
constraint, err := semver.NewConstraint(d.Version)
if err != nil {
- return nil, errors.Wrapf(err, "dependency %q has an invalid version/constraint format", d.Name)
+ return nil, fmt.Errorf("dependency %q has an invalid version/constraint format: %w", d.Name, err)
}
if d.Repository == "" {
@@ -124,12 +125,12 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
if !registry.IsOCI(d.Repository) {
repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName)))
if err != nil {
- return nil, errors.Wrapf(err, "no cached repository for %s found. (try 'helm repo update')", repoName)
+ return nil, fmt.Errorf("no cached repository for %s found. (try 'helm repo update'): %w", repoName, err)
}
vs, ok = repoIndex.Entries[d.Name]
if !ok {
- return nil, errors.Errorf("%s chart not found in repo %s", d.Name, d.Repository)
+ return nil, fmt.Errorf("%s chart not found in repo %s", d.Name, d.Repository)
}
found = false
} else {
@@ -151,7 +152,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, fmt.Sprintf("%s://", registry.OCIScheme)), d.Name)
tags, err := r.registryClient.Tags(ref)
if err != nil {
- return nil, errors.Wrapf(err, "could not retrieve list of tags for repository %s", d.Repository)
+ return nil, fmt.Errorf("could not retrieve list of tags for repository %s: %w", d.Repository, err)
}
vs = make(repo.ChartVersions, len(tags))
@@ -192,7 +193,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string
}
}
if len(missing) > 0 {
- return nil, errors.Errorf("can't get a valid version for %d subchart(s): %s. Make sure a matching chart version exists in the repo, or change the version constraint in Chart.yaml", len(missing), strings.Join(missing, ", "))
+ return nil, fmt.Errorf("can't get a valid version for %d subchart(s): %s. Make sure a matching chart version exists in the repo, or change the version constraint in Chart.yaml", len(missing), strings.Join(missing, ", "))
}
digest, err := HashReq(reqs, locked)
@@ -252,8 +253,8 @@ func GetLocalPath(repo, chartpath string) (string, error) {
depPath = filepath.Join(chartpath, p)
}
- if _, err = os.Stat(depPath); os.IsNotExist(err) {
- return "", errors.Errorf("directory %s not found", depPath)
+ if _, err = os.Stat(depPath); errors.Is(err, fs.ErrNotExist) {
+ return "", fmt.Errorf("directory %s not found", depPath)
} else if err != nil {
return "", err
}
diff --git a/internal/statusreaders/job_status_reader.go b/internal/statusreaders/job_status_reader.go
new file mode 100644
index 000000000..3cd9ac7ac
--- /dev/null
+++ b/internal/statusreaders/job_status_reader.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+This file was initially copied and modified from
+ https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "context"
+ "fmt"
+
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+)
+
+type customJobStatusReader struct {
+ genericStatusReader engine.StatusReader
+}
+
+func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader {
+ genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions)
+ return &customJobStatusReader{
+ genericStatusReader: genericStatusReader,
+ }
+}
+
+func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind()
+}
+
+func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatus(ctx, reader, resource)
+}
+
+func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
+}
+
+// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go
+// Modified to return Current status only when the Job has completed as opposed to when it's in progress.
+func jobConditions(u *unstructured.Unstructured) (*status.Result, error) {
+ obj := u.UnstructuredContent()
+
+ parallelism := status.GetIntField(obj, ".spec.parallelism", 1)
+ completions := status.GetIntField(obj, ".spec.completions", parallelism)
+ succeeded := status.GetIntField(obj, ".status.succeeded", 0)
+ failed := status.GetIntField(obj, ".status.failed", 0)
+
+ // Conditions
+ // https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
+ objc, err := status.GetObjectWithConditions(obj)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range objc.Status.Conditions {
+ switch c.Type {
+ case "Complete":
+ if c.Status == corev1.ConditionTrue {
+ message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: message,
+ Conditions: []status.Condition{},
+ }, nil
+ }
+ case "Failed":
+ message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
+ if c.Status == corev1.ConditionTrue {
+ return &status.Result{
+ Status: status.FailedStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Reason: "JobFailed",
+ Message: message,
+ },
+ },
+ }, nil
+ }
+ }
+ }
+
+ message := "Job in progress"
+ return &status.Result{
+ Status: status.InProgressStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionReconciling,
+ Status: corev1.ConditionTrue,
+ Reason: "JobInProgress",
+ Message: message,
+ },
+ },
+ }, nil
+}
diff --git a/internal/statusreaders/job_status_reader_test.go b/internal/statusreaders/job_status_reader_test.go
new file mode 100644
index 000000000..6e9ed5a79
--- /dev/null
+++ b/internal/statusreaders/job_status_reader_test.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+This file was initially copied and modified from
+ https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+)
+
+func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) {
+ t.Helper()
+ // If the incoming object is already unstructured, perform a deep copy first
+ // otherwise DefaultUnstructuredConverter ends up returning the inner map without
+ // making a copy.
+ if _, ok := obj.(runtime.Unstructured); ok {
+ obj = obj.DeepCopyObject()
+ }
+ rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+ if err != nil {
+ return nil, err
+ }
+ return &unstructured.Unstructured{Object: rawMap}, nil
+}
+
+func TestJobConditions(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ job *batchv1.Job
+ expectedStatus status.Status
+ }{
+ {
+ name: "job without Complete condition returns InProgress status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-no-condition",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{},
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "job with Complete condition as True returns Current status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-complete",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
+ },
+ expectedStatus: status.CurrentStatus,
+ },
+ {
+ name: "job with Failed condition as True returns Failed status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-failed",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobFailed,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
+ },
+ expectedStatus: status.FailedStatus,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ us, err := toUnstructured(t, tc.job)
+ assert.NoError(t, err)
+ result, err := jobConditions(us)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedStatus, result.Status)
+ })
+ }
+}
diff --git a/internal/statusreaders/pod_status_reader.go b/internal/statusreaders/pod_status_reader.go
new file mode 100644
index 000000000..c074c3487
--- /dev/null
+++ b/internal/statusreaders/pod_status_reader.go
@@ -0,0 +1,104 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+)
+
+type customPodStatusReader struct {
+ genericStatusReader engine.StatusReader
+}
+
+func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader {
+ genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions)
+ return &customPodStatusReader{
+ genericStatusReader: genericStatusReader,
+ }
+}
+
+func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind()
+}
+
+func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatus(ctx, reader, resource)
+}
+
+func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
+}
+
+func podConditions(u *unstructured.Unstructured) (*status.Result, error) {
+ obj := u.UnstructuredContent()
+ phase := status.GetStringField(obj, ".status.phase", "")
+ switch corev1.PodPhase(phase) {
+ case corev1.PodSucceeded:
+ message := fmt.Sprintf("pod %s succeeded", u.GetName())
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Message: message,
+ },
+ },
+ }, nil
+ case corev1.PodFailed:
+ message := fmt.Sprintf("pod %s failed", u.GetName())
+ return &status.Result{
+ Status: status.FailedStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Reason: "PodFailed",
+ Message: message,
+ },
+ },
+ }, nil
+ }
+
+ message := "Pod in progress"
+ return &status.Result{
+ Status: status.InProgressStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionReconciling,
+ Status: corev1.ConditionTrue,
+ Reason: "PodInProgress",
+ Message: message,
+ },
+ },
+ }, nil
+}
diff --git a/internal/statusreaders/pod_status_reader_test.go b/internal/statusreaders/pod_status_reader_test.go
new file mode 100644
index 000000000..ba0d1f1bb
--- /dev/null
+++ b/internal/statusreaders/pod_status_reader_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+)
+
+func TestPodConditions(t *testing.T) {
+ tests := []struct {
+ name string
+ pod *v1.Pod
+ expectedStatus status.Status
+ }{
+ {
+ name: "pod without status returns in progress",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{},
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod succeeded returns current status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodSucceeded,
+ },
+ },
+ expectedStatus: status.CurrentStatus,
+ },
+ {
+ name: "pod failed returns failed status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodFailed,
+ },
+ },
+ expectedStatus: status.FailedStatus,
+ },
+ {
+ name: "pod pending returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodPending,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod running returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-running"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodRunning,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod with unknown phase returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodUnknown,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ us, err := toUnstructured(t, tc.pod)
+ assert.NoError(t, err)
+ result, err := podConditions(us)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedStatus, result.Status)
+ })
+ }
+}
diff --git a/internal/sympath/walk.go b/internal/sympath/walk.go
index 6b221fb6c..f67b9f1b9 100644
--- a/internal/sympath/walk.go
+++ b/internal/sympath/walk.go
@@ -21,12 +21,11 @@ limitations under the License.
package sympath
import (
- "log"
+ "fmt"
+ "log/slog"
"os"
"path/filepath"
"sort"
-
- "github.com/pkg/errors"
)
// Walk walks the file tree rooted at root, calling walkFn for each file or directory
@@ -69,10 +68,10 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if IsSymlink(info) {
resolved, err := filepath.EvalSymlinks(path)
if err != nil {
- return errors.Wrapf(err, "error evaluating symlink %s", path)
+ return fmt.Errorf("error evaluating symlink %s: %w", path, err)
}
//This log message is to highlight a symlink that is being used within a chart, symlinks can be used for nefarious reasons.
- log.Printf("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved)
+ slog.Info("found symbolic link in path. Contents of linked file included and used", "path", path, "resolved", resolved)
if info, err = os.Lstat(resolved); err != nil {
return err
}
diff --git a/internal/sympath/walk_test.go b/internal/sympath/walk_test.go
index d4e2ceeaa..1eba8b996 100644
--- a/internal/sympath/walk_test.go
+++ b/internal/sympath/walk_test.go
@@ -76,6 +76,7 @@ func walkTree(n *Node, path string, f func(path string, n *Node)) {
}
func makeTree(t *testing.T) {
+ t.Helper()
walkTree(tree, tree.name, func(path string, n *Node) {
if n.entries == nil {
if n.symLinkedTo != "" {
@@ -99,6 +100,7 @@ func makeTree(t *testing.T) {
}
func checkMarks(t *testing.T, report bool) {
+ t.Helper()
walkTree(tree, tree.name, func(path string, n *Node) {
if n.marks != n.expectedMarks && report {
t.Errorf("node %s mark = %d; expected %d", path, n.marks, n.expectedMarks)
diff --git a/internal/test/ensure/ensure.go b/internal/test/ensure/ensure.go
index 0d8dd9abc..a72f48c2d 100644
--- a/internal/test/ensure/ensure.go
+++ b/internal/test/ensure/ensure.go
@@ -29,12 +29,12 @@ import (
func HelmHome(t *testing.T) {
t.Helper()
base := t.TempDir()
- os.Setenv(xdg.CacheHomeEnvVar, base)
- os.Setenv(xdg.ConfigHomeEnvVar, base)
- os.Setenv(xdg.DataHomeEnvVar, base)
- os.Setenv(helmpath.CacheHomeEnvVar, "")
- os.Setenv(helmpath.ConfigHomeEnvVar, "")
- os.Setenv(helmpath.DataHomeEnvVar, "")
+ t.Setenv(xdg.CacheHomeEnvVar, base)
+ t.Setenv(xdg.ConfigHomeEnvVar, base)
+ t.Setenv(xdg.DataHomeEnvVar, base)
+ t.Setenv(helmpath.CacheHomeEnvVar, "")
+ t.Setenv(helmpath.ConfigHomeEnvVar, "")
+ t.Setenv(helmpath.DataHomeEnvVar, "")
}
// TempFile ensures a temp file for unit testing purposes.
@@ -46,9 +46,10 @@ func HelmHome(t *testing.T) {
// tempdir := TempFile(t, "foo", []byte("bar"))
// filename := filepath.Join(tempdir, "foo")
func TempFile(t *testing.T, name string, data []byte) string {
+ t.Helper()
path := t.TempDir()
filename := filepath.Join(path, name)
- if err := os.WriteFile(filename, data, 0755); err != nil {
+ if err := os.WriteFile(filename, data, 0o755); err != nil {
t.Fatal(err)
}
return path
diff --git a/internal/test/test.go b/internal/test/test.go
index e6821282c..632bc72fd 100644
--- a/internal/test/test.go
+++ b/internal/test/test.go
@@ -19,10 +19,9 @@ package test
import (
"bytes"
"flag"
+ "fmt"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
// UpdateGolden writes out the golden files with the latest values, rather than failing the test.
@@ -75,11 +74,11 @@ func compare(actual []byte, filename string) error {
expected, err := os.ReadFile(filename)
if err != nil {
- return errors.Wrapf(err, "unable to read testdata %s", filename)
+ return fmt.Errorf("unable to read testdata %s: %w", filename, err)
}
expected = normalize(expected)
if !bytes.Equal(expected, actual) {
- return errors.Errorf("does not match golden file %s\n\nWANT:\n'%s'\n\nGOT:\n'%s'", filename, expected, actual)
+ return fmt.Errorf("does not match golden file %s\n\nWANT:\n'%s'\n\nGOT:\n'%s'", filename, expected, actual)
}
return nil
}
@@ -92,5 +91,5 @@ func update(filename string, in []byte) error {
}
func normalize(in []byte) []byte {
- return bytes.Replace(in, []byte("\r\n"), []byte("\n"), -1)
+ return bytes.ReplaceAll(in, []byte("\r\n"), []byte("\n"))
}
diff --git a/internal/third_party/dep/fs/fs.go b/internal/third_party/dep/fs/fs.go
index d29bb5f87..6e2720f3b 100644
--- a/internal/third_party/dep/fs/fs.go
+++ b/internal/third_party/dep/fs/fs.go
@@ -32,13 +32,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package fs
import (
+ "errors"
+ "fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"runtime"
"syscall"
-
- "github.com/pkg/errors"
)
// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep.
@@ -51,7 +52,7 @@ import (
func RenameWithFallback(src, dst string) error {
_, err := os.Stat(src)
if err != nil {
- return errors.Wrapf(err, "cannot stat %s", src)
+ return fmt.Errorf("cannot stat %s: %w", src, err)
}
err = os.Rename(src, dst)
@@ -69,20 +70,24 @@ func renameByCopy(src, dst string) error {
if dir, _ := IsDir(src); dir {
cerr = CopyDir(src, dst)
if cerr != nil {
- cerr = errors.Wrap(cerr, "copying directory failed")
+ cerr = fmt.Errorf("copying directory failed: %w", cerr)
}
} else {
- cerr = copyFile(src, dst)
+ cerr = CopyFile(src, dst)
if cerr != nil {
- cerr = errors.Wrap(cerr, "copying file failed")
+ cerr = fmt.Errorf("copying file failed: %w", cerr)
}
}
if cerr != nil {
- return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst)
+ return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr)
+ }
+
+ if err := os.RemoveAll(src); err != nil {
+ return fmt.Errorf("cannot delete %s: %w", src, err)
}
- return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src)
+ return nil
}
var (
@@ -107,7 +112,7 @@ func CopyDir(src, dst string) error {
}
_, err = os.Stat(dst)
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
if err == nil {
@@ -115,12 +120,12 @@ func CopyDir(src, dst string) error {
}
if err = os.MkdirAll(dst, fi.Mode()); err != nil {
- return errors.Wrapf(err, "cannot mkdir %s", dst)
+ return fmt.Errorf("cannot mkdir %s: %w", dst, err)
}
entries, err := os.ReadDir(src)
if err != nil {
- return errors.Wrapf(err, "cannot read directory %s", dst)
+ return fmt.Errorf("cannot read directory %s: %w", dst, err)
}
for _, entry := range entries {
@@ -129,13 +134,13 @@ func CopyDir(src, dst string) error {
if entry.IsDir() {
if err = CopyDir(srcPath, dstPath); err != nil {
- return errors.Wrap(err, "copying directory failed")
+ return fmt.Errorf("copying directory failed: %w", err)
}
} else {
// This will include symlinks, which is what we want when
// copying things.
- if err = copyFile(srcPath, dstPath); err != nil {
- return errors.Wrap(err, "copying file failed")
+ if err = CopyFile(srcPath, dstPath); err != nil {
+ return fmt.Errorf("copying file failed: %w", err)
}
}
}
@@ -143,13 +148,13 @@ func CopyDir(src, dst string) error {
return nil
}
-// copyFile copies the contents of the file named src to the file named
+// CopyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file. The file mode will be copied from the source.
-func copyFile(src, dst string) (err error) {
+func CopyFile(src, dst string) (err error) {
if sym, err := IsSymlink(src); err != nil {
- return errors.Wrap(err, "symlink check failed")
+ return fmt.Errorf("symlink check failed: %w", err)
} else if sym {
if err := cloneSymlink(src, dst); err != nil {
if runtime.GOOS == "windows" {
@@ -172,28 +177,28 @@ func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
- return
+ return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
- return
+ return err
}
if _, err = io.Copy(out, in); err != nil {
out.Close()
- return
+ return err
}
// Check for write errors on Close
if err = out.Close(); err != nil {
- return
+ return err
}
si, err := os.Stat(src)
if err != nil {
- return
+ return err
}
// Temporary fix for Go < 1.9
@@ -205,7 +210,7 @@ func copyFile(src, dst string) (err error) {
}
err = os.Chmod(dst, si.Mode())
- return
+ return err
}
// cloneSymlink will create a new symlink that points to the resolved path of sl.
@@ -226,7 +231,7 @@ func IsDir(name string) (bool, error) {
return false, err
}
if !fi.IsDir() {
- return false, errors.Errorf("%q is not a directory", name)
+ return false, fmt.Errorf("%q is not a directory", name)
}
return true, nil
}
diff --git a/internal/third_party/dep/fs/fs_test.go b/internal/third_party/dep/fs/fs_test.go
index d42c3f110..610771bc3 100644
--- a/internal/third_party/dep/fs/fs_test.go
+++ b/internal/third_party/dep/fs/fs_test.go
@@ -33,17 +33,11 @@ package fs
import (
"os"
- "os/exec"
"path/filepath"
"runtime"
- "sync"
"testing"
)
-var (
- mu sync.Mutex
-)
-
func TestRenameWithFallback(t *testing.T) {
dir := t.TempDir()
@@ -332,7 +326,7 @@ func TestCopyFile(t *testing.T) {
srcf.Close()
destf := filepath.Join(dir, "destf")
- if err := copyFile(srcf.Name(), destf); err != nil {
+ if err := CopyFile(srcf.Name(), destf); err != nil {
t.Fatal(err)
}
@@ -360,19 +354,6 @@ func TestCopyFile(t *testing.T) {
}
}
-func cleanUpDir(dir string) {
- // NOTE(mattn): It seems that sometimes git.exe is not dead
- // when cleanUpDir() is called. But we do not know any way to wait for it.
- if runtime.GOOS == "windows" {
- mu.Lock()
- exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run()
- mu.Unlock()
- }
- if dir != "" {
- os.RemoveAll(dir)
- }
-}
-
func TestCopyFileSymlink(t *testing.T) {
tempdir := t.TempDir()
@@ -385,7 +366,7 @@ func TestCopyFileSymlink(t *testing.T) {
for symlink, dst := range testcases {
t.Run(symlink, func(t *testing.T) {
var err error
- if err = copyFile(symlink, dst); err != nil {
+ if err = CopyFile(symlink, dst); err != nil {
t.Fatalf("failed to copy symlink: %s", err)
}
@@ -457,7 +438,7 @@ func TestCopyFileFail(t *testing.T) {
defer cleanup()
fn := filepath.Join(dstdir, "file")
- if err := copyFile(srcf.Name(), fn); err == nil {
+ if err := CopyFile(srcf.Name(), fn); err == nil {
t.Fatalf("expected error for %s, got none", fn)
}
}
@@ -476,6 +457,7 @@ func TestCopyFileFail(t *testing.T) {
// files this function creates. It is the caller's responsibility to call
// this function before the test is done running, whether there's an error or not.
func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() {
+ t.Helper()
dir := t.TempDir()
subdir := filepath.Join(dir, "dir")
diff --git a/internal/third_party/dep/fs/rename.go b/internal/third_party/dep/fs/rename.go
index a3e5e56a6..5f13b1ca3 100644
--- a/internal/third_party/dep/fs/rename.go
+++ b/internal/third_party/dep/fs/rename.go
@@ -34,10 +34,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package fs
import (
+ "fmt"
"os"
"syscall"
-
- "github.com/pkg/errors"
)
// renameFallback attempts to determine the appropriate fallback to failed rename
@@ -51,7 +50,7 @@ func renameFallback(err error, src, dst string) error {
if !ok {
return err
} else if terr.Err != syscall.EXDEV {
- return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst)
+ return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr)
}
return renameByCopy(src, dst)
diff --git a/internal/third_party/dep/fs/rename_windows.go b/internal/third_party/dep/fs/rename_windows.go
index a377720a6..566f695d3 100644
--- a/internal/third_party/dep/fs/rename_windows.go
+++ b/internal/third_party/dep/fs/rename_windows.go
@@ -34,10 +34,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package fs
import (
+ "fmt"
"os"
"syscall"
-
- "github.com/pkg/errors"
)
// renameFallback attempts to determine the appropriate fallback to failed rename
@@ -61,7 +60,7 @@ func renameFallback(err error, src, dst string) error {
// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
if ok && noerr != 0x11 {
- return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst)
+ return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr)
}
}
diff --git a/internal/tlsutil/tls_test.go b/internal/tlsutil/tls_test.go
index eb1cc183e..3d7e75c86 100644
--- a/internal/tlsutil/tls_test.go
+++ b/internal/tlsutil/tls_test.go
@@ -30,8 +30,9 @@ const (
)
func testfile(t *testing.T, file string) (path string) {
- var err error
- if path, err = filepath.Abs(filepath.Join(tlsTestDir, file)); err != nil {
+ t.Helper()
+ path, err := filepath.Abs(filepath.Join(tlsTestDir, file))
+ if err != nil {
t.Fatalf("error getting absolute path to test file %q: %v", file, err)
}
return path
diff --git a/pkg/action/action.go b/pkg/action/action.go
index ea2dc0dd7..bcf6ca8ef 100644
--- a/pkg/action/action.go
+++ b/pkg/action/action.go
@@ -18,29 +18,36 @@ package action
import (
"bytes"
+ "errors"
"fmt"
"io"
+ "log/slog"
+ "maps"
"os"
"path"
"path/filepath"
- "regexp"
+ "slices"
"strings"
+ "sync"
+ "text/template"
- "github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
+ "sigs.k8s.io/kustomize/kyaml/kio"
+ kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/engine"
"helm.sh/helm/v4/pkg/kube"
- "helm.sh/helm/v4/pkg/postrender"
+ "helm.sh/helm/v4/pkg/postrenderer"
"helm.sh/helm/v4/pkg/registry"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
"helm.sh/helm/v4/pkg/storage"
"helm.sh/helm/v4/pkg/storage/driver"
"helm.sh/helm/v4/pkg/time"
@@ -63,21 +70,6 @@ var (
errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
)
-// ValidName is a regular expression for resource names.
-//
-// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See
-// pkg/lint/rules.validateMetadataNameFunc for the replacement.
-//
-// According to the Kubernetes help text, the regular expression it uses is:
-//
-// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
-//
-// This follows the above regular expression (but requires a full string match, not partial).
-//
-// The Kubernetes documentation is here, though it is not entirely correct:
-// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
-
// Configuration injects the dependencies that all actions share.
type Configuration struct {
// RESTClientGetter is an interface that loads Kubernetes clients.
@@ -93,12 +85,90 @@ type Configuration struct {
RegistryClient *registry.Client
// Capabilities describes the capabilities of the Kubernetes cluster.
- Capabilities *chartutil.Capabilities
+ Capabilities *common.Capabilities
- Log func(string, ...interface{})
+ // CustomTemplateFuncs is defined by users to provide custom template funcs
+ CustomTemplateFuncs template.FuncMap
// HookOutputFunc called with container name and returns and expects writer that will receive the log output.
HookOutputFunc func(namespace, pod, container string) io.Writer
+
+ mutex sync.Mutex
+}
+
+const (
+ // filenameAnnotation is the annotation key used to store the original filename
+ // information in manifest annotations for post-rendering reconstruction.
+ filenameAnnotation = "postrenderer.helm.sh/postrender-filename"
+)
+
+// annotateAndMerge combines multiple YAML files into a single stream of documents,
+// adding filename annotations to each document for later reconstruction.
+func annotateAndMerge(files map[string]string) (string, error) {
+ var combinedManifests []*kyaml.RNode
+
+ // Get sorted filenames to ensure result is deterministic
+ fnames := slices.Sorted(maps.Keys(files))
+
+ for _, fname := range fnames {
+ content := files[fname]
+ // Skip partials and empty files.
+ if strings.HasPrefix(path.Base(fname), "_") || strings.TrimSpace(content) == "" {
+ continue
+ }
+
+ manifests, err := kio.ParseAll(content)
+ if err != nil {
+ return "", fmt.Errorf("parsing %s: %w", fname, err)
+ }
+ for _, manifest := range manifests {
+ if err := manifest.PipeE(kyaml.SetAnnotation(filenameAnnotation, fname)); err != nil {
+ return "", fmt.Errorf("annotating %s: %w", fname, err)
+ }
+ combinedManifests = append(combinedManifests, manifest)
+ }
+ }
+
+ merged, err := kio.StringAll(combinedManifests)
+ if err != nil {
+ return "", fmt.Errorf("writing merged docs: %w", err)
+ }
+ return merged, nil
+}
+
+// splitAndDeannotate reconstructs individual files from a merged YAML stream,
+// removing filename annotations and grouping documents by their original filenames.
+func splitAndDeannotate(postrendered string) (map[string]string, error) {
+ manifests, err := kio.ParseAll(postrendered)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing YAML: %w", err)
+ }
+
+ manifestsByFilename := make(map[string][]*kyaml.RNode)
+ for i, manifest := range manifests {
+ meta, err := manifest.GetMeta()
+ if err != nil {
+ return nil, fmt.Errorf("getting metadata: %w", err)
+ }
+ fname := meta.Annotations[filenameAnnotation]
+ if fname == "" {
+ fname = fmt.Sprintf("generated-by-postrender-%d.yaml", i)
+ }
+ if err := manifest.PipeE(kyaml.ClearAnnotation(filenameAnnotation)); err != nil {
+ return nil, fmt.Errorf("clearing filename annotation: %w", err)
+ }
+ manifestsByFilename[fname] = append(manifestsByFilename[fname], manifest)
+ }
+
+ reconstructed := make(map[string]string, len(manifestsByFilename))
+ for fname, docs := range manifestsByFilename {
+ fileContents, err := kio.StringAll(docs)
+ if err != nil {
+ return nil, fmt.Errorf("re-writing %s: %w", fname, err)
+ }
+ reconstructed[fname] = fileContents
+ }
+ return reconstructed, nil
}
// renderResources renders the templates in a chart
@@ -107,8 +177,8 @@ type Configuration struct {
// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed
//
// This code has to do with writing files to disk.
-func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) {
- hs := []*release.Hook{}
+func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) {
+ var hs []*release.Hook
b := bytes.NewBuffer(nil)
caps, err := cfg.getCapabilities()
@@ -118,7 +188,7 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
if ch.Metadata.KubeVersion != "" {
if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
- return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
+ return hs, b, "", fmt.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
}
}
@@ -135,10 +205,14 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
}
e := engine.New(restConfig)
e.EnableDNS = enableDNS
+ e.CustomTemplateFuncs = cfg.CustomTemplateFuncs
+
files, err2 = e.Render(ch, values)
} else {
var e engine.Engine
e.EnableDNS = enableDNS
+ e.CustomTemplateFuncs = cfg.CustomTemplateFuncs
+
files, err2 = e.Render(ch, values)
}
@@ -166,6 +240,33 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
}
notes := notesBuffer.String()
+ if pr != nil {
+ // We need to send files to the post-renderer before sorting and splitting
+ // hooks from manifests. The post-renderer interface expects a stream of
+ // manifests (similar to what tools like Kustomize and kubectl expect), whereas
+ // the sorter uses filenames.
+ // Here, we merge the documents into a stream, post-render them, and then split
+ // them back into a map of filename -> content.
+
+ // Merge files as stream of documents for sending to post renderer
+ merged, err := annotateAndMerge(files)
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error merging manifests: %w", err)
+ }
+
+ // Run the post renderer
+ postRendered, err := pr.Run(bytes.NewBufferString(merged))
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error while running post render on files: %w", err)
+ }
+
+ // Use the file list and contents received from the post renderer
+ files, err = splitAndDeannotate(postRendered.String())
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error while parsing post rendered output: %w", err)
+ }
+ }
+
// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
// as partials are not used after renderer.Render. Empty manifests are also
// removed here.
@@ -226,13 +327,6 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
}
}
- if pr != nil {
- b, err = pr.Run(b)
- if err != nil {
- return hs, b, notes, errors.Wrap(err, "error while running post render on files")
- }
- }
-
return hs, b, notes, nil
}
@@ -243,23 +337,20 @@ type RESTClientGetter interface {
ToRESTMapper() (meta.RESTMapper, error)
}
-// DebugLog sets the logger that writes debug strings
-type DebugLog func(format string, v ...interface{})
-
// capabilities builds a Capabilities from discovery information.
-func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
+func (cfg *Configuration) getCapabilities() (*common.Capabilities, error) {
if cfg.Capabilities != nil {
return cfg.Capabilities, nil
}
dc, err := cfg.RESTClientGetter.ToDiscoveryClient()
if err != nil {
- return nil, errors.Wrap(err, "could not get Kubernetes discovery client")
+ return nil, fmt.Errorf("could not get Kubernetes discovery client: %w", err)
}
// force a discovery cache invalidation to always fetch the latest server version/capabilities.
dc.Invalidate()
kubeVersion, err := dc.ServerVersion()
if err != nil {
- return nil, errors.Wrap(err, "could not get server version from Kubernetes")
+ return nil, fmt.Errorf("could not get server version from Kubernetes: %w", err)
}
// Issue #6361:
// Client-Go emits an error when an API service is registered but unimplemented.
@@ -269,21 +360,21 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
apiVersions, err := GetVersionSet(dc)
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
- cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
- cfg.Log("WARNING: To fix this, kubectl delete apiservice ")
+ slog.Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
+ slog.Warn("to fix this, kubectl delete apiservice ")
} else {
- return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+ return nil, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err)
}
}
- cfg.Capabilities = &chartutil.Capabilities{
+ cfg.Capabilities = &common.Capabilities{
APIVersions: apiVersions,
- KubeVersion: chartutil.KubeVersion{
+ KubeVersion: common.KubeVersion{
Version: kubeVersion.GitVersion,
Major: kubeVersion.Major,
Minor: kubeVersion.Minor,
},
- HelmVersion: chartutil.DefaultCapabilities.HelmVersion,
+ HelmVersion: common.DefaultCapabilities.HelmVersion,
}
return cfg.Capabilities, nil
}
@@ -292,7 +383,7 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
func (cfg *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
conf, err := cfg.RESTClientGetter.ToRESTConfig()
if err != nil {
- return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
+ return nil, fmt.Errorf("unable to generate config for kubernetes client: %w", err)
}
return kubernetes.NewForConfig(conf)
@@ -308,7 +399,7 @@ func (cfg *Configuration) Now() time.Time {
func (cfg *Configuration) releaseContent(name string, version int) (*release.Release, error) {
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name)
+ return nil, fmt.Errorf("releaseContent: Release name is invalid: %s", name)
}
if version <= 0 {
@@ -319,10 +410,10 @@ func (cfg *Configuration) releaseContent(name string, version int) (*release.Rel
}
// GetVersionSet retrieves a set of available k8s API versions
-func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) {
+func GetVersionSet(client discovery.ServerResourcesInterface) (common.VersionSet, error) {
groups, resources, err := client.ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
- return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+ return common.DefaultVersionSet, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err)
}
// FIXME: The Kubernetes test fixture for cli appears to always return nil
@@ -330,7 +421,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
// return the default API list. This is also a safe value to return in any
// other odd-ball case.
if len(groups) == 0 && len(resources) == 0 {
- return chartutil.DefaultVersionSet, nil
+ return common.DefaultVersionSet, nil
}
versionMap := make(map[string]interface{})
@@ -363,20 +454,19 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version
versions = append(versions, k)
}
- return chartutil.VersionSet(versions), nil
+ return common.VersionSet(versions), nil
}
// recordRelease with an update operation in case reuse has been set.
func (cfg *Configuration) recordRelease(r *release.Release) {
if err := cfg.Releases.Update(r); err != nil {
- cfg.Log("warning: Failed to update release %s: %s", r.Name, err)
+ slog.Warn("failed to update release", "name", r.Name, "revision", r.Version, slog.Any("error", err))
}
}
// Init initializes the action configuration
-func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
+func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
kc := kube.New(getter)
- kc.Log = log
lazyClient := &lazyClient{
namespace: namespace,
@@ -387,11 +477,9 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
switch helmDriver {
case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient))
- d.Log = log
store = storage.Init(d)
case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
- d.Log = log
store = storage.Init(d)
case "memory":
var d *driver.Memory
@@ -411,21 +499,19 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
case "sql":
d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
- log,
namespace,
)
if err != nil {
- return errors.Wrap(err, "unable to instantiate SQL driver")
+ return fmt.Errorf("unable to instantiate SQL driver: %w", err)
}
store = storage.Init(d)
default:
- return errors.Errorf("unknown driver %q", helmDriver)
+ return fmt.Errorf("unknown driver %q", helmDriver)
}
cfg.RESTClientGetter = getter
cfg.KubeClient = kc
cfg.Releases = store
- cfg.Log = log
cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
return nil
@@ -435,3 +521,10 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
func (cfg *Configuration) SetHookOutputFunc(hookOutputFunc func(_, _, _ string) io.Writer) {
cfg.HookOutputFunc = hookOutputFunc
}
+
+func determineReleaseSSApplyMethod(serverSideApply bool) release.ApplyMethod {
+ if serverSideApply {
+ return release.ApplyMethodServerSideApply
+ }
+ return release.ApplyMethodClientSideApply
+}
diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go
index b1cf20597..b65e40024 100644
--- a/pkg/action/action_test.go
+++ b/pkg/action/action_test.go
@@ -16,16 +16,23 @@ limitations under the License.
package action
import (
+ "bytes"
+ "errors"
"flag"
"fmt"
"io"
+ "log/slog"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
fakeclientset "k8s.io/client-go/kubernetes/fake"
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
"helm.sh/helm/v4/pkg/registry"
release "helm.sh/helm/v4/pkg/release/v1"
@@ -34,10 +41,20 @@ import (
"helm.sh/helm/v4/pkg/time"
)
-var verbose = flag.Bool("test.log", false, "enable test logging")
+var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)")
func actionConfigFixture(t *testing.T) *Configuration {
t.Helper()
+ return actionConfigFixtureWithDummyResources(t, nil)
+}
+
+func actionConfigFixtureWithDummyResources(t *testing.T, dummyResources kube.ResourceList) *Configuration {
+ t.Helper()
+
+ logger := logging.NewLogger(func() bool {
+ return *verbose
+ })
+ slog.SetDefault(logger)
registryClient, err := registry.NewClient()
if err != nil {
@@ -46,15 +63,9 @@ func actionConfigFixture(t *testing.T) *Configuration {
return &Configuration{
Releases: storage.Init(driver.NewMemory()),
- KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
- Capabilities: chartutil.DefaultCapabilities,
+ KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: dummyResources},
+ Capabilities: common.DefaultCapabilities,
RegistryClient: registryClient,
- Log: func(format string, v ...interface{}) {
- t.Helper()
- if *verbose {
- t.Logf(format, v...)
- }
- },
}
}
@@ -111,14 +122,14 @@ type chartOptions struct {
type chartOption func(*chartOptions)
func buildChart(opts ...chartOption) *chart.Chart {
- defaultTemplates := []*chart.File{
+ defaultTemplates := []*common.File{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
}
return buildChartWithTemplates(defaultTemplates, opts...)
}
-func buildChartWithTemplates(templates []*chart.File, opts ...chartOption) *chart.Chart {
+func buildChartWithTemplates(templates []*common.File, opts ...chartOption) *chart.Chart {
c := &chartOptions{
Chart: &chart.Chart{
// TODO: This should be more complete.
@@ -168,7 +179,7 @@ func withValues(values map[string]interface{}) chartOption {
func withNotes(notes string) chartOption {
return func(opts *chartOptions) {
- opts.Templates = append(opts.Templates, &chart.File{
+ opts.Templates = append(opts.Templates, &common.File{
Name: "templates/NOTES.txt",
Data: []byte(notes),
})
@@ -189,7 +200,7 @@ func withMetadataDependency(dependency chart.Dependency) chartOption {
func withSampleTemplates() chartOption {
return func(opts *chartOptions) {
- sampleTemplates := []*chart.File{
+ sampleTemplates := []*common.File{
// This adds basic templates and partials.
{Name: "templates/goodbye", Data: []byte("goodbye: world")},
{Name: "templates/empty", Data: []byte("")},
@@ -202,14 +213,14 @@ func withSampleTemplates() chartOption {
func withSampleSecret() chartOption {
return func(opts *chartOptions) {
- sampleSecret := &chart.File{Name: "templates/secret.yaml", Data: []byte("apiVersion: v1\nkind: Secret\n")}
+ sampleSecret := &common.File{Name: "templates/secret.yaml", Data: []byte("apiVersion: v1\nkind: Secret\n")}
opts.Templates = append(opts.Templates, sampleSecret)
}
}
func withSampleIncludingIncorrectTemplates() chartOption {
return func(opts *chartOptions) {
- sampleTemplates := []*chart.File{
+ sampleTemplates := []*common.File{
// This adds basic templates and partials.
{Name: "templates/goodbye", Data: []byte("goodbye: world")},
{Name: "templates/empty", Data: []byte("")},
@@ -223,7 +234,7 @@ func withSampleIncludingIncorrectTemplates() chartOption {
func withMultipleManifestTemplate() chartOption {
return func(opts *chartOptions) {
- sampleTemplates := []*chart.File{
+ sampleTemplates := []*common.File{
{Name: "templates/rbac", Data: []byte(rbacManifests)},
}
opts.Templates = append(opts.Templates, sampleTemplates...)
@@ -334,7 +345,7 @@ func TestConfiguration_Init(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
cfg := &Configuration{}
- actualErr := cfg.Init(nil, "default", tt.helmDriver, nil)
+ actualErr := cfg.Init(nil, "default", tt.helmDriver)
if tt.expectErr {
assert.Error(t, actualErr)
assert.Contains(t, actualErr.Error(), tt.errMsg)
@@ -347,7 +358,7 @@ func TestConfiguration_Init(t *testing.T) {
}
func TestGetVersionSet(t *testing.T) {
- client := fakeclientset.NewSimpleClientset()
+ client := fakeclientset.NewClientset()
vs, err := GetVersionSet(client.Discovery())
if err != nil {
@@ -361,3 +372,582 @@ func TestGetVersionSet(t *testing.T) {
t.Error("Non-existent version is reported found.")
}
}
+
+// Mock PostRenderer for testing
+type mockPostRenderer struct {
+ shouldError bool
+ transform func(string) string
+}
+
+func (m *mockPostRenderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+ if m.shouldError {
+ return nil, errors.New("mock post-renderer error")
+ }
+
+ content := renderedManifests.String()
+ if m.transform != nil {
+ content = m.transform(content)
+ }
+
+ return bytes.NewBufferString(content), nil
+}
+
+func TestAnnotateAndMerge(t *testing.T) {
+ tests := []struct {
+ name string
+ files map[string]string
+ expectedError string
+ expected string
+ }{
+ {
+ name: "no files",
+ files: map[string]string{},
+ expected: "",
+ },
+ {
+ name: "single file with single manifest",
+ files: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml'
+data:
+ key: value
+`,
+ },
+ {
+ name: "multiple files with multiple manifests",
+ files: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml'
+data:
+ key: value
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/secret.yaml'
+data:
+ password: dGVzdA==
+`,
+ },
+ {
+ name: "file with multiple manifests",
+ files: map[string]string{
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml'
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml'
+data:
+ key: value2
+`,
+ },
+ {
+ name: "partials and empty files are removed",
+ files: map[string]string{
+ "templates/cm.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+`,
+ "templates/_partial.tpl": `
+{{-define name}}
+ {{- "abracadabra"}}
+{{- end -}}`,
+ "templates/empty.yaml": ``,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml'
+`,
+ },
+ {
+ name: "empty file",
+ files: map[string]string{
+ "templates/empty.yaml": "",
+ },
+ expected: ``,
+ },
+ {
+ name: "invalid yaml",
+ files: map[string]string{
+ "templates/invalid.yaml": `invalid: yaml: content:
+ - malformed`,
+ },
+ expectedError: "parsing templates/invalid.yaml",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ merged, err := annotateAndMerge(tt.files)
+
+ if tt.expectedError != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, merged)
+ assert.Equal(t, tt.expected, merged)
+ }
+ })
+ }
+}
+
+func TestSplitAndDeannotate(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expectedFiles map[string]string
+ expectedError string
+ }{
+ {
+ name: "single annotated manifest",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ },
+ },
+ {
+ name: "multiple manifests with different filenames",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+data:
+ key: value
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/secret.yaml
+data:
+ password: dGVzdA==`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==
+`,
+ },
+ },
+ {
+ name: "multiple manifests with same filename",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/multi.yaml
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/multi.yaml
+data:
+ key: value2`,
+ expectedFiles: map[string]string{
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2
+`,
+ },
+ },
+ {
+ name: "manifest with other annotations",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+ other-annotation: should-remain
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ other-annotation: should-remain
+data:
+ key: value
+`,
+ },
+ },
+ {
+ name: "invalid yaml input",
+ input: "invalid: yaml: content:",
+ expectedError: "error parsing YAML: MalformedYAMLError",
+ },
+ {
+ name: "manifest without filename annotation",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "generated-by-postrender-0.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ files, err := splitAndDeannotate(tt.input)
+
+ if tt.expectedError != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, len(tt.expectedFiles), len(files))
+
+ for expectedFile, expectedContent := range tt.expectedFiles {
+ actualContent, exists := files[expectedFile]
+ assert.True(t, exists, "Expected file %s not found", expectedFile)
+ assert.Equal(t, expectedContent, actualContent)
+ }
+ }
+ })
+ }
+}
+
+func TestAnnotateAndMerge_SplitAndDeannotate_Roundtrip(t *testing.T) {
+ // Test that merge/split operations are symmetric
+ originalFiles := map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==`,
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2`,
+ }
+
+ // Merge and annotate
+ merged, err := annotateAndMerge(originalFiles)
+ require.NoError(t, err)
+
+ // Split and deannotate
+ reconstructed, err := splitAndDeannotate(merged)
+ require.NoError(t, err)
+
+ // Compare the results
+ assert.Equal(t, len(originalFiles), len(reconstructed))
+ for filename, originalContent := range originalFiles {
+ reconstructedContent, exists := reconstructed[filename]
+ assert.True(t, exists, "File %s should exist in reconstructed files", filename)
+
+ // Normalize whitespace for comparison since YAML processing might affect formatting
+ normalizeContent := func(content string) string {
+ return strings.TrimSpace(strings.ReplaceAll(content, "\r\n", "\n"))
+ }
+
+ assert.Equal(t, normalizeContent(originalContent), normalizeContent(reconstructedContent))
+ }
+}
+
+func TestRenderResources_PostRenderer_Success(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a simple mock post-renderer
+ mockPR := &mockPostRenderer{
+ transform: func(content string) string {
+ content = strings.ReplaceAll(content, "hello", "yellow")
+ content = strings.ReplaceAll(content, "goodbye", "foodpie")
+ return strings.ReplaceAll(content, "test-cm", "test-cm-postrendered")
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes)
+ expectedBuf := `---
+# Source: yellow/templates/foodpie
+foodpie: world
+---
+# Source: yellow/templates/with-partials
+yellow: Earth
+---
+# Source: yellow/templates/yellow
+yellow: world
+`
+ expectedHook := `kind: ConfigMap
+metadata:
+ name: test-cm-postrendered
+ annotations:
+ "helm.sh/hook": post-install,pre-delete,post-upgrade
+data:
+ name: value`
+
+ assert.Equal(t, expectedBuf, buf.String())
+ assert.Len(t, hooks, 1)
+ assert.Equal(t, expectedHook, hooks[0].Manifest)
+}
+
+func TestRenderResources_PostRenderer_Error(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a post-renderer that returns an error
+ mockPR := &mockPostRenderer{
+ shouldError: true,
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error while running post render on files")
+}
+
+func TestRenderResources_PostRenderer_MergeError(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a mock post-renderer
+ mockPR := &mockPostRenderer{}
+
+ // Create a chart with invalid YAML that would cause AnnotateAndMerge to fail
+ ch := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v1",
+ Name: "test-chart",
+ Version: "0.1.0",
+ },
+ Templates: []*common.File{
+ {Name: "templates/invalid", Data: []byte("invalid: yaml: content:")},
+ },
+ }
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error merging manifests")
+}
+
+func TestRenderResources_PostRenderer_SplitError(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a post-renderer that returns invalid YAML
+ mockPR := &mockPostRenderer{
+ transform: func(_ string) string {
+ return "invalid: yaml: content:"
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error while parsing post rendered output: error parsing YAML: MalformedYAMLError:")
+}
+
+func TestRenderResources_PostRenderer_Integration(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ mockPR := &mockPostRenderer{
+ transform: func(content string) string {
+ return strings.ReplaceAll(content, "metadata:", "color: blue\nmetadata:")
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes) // Notes should be empty for this test
+
+ // Verify that the post-renderer modifications are present in the output
+ output := buf.String()
+ expected := `---
+# Source: hello/templates/goodbye
+goodbye: world
+color: blue
+---
+# Source: hello/templates/hello
+hello: world
+color: blue
+---
+# Source: hello/templates/with-partials
+hello: Earth
+color: blue
+`
+ assert.Contains(t, output, "color: blue")
+ assert.Equal(t, 3, strings.Count(output, "color: blue"))
+ assert.Equal(t, expected, output)
+}
+
+func TestRenderResources_NoPostRenderer(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ nil, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes)
+}
+
+func TestDetermineReleaseSSAApplyMethod(t *testing.T) {
+ assert.Equal(t, release.ApplyMethodClientSideApply, determineReleaseSSApplyMethod(false))
+ assert.Equal(t, release.ApplyMethodServerSideApply, determineReleaseSSApplyMethod(true))
+}
diff --git a/pkg/action/get_metadata.go b/pkg/action/get_metadata.go
index e760ae4d1..889545ddc 100644
--- a/pkg/action/get_metadata.go
+++ b/pkg/action/get_metadata.go
@@ -34,16 +34,20 @@ type GetMetadata struct {
}
type Metadata struct {
- Name string `json:"name" yaml:"name"`
- Chart string `json:"chart" yaml:"chart"`
- Version string `json:"version" yaml:"version"`
- AppVersion string `json:"appVersion" yaml:"appVersion"`
- Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+ Name string `json:"name" yaml:"name"`
+ Chart string `json:"chart" yaml:"chart"`
+ Version string `json:"version" yaml:"version"`
+ AppVersion string `json:"appVersion" yaml:"appVersion"`
+ // Annotations are fetched from the Chart.yaml file
+ Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+ // Labels of the release which are stored in driver metadata fields storage
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
Dependencies []*chart.Dependency `json:"dependencies,omitempty" yaml:"dependencies,omitempty"`
Namespace string `json:"namespace" yaml:"namespace"`
Revision int `json:"revision" yaml:"revision"`
Status string `json:"status" yaml:"status"`
DeployedAt string `json:"deployedAt" yaml:"deployedAt"`
+ ApplyMethod string `json:"applyMethod,omitempty" yaml:"applyMethod,omitempty"`
}
// NewGetMetadata creates a new GetMetadata object with the given configuration.
@@ -71,10 +75,12 @@ func (g *GetMetadata) Run(name string) (*Metadata, error) {
AppVersion: rel.Chart.Metadata.AppVersion,
Dependencies: rel.Chart.Metadata.Dependencies,
Annotations: rel.Chart.Metadata.Annotations,
+ Labels: rel.Labels,
Namespace: rel.Namespace,
Revision: rel.Version,
Status: rel.Info.Status.String(),
DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339),
+ ApplyMethod: rel.ApplyMethod,
}, nil
}
diff --git a/pkg/action/get_metadata_test.go b/pkg/action/get_metadata_test.go
new file mode 100644
index 000000000..ca612fed7
--- /dev/null
+++ b/pkg/action/get_metadata_test.go
@@ -0,0 +1,626 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ helmtime "helm.sh/helm/v4/pkg/time"
+)
+
+func TestNewGetMetadata(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, cfg, client.cfg)
+ assert.Equal(t, 0, client.Version)
+}
+
+func TestGetMetadata_Run_BasicMetadata(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, "v1.2.3", result.AppVersion)
+ assert.Equal(t, "default", result.Namespace)
+ assert.Equal(t, 1, result.Revision)
+ assert.Equal(t, "deployed", result.Status)
+ assert.Equal(t, deployedTime.Format(time.RFC3339), result.DeployedAt)
+ assert.Empty(t, result.Dependencies)
+ assert.Empty(t, result.Annotations)
+}
+
+func TestGetMetadata_Run_WithDependencies(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, dependencies, result.Dependencies)
+ assert.Len(t, result.Dependencies, 2)
+ assert.Equal(t, "mysql", result.Dependencies[0].Name)
+ assert.Equal(t, "redis", result.Dependencies[1].Name)
+}
+
+func TestGetMetadata_Run_WithDependenciesAliases(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "database",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "cache",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, dependencies, result.Dependencies)
+ assert.Len(t, result.Dependencies, 2)
+ assert.Equal(t, "mysql", result.Dependencies[0].Name)
+ assert.Equal(t, "database", result.Dependencies[0].Alias)
+ assert.Equal(t, "redis", result.Dependencies[1].Name)
+ assert.Equal(t, "cache", result.Dependencies[1].Alias)
+}
+
+func TestGetMetadata_Run_WithMixedDependencies(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "database",
+ },
+ {
+ Name: "nginx",
+ Version: "1.20.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "cache",
+ },
+ {
+ Name: "postgresql",
+ Version: "11.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, dependencies, result.Dependencies)
+ assert.Len(t, result.Dependencies, 4)
+
+ // Verify dependencies with aliases
+ assert.Equal(t, "mysql", result.Dependencies[0].Name)
+ assert.Equal(t, "database", result.Dependencies[0].Alias)
+ assert.Equal(t, "redis", result.Dependencies[2].Name)
+ assert.Equal(t, "cache", result.Dependencies[2].Alias)
+
+ // Verify dependencies without aliases
+ assert.Equal(t, "nginx", result.Dependencies[1].Name)
+ assert.Equal(t, "", result.Dependencies[1].Alias)
+ assert.Equal(t, "postgresql", result.Dependencies[3].Name)
+ assert.Equal(t, "", result.Dependencies[3].Alias)
+}
+
+func TestGetMetadata_Run_WithAnnotations(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ annotations := map[string]string{
+ "helm.sh/hook": "pre-install",
+ "helm.sh/hook-weight": "5",
+ "custom.annotation": "test-value",
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Annotations: annotations,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, annotations, result.Annotations)
+ assert.Equal(t, "pre-install", result.Annotations["helm.sh/hook"])
+ assert.Equal(t, "5", result.Annotations["helm.sh/hook-weight"])
+ assert.Equal(t, "test-value", result.Annotations["custom.annotation"])
+}
+
+func TestGetMetadata_Run_SpecificVersion(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+ client.Version = 2
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ rel1 := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusSuperseded,
+ LastDeployed: helmtime.Time{Time: deployedTime.Time.Add(-time.Hour)},
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.0.0",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ rel2 := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.1.0",
+ AppVersion: "v1.1.0",
+ },
+ },
+ Version: 2,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel1)
+ cfg.Releases.Create(rel2)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.1.0", result.Version)
+ assert.Equal(t, "v1.1.0", result.AppVersion)
+ assert.Equal(t, 2, result.Revision)
+ assert.Equal(t, "deployed", result.Status)
+}
+
+func TestGetMetadata_Run_DifferentStatuses(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ testCases := []struct {
+ name string
+ status release.Status
+ expected string
+ }{
+ {"deployed", release.StatusDeployed, "deployed"},
+ {"failed", release.StatusFailed, "failed"},
+ {"uninstalled", release.StatusUninstalled, "uninstalled"},
+ {"pending-install", release.StatusPendingInstall, "pending-install"},
+ {"pending-upgrade", release.StatusPendingUpgrade, "pending-upgrade"},
+ {"pending-rollback", release.StatusPendingRollback, "pending-rollback"},
+ {"superseded", release.StatusSuperseded, "superseded"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ releaseName := "test-release-" + tc.name
+ deployedTime := helmtime.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: tc.status,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.0.0",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, result.Status)
+ })
+ }
+}
+
+func TestGetMetadata_Run_UnreachableKubeClient(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ cfg.KubeClient = &failingKubeClient
+
+ client := NewGetMetadata(cfg)
+
+ _, err := client.Run("test-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "connection refused")
+}
+
+func TestGetMetadata_Run_ReleaseNotFound(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ _, err := client.Run("non-existent-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGetMetadata_Run_EmptyAppVersion(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := helmtime.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "", // Empty app version
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, "", result.AppVersion)
+}
+
+func TestMetadata_FormattedDepNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ dependencies []*chart.Dependency
+ expected string
+ }{
+ {
+ name: "no dependencies",
+ dependencies: []*chart.Dependency{},
+ expected: "",
+ },
+ {
+ name: "single dependency",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql"},
+ },
+ expected: "mysql",
+ },
+ {
+ name: "multiple dependencies sorted",
+ dependencies: []*chart.Dependency{
+ {Name: "redis"},
+ {Name: "mysql"},
+ {Name: "nginx"},
+ },
+ expected: "mysql,nginx,redis",
+ },
+ {
+ name: "already sorted dependencies",
+ dependencies: []*chart.Dependency{
+ {Name: "apache"},
+ {Name: "mysql"},
+ {Name: "zookeeper"},
+ },
+ expected: "apache,mysql,zookeeper",
+ },
+ {
+ name: "duplicate names",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql"},
+ {Name: "redis"},
+ {Name: "mysql"},
+ },
+ expected: "mysql,mysql,redis",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ metadata := &Metadata{
+ Dependencies: tc.dependencies,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func TestMetadata_FormattedDepNames_WithComplexDependencies(t *testing.T) {
+ dependencies := []*chart.Dependency{
+ {
+ Name: "zookeeper",
+ Version: "10.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Condition: "zookeeper.enabled",
+ },
+ {
+ Name: "apache",
+ Version: "9.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Condition: "mysql.enabled",
+ },
+ }
+
+ metadata := &Metadata{
+ Dependencies: dependencies,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, "apache,mysql,zookeeper", result)
+}
+
+func TestMetadata_FormattedDepNames_WithAliases(t *testing.T) {
+ testCases := []struct {
+ name string
+ dependencies []*chart.Dependency
+ expected string
+ }{
+ {
+ name: "dependencies with aliases",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: "database"},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,redis",
+ },
+ {
+ name: "mixed dependencies with and without aliases",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: "database"},
+ {Name: "nginx"},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,nginx,redis",
+ },
+ {
+ name: "empty alias should use name",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: ""},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,redis",
+ },
+ {
+ name: "sorted by name not alias",
+ dependencies: []*chart.Dependency{
+ {Name: "zookeeper", Alias: "a-service"},
+ {Name: "apache", Alias: "z-service"},
+ },
+ expected: "apache,zookeeper",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ metadata := &Metadata{
+ Dependencies: tc.dependencies,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func TestGetMetadata_Labels(t *testing.T) {
+ rel := releaseStub()
+ rel.Info.Status = release.StatusDeployed
+ customLabels := map[string]string{"key1": "value1", "key2": "value2"}
+ rel.Labels = customLabels
+
+ metaGetter := NewGetMetadata(actionConfigFixture(t))
+ err := metaGetter.cfg.Releases.Create(rel)
+ assert.NoError(t, err)
+
+ metadata, err := metaGetter.Run(rel.Name)
+ assert.NoError(t, err)
+
+ assert.Equal(t, metadata.Name, rel.Name)
+ assert.Equal(t, metadata.Labels, customLabels)
+}
diff --git a/pkg/action/get_values.go b/pkg/action/get_values.go
index 18b8b4838..a0b5d92c1 100644
--- a/pkg/action/get_values.go
+++ b/pkg/action/get_values.go
@@ -16,9 +16,7 @@ limitations under the License.
package action
-import (
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
-)
+import "helm.sh/helm/v4/pkg/chart/common/util"
// GetValues is the action for checking a given release's values.
//
@@ -50,7 +48,7 @@ func (g *GetValues) Run(name string) (map[string]interface{}, error) {
// If the user wants all values, compute the values and return.
if g.AllValues {
- cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config)
+ cfg, err := util.CoalesceValues(rel.Chart, rel.Config)
if err != nil {
return nil, err
}
diff --git a/pkg/action/get_values_test.go b/pkg/action/get_values_test.go
new file mode 100644
index 000000000..b8630c322
--- /dev/null
+++ b/pkg/action/get_values_test.go
@@ -0,0 +1,219 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewGetValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, cfg, client.cfg)
+ assert.Equal(t, 0, client.Version)
+ assert.Equal(t, false, client.AllValues)
+}
+
+func TestGetValues_Run_UserConfigOnly(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+ userConfig := map[string]interface{}{
+ "database": map[string]interface{}{
+ "host": "localhost",
+ "port": 5432,
+ },
+ "app": map[string]interface{}{
+ "name": "my-app",
+ "replicas": 3,
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ Values: map[string]interface{}{
+ "defaultKey": "defaultValue",
+ "app": map[string]interface{}{
+ "name": "default-app",
+ "timeout": 30,
+ },
+ },
+ },
+ Config: userConfig,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Equal(t, userConfig, result)
+}
+
+func TestGetValues_Run_AllValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+ client.AllValues = true
+
+ releaseName := "test-release"
+ userConfig := map[string]interface{}{
+ "database": map[string]interface{}{
+ "host": "localhost",
+ "port": 5432,
+ },
+ "app": map[string]interface{}{
+ "name": "my-app",
+ },
+ }
+
+ chartDefaultValues := map[string]interface{}{
+ "defaultKey": "defaultValue",
+ "app": map[string]interface{}{
+ "name": "default-app",
+ "timeout": 30,
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ Values: chartDefaultValues,
+ },
+ Config: userConfig,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, "my-app", result["app"].(map[string]interface{})["name"])
+ assert.Equal(t, 30, result["app"].(map[string]interface{})["timeout"])
+ assert.Equal(t, "defaultValue", result["defaultKey"])
+ assert.Equal(t, "localhost", result["database"].(map[string]interface{})["host"])
+ assert.Equal(t, 5432, result["database"].(map[string]interface{})["port"])
+}
+
+func TestGetValues_Run_EmptyValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ },
+ Config: map[string]interface{}{},
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Equal(t, map[string]interface{}{}, result)
+}
+
+func TestGetValues_Run_UnreachableKubeClient(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ cfg.KubeClient = &failingKubeClient
+
+ client := NewGetValues(cfg)
+
+ _, err := client.Run("test-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "connection refused")
+}
+
+func TestGetValues_Run_ReleaseNotFound(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ _, err := client.Run("non-existent-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGetValues_Run_NilConfig(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: release.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ },
+ Config: nil,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ cfg.Releases.Create(rel)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+}
diff --git a/pkg/action/history.go b/pkg/action/history.go
index 04743f4cd..d7af1d6a4 100644
--- a/pkg/action/history.go
+++ b/pkg/action/history.go
@@ -17,7 +17,9 @@ limitations under the License.
package action
import (
- "github.com/pkg/errors"
+ "log/slog"
+
+ "fmt"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
release "helm.sh/helm/v4/pkg/release/v1"
@@ -50,9 +52,9 @@ func (h *History) Run(name string) ([]*release.Release, error) {
}
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, errors.Errorf("release name is invalid: %s", name)
+ return nil, fmt.Errorf("release name is invalid: %s", name)
}
- h.cfg.Log("getting history for release %s", name)
+ slog.Debug("getting history for release", "release", name)
return h.cfg.Releases.History(name)
}
diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 230e9ec81..458a6342c 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -25,17 +25,15 @@ import (
"helm.sh/helm/v4/pkg/kube"
+ "go.yaml.in/yaml/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/pkg/errors"
- "gopkg.in/yaml.v3"
-
release "helm.sh/helm/v4/pkg/release/v1"
helmtime "helm.sh/helm/v4/pkg/time"
)
// execHook executes all of the hooks for the given hook event.
-func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error {
executingHooks := []*release.Hook{}
for _, h := range rl.Hooks {
@@ -49,23 +47,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// hooke are pre-ordered by kind, so keep order stable
sort.Stable(hookByWeight(executingHooks))
- for _, h := range executingHooks {
+ for i, h := range executingHooks {
// Set default delete policy to before-hook-creation
- if len(h.DeletePolicies) == 0 {
- // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
- // resources. For all other resource types update in place if a
- // resource with the same name already exists and is owned by the
- // current release.
- h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
- }
+ cfg.hookSetDeletePolicy(h)
- if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil {
+ if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil {
return err
}
resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
if err != nil {
- return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+ return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err)
}
// Record the time at which the hook was applied to the cluster
@@ -81,14 +73,20 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
h.LastRun.Phase = release.HookPhaseUnknown
// Create hook resources
- if _, err := cfg.KubeClient.Create(resources); err != nil {
+ if _, err := cfg.KubeClient.Create(
+ resources,
+ kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil {
h.LastRun.CompletedAt = helmtime.Now()
h.LastRun.Phase = release.HookPhaseFailed
- return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
+ return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
}
+ waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
+ if err != nil {
+ return fmt.Errorf("unable to get waiter: %w", err)
+ }
// Watch hook resources until they have completed
- err = cfg.KubeClient.WatchUntilReady(resources, timeout)
+ err = waiter.WatchUntilReady(resources, timeout)
// Note the time of success/failure
h.LastRun.CompletedAt = helmtime.Now()
// Mark hook as succeeded or failed
@@ -101,10 +99,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
}
// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook
- if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil {
+ if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil {
// We log the error here as we want to propagate the hook failure upwards to the release object.
log.Printf("error deleting the hook resource on hook failure: %v", errDeleting)
}
+
+ // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks
+ // should be deleted under succeeded condition.
+ if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil {
+ return err
+ }
+
return err
}
h.LastRun.Phase = release.HookPhaseSucceeded
@@ -118,7 +123,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// We log here as we still want to attempt hook resource deletion even if output logging fails.
log.Printf("error outputting logs for hook failure: %v", err)
}
- if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil {
+ if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil {
return err
}
}
@@ -139,41 +144,64 @@ func (x hookByWeight) Less(i, j int) bool {
}
// deleteHookByPolicy deletes a hook if the hook policy instructs it to
-func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error {
+func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
// Never delete CustomResourceDefinitions; this could cause lots of
// cascading garbage collection.
if h.Kind == "CustomResourceDefinition" {
return nil
}
- if hookHasDeletePolicy(h, policy) {
+ if cfg.hookHasDeletePolicy(h, policy) {
resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false)
if err != nil {
- return errors.Wrapf(err, "unable to build kubernetes object for deleting hook %s", h.Path)
+ return fmt.Errorf("unable to build kubernetes object for deleting hook %s: %w", h.Path, err)
}
_, errs := cfg.KubeClient.Delete(resources)
if len(errs) > 0 {
- return errors.New(joinErrors(errs))
+ return joinErrors(errs, "; ")
}
- // wait for resources until they are deleted to avoid conflicts
- if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok {
- if err := kubeClient.WaitForDelete(resources, timeout); err != nil {
- return err
- }
+ waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
+ if err != nil {
+ return err
+ }
+ if err := waiter.WaitForDelete(resources, timeout); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
+func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
+ for _, h := range hooks {
+ if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil {
+ return err
}
}
+
return nil
}
// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices
// supported by helm. If so, mark the hook as one should be deleted.
-func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {
- for _, v := range h.DeletePolicies {
- if policy == v {
- return true
- }
+func (cfg *Configuration) hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {
+ cfg.mutex.Lock()
+ defer cfg.mutex.Unlock()
+ return slices.Contains(h.DeletePolicies, policy)
+}
+
+// hookSetDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices
+// supported by helm. If so, mark the hook as one should be deleted.
+func (cfg *Configuration) hookSetDeletePolicy(h *release.Hook) {
+ cfg.mutex.Lock()
+ defer cfg.mutex.Unlock()
+ if len(h.DeletePolicies) == 0 {
+ // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
+ // resources. For all other resource types update in place if a
+ // resource with the same name already exists and is owned by the
+ // current release.
+ h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
}
- return false
}
// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to
@@ -216,7 +244,7 @@ func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (st
}{}
err := yaml.Unmarshal([]byte(h.Manifest), &tmp)
if err != nil {
- return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path)
+ return "", fmt.Errorf("unable to parse metadata.namespace from kubernetes manifest for output logs hook %s: %w", h.Path, err)
}
if tmp.Metadata.Namespace == "" {
return namespace, nil
diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go
index 38f25d9ab..091155bc2 100644
--- a/pkg/action/hooks_test.go
+++ b/pkg/action/hooks_test.go
@@ -20,13 +20,21 @@ import (
"bytes"
"fmt"
"io"
+ "reflect"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/cli-runtime/pkg/resource"
- chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
)
func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string {
@@ -158,6 +166,7 @@ func TestInstallRelease_HooksOutputLogsOnSuccessAndFailure(t *testing.T) {
}
func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+ t.Helper()
var expectedOutput string
if shouldOutput {
expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
@@ -168,7 +177,7 @@ func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace str
outBuffer := &bytes.Buffer{}
instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
- templates := []*chart.File{
+ templates := []*common.File{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifest)},
}
@@ -181,6 +190,7 @@ func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace str
}
func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+ t.Helper()
var expectedOutput string
if shouldOutput {
expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
@@ -194,7 +204,7 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str
outBuffer := &bytes.Buffer{}
failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
- templates := []*chart.File{
+ templates := []*common.File{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifest)},
}
@@ -206,3 +216,188 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str
is.Equal(expectedOutput, outBuffer.String())
is.Equal(release.StatusFailed, res.Info.Status)
}
+
+type HookFailedError struct{}
+
+func (e *HookFailedError) Error() string {
+ return "Hook failed!"
+}
+
+type HookFailingKubeClient struct {
+ kubefake.PrintingKubeClient
+ failOn resource.Info
+ deleteRecord []resource.Info
+}
+
+type HookFailingKubeWaiter struct {
+ *kubefake.PrintingKubeWaiter
+ failOn resource.Info
+}
+
+func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
+ configMap := &v1.ConfigMap{}
+
+ err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
+
+ if err != nil {
+ return kube.ResourceList{}, err
+ }
+
+ return kube.ResourceList{{
+ Name: configMap.Name,
+ Namespace: configMap.Namespace,
+ }}, nil
+}
+
+func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
+ for _, res := range resources {
+ if res.Name == h.failOn.Name && res.Namespace == h.failOn.Namespace {
+ return &HookFailedError{}
+ }
+ }
+ return nil
+}
+
+func (h *HookFailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
+ for _, res := range resources {
+ h.deleteRecord = append(h.deleteRecord, resource.Info{
+ Name: res.Name,
+ Namespace: res.Namespace,
+ })
+ }
+
+ return h.PrintingKubeClient.Delete(resources)
+}
+
+func (h *HookFailingKubeClient) GetWaiter(strategy kube.WaitStrategy) (kube.Waiter, error) {
+ waiter, _ := h.PrintingKubeClient.GetWaiter(strategy)
+ return &HookFailingKubeWaiter{
+ PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter),
+ failOn: h.failOn,
+ }, nil
+}
+
+func TestHooksCleanUp(t *testing.T) {
+ hookEvent := release.HookPreInstall
+
+ testCases := []struct {
+ name string
+ inputRelease release.Release
+ failOn resource.Info
+ expectedDeleteRecord []resource.Info
+ expectError bool
+ }{
+ {
+ "Deletion hook runs for previously successful hook on failure of a heavier weight hook",
+ release.Release{
+ Name: "test-release",
+ Namespace: "test",
+ Hooks: []*release.Hook{
+ {
+ Name: "hook-1",
+ Kind: "ConfigMap",
+ Path: "templates/service_account.yaml",
+ Manifest: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: build-config-1
+ namespace: test
+data:
+ foo: bar
+`,
+ Weight: -5,
+ Events: []release.HookEvent{
+ hookEvent,
+ },
+ DeletePolicies: []release.HookDeletePolicy{
+ release.HookBeforeHookCreation,
+ release.HookSucceeded,
+ release.HookFailed,
+ },
+ LastRun: release.HookExecution{
+ Phase: release.HookPhaseSucceeded,
+ },
+ },
+ {
+ Name: "hook-2",
+ Kind: "ConfigMap",
+ Path: "templates/job.yaml",
+ Manifest: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: build-config-2
+ namespace: test
+data:
+ foo: bar
+`,
+ Weight: 0,
+ Events: []release.HookEvent{
+ hookEvent,
+ },
+ DeletePolicies: []release.HookDeletePolicy{
+ release.HookBeforeHookCreation,
+ release.HookSucceeded,
+ release.HookFailed,
+ },
+ LastRun: release.HookExecution{
+ Phase: release.HookPhaseFailed,
+ },
+ },
+ },
+ }, resource.Info{
+ Name: "build-config-2",
+ Namespace: "test",
+ }, []resource.Info{
+ {
+ // This should be in the record for `before-hook-creation`
+ Name: "build-config-1",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for `before-hook-creation`
+ Name: "build-config-2",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for cleaning up (the failure first)
+ Name: "build-config-2",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for cleaning up (then the previously successful)
+ Name: "build-config-1",
+ Namespace: "test",
+ },
+ }, true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ kubeClient := &HookFailingKubeClient{
+ kubefake.PrintingKubeClient{Out: io.Discard}, tc.failOn, []resource.Info{},
+ }
+
+ configuration := &Configuration{
+ Releases: storage.Init(driver.NewMemory()),
+ KubeClient: kubeClient,
+ Capabilities: common.DefaultCapabilities,
+ }
+
+ serverSideApply := true
+ err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply)
+
+ if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
+ t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord)
+ }
+
+ if err != nil && !tc.expectError {
+ t.Fatalf("Got an unexpected error.")
+ }
+
+ if err == nil && tc.expectError {
+ t.Fatalf("Expected and error but did not get it.")
+ }
+ })
+ }
+}
diff --git a/pkg/action/install.go b/pkg/action/install.go
index f1896351e..5ae12904d 100644
--- a/pkg/action/install.go
+++ b/pkg/action/install.go
@@ -19,19 +19,22 @@ package action
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
+ "io/fs"
+ "log/slog"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"sync"
+ "sync/atomic"
"text/template"
"time"
"github.com/Masterminds/sprig/v3"
- "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -39,6 +42,8 @@ import (
"k8s.io/cli-runtime/pkg/resource"
"sigs.k8s.io/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/cli"
@@ -46,11 +51,11 @@ import (
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
- "helm.sh/helm/v4/pkg/postrender"
+ "helm.sh/helm/v4/pkg/postrenderer"
"helm.sh/helm/v4/pkg/registry"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
- "helm.sh/helm/v4/pkg/repo"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/repo/v1"
"helm.sh/helm/v4/pkg/storage"
"helm.sh/helm/v4/pkg/storage/driver"
)
@@ -69,28 +74,38 @@ type Install struct {
ChartPathOptions
- ClientOnly bool
- Force bool
+ ClientOnly bool
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the install anyway.
+ //
+ // This should be used with caution.
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply when true (default) will enable changes to be applied via Kubernetes server-side apply
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply bool
CreateNamespace bool
DryRun bool
DryRunOption string
// HideSecret can be set to true when DryRun is enabled in order to hide
// Kubernetes Secrets in the output. It cannot be used outside of DryRun.
- HideSecret bool
- DisableHooks bool
- Replace bool
- Wait bool
- WaitForJobs bool
- Devel bool
- DependencyUpdate bool
- Timeout time.Duration
- Namespace string
- ReleaseName string
- GenerateName bool
- NameTemplate string
- Description string
- OutputDir string
- Atomic bool
+ HideSecret bool
+ DisableHooks bool
+ Replace bool
+ WaitStrategy kube.WaitStrategy
+ WaitForJobs bool
+ Devel bool
+ DependencyUpdate bool
+ Timeout time.Duration
+ Namespace string
+ ReleaseName string
+ GenerateName bool
+ NameTemplate string
+ Description string
+ OutputDir string
+ // RollbackOnFailure enables rolling back (uninstalling) the release on failure if set
+ RollbackOnFailure bool
SkipCRDs bool
SubNotes bool
HideNotes bool
@@ -101,8 +116,8 @@ type Install struct {
// KubeVersion allows specifying a custom kubernetes version to use and
// APIVersions allows a manual set of supported API Versions to be passed
// (for things like templating). These are ignored if ClientOnly is false
- KubeVersion *chartutil.KubeVersion
- APIVersions chartutil.VersionSet
+ KubeVersion *common.KubeVersion
+ APIVersions common.VersionSet
// Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false
IsUpgrade bool
// Enable DNS lookups when rendering templates
@@ -112,9 +127,10 @@ type Install struct {
UseReleaseName bool
// TakeOwnership will ignore the check for helm annotations and take ownership of the resources.
TakeOwnership bool
- PostRenderer postrender.PostRenderer
+ PostRenderer postrenderer.PostRenderer
// Lock to control raceconditions when the process receives a SIGTERM
- Lock sync.Mutex
+ Lock sync.Mutex
+ goroutineCount atomic.Int32
}
// ChartPathOptions captures common options used for controlling chart paths
@@ -140,21 +156,22 @@ type ChartPathOptions struct {
// NewInstall creates a new Install object with the given configuration.
func NewInstall(cfg *Configuration) *Install {
in := &Install{
- cfg: cfg,
+ cfg: cfg,
+ ServerSideApply: true,
}
- in.ChartPathOptions.registryClient = cfg.RegistryClient
+ in.registryClient = cfg.RegistryClient
return in
}
// SetRegistryClient sets the registry client for the install action
func (i *Install) SetRegistryClient(registryClient *registry.Client) {
- i.ChartPathOptions.registryClient = registryClient
+ i.registryClient = registryClient
}
// GetRegistryClient get the registry client.
func (i *Install) GetRegistryClient() *registry.Client {
- return i.ChartPathOptions.registryClient
+ return i.registryClient
}
func (i *Install) installCRDs(crds []chart.CRD) error {
@@ -164,24 +181,30 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
// Read in the resources
res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false)
if err != nil {
- return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
+ return fmt.Errorf("failed to install CRD %s: %w", obj.Name, err)
}
// Send them to Kube
- if _, err := i.cfg.KubeClient.Create(res); err != nil {
+ if _, err := i.cfg.KubeClient.Create(
+ res,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts)); err != nil {
// If the error is CRD already exists, continue.
if apierrors.IsAlreadyExists(err) {
crdName := res[0].Name
- i.cfg.Log("CRD %s is already present. Skipping.", crdName)
+ slog.Debug("CRD is already present. Skipping", "crd", crdName)
continue
}
- return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
+ return fmt.Errorf("failed to install CRD %s: %w", obj.Name, err)
}
totalItems = append(totalItems, res...)
}
if len(totalItems) > 0 {
+ waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
+ if err != nil {
+ return fmt.Errorf("unable to get waiter: %w", err)
+ }
// Give time for the CRD to be recognized.
- if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil {
+ if err := waiter.Wait(totalItems, 60*time.Second); err != nil {
return err
}
@@ -196,7 +219,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
- i.cfg.Log("Clearing discovery cache")
+ slog.Debug("clearing discovery cache")
discoveryClient.Invalidate()
_, _ = discoveryClient.ServerGroups()
@@ -209,7 +232,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
return err
}
if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
- i.cfg.Log("Clearing REST mapper cache")
+ slog.Debug("clearing REST mapper cache")
resettable.Reset()
}
}
@@ -225,7 +248,7 @@ func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release.
return i.RunWithContext(ctx, chrt, vals)
}
-// Run executes the installation with Context
+// RunWithContext executes the installation with Context
//
// When the task is cancelled through ctx, the function returns and the install
// proceeds in the background.
@@ -233,25 +256,25 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
if !i.ClientOnly {
if err := i.cfg.KubeClient.IsReachable(); err != nil {
- i.cfg.Log(fmt.Sprintf("ERROR: Cluster reachability check failed: %v", err))
- return nil, errors.Wrap(err, "cluster reachability check failed")
+ slog.Error(fmt.Sprintf("cluster reachability check failed: %v", err))
+ return nil, fmt.Errorf("cluster reachability check failed: %w", err)
}
}
// HideSecret must be used with dry run. Otherwise, return an error.
if !i.isDryRun() && i.HideSecret {
- i.cfg.Log("ERROR: Hiding Kubernetes secrets requires a dry-run mode")
- return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
+ slog.Error("hiding Kubernetes secrets requires a dry-run mode")
+ return nil, errors.New("hiding Kubernetes secrets requires a dry-run mode")
}
if err := i.availableName(); err != nil {
- i.cfg.Log(fmt.Sprintf("ERROR: Release name check failed: %v", err))
- return nil, errors.Wrap(err, "release name check failed")
+ slog.Error("release name check failed", slog.Any("error", err))
+ return nil, fmt.Errorf("release name check failed: %w", err)
}
if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
- i.cfg.Log(fmt.Sprintf("ERROR: Processing chart dependencies failed: %v", err))
- return nil, errors.Wrap(err, "chart dependencies processing failed")
+ slog.Error("chart dependencies processing failed", slog.Any("error", err))
+ return nil, fmt.Errorf("chart dependencies processing failed: %w", err)
}
var interactWithRemote bool
@@ -264,7 +287,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
// On dry run, bail here
if i.isDryRun() {
- i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
+ slog.Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
} else if err := i.installCRDs(crds); err != nil {
return nil, err
}
@@ -273,7 +296,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if i.ClientOnly {
// Add mock objects in here so it doesn't use Kube API server
// NOTE(bacongobbler): used for `helm template`
- i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy()
+ i.cfg.Capabilities = common.DefaultCapabilities.Copy()
if i.KubeVersion != nil {
i.cfg.Capabilities.KubeVersion = *i.KubeVersion
}
@@ -284,12 +307,14 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
mem.SetNamespace(i.Namespace)
i.cfg.Releases = storage.Init(mem)
} else if !i.ClientOnly && len(i.APIVersions) > 0 {
- i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
+ slog.Debug("API Version list given outside of client only mode, this list will be ignored")
}
- // Make sure if Atomic is set, that wait is set as well. This makes it so
+ // Make sure if RollbackOnFailure is set, that wait is set as well. This makes it so
// the user doesn't have to specify both
- i.Wait = i.Wait || i.Atomic
+ if i.WaitStrategy == kube.HookOnlyStrategy && i.RollbackOnFailure {
+ i.WaitStrategy = kube.StatusWatcherStrategy
+ }
caps, err := i.cfg.getCapabilities()
if err != nil {
@@ -298,14 +323,14 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// special case for helm template --is-upgrade
isUpgrade := i.IsUpgrade && i.isDryRun()
- options := chartutil.ReleaseOptions{
+ options := common.ReleaseOptions{
Name: i.ReleaseName,
Namespace: i.Namespace,
Revision: 1,
IsInstall: !isUpgrade,
IsUpgrade: isUpgrade,
}
- valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation)
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation)
if err != nil {
return nil, err
}
@@ -335,10 +360,10 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
var toBeAdopted kube.ResourceList
resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation)
if err != nil {
- return nil, errors.Wrap(err, "unable to build kubernetes objects from release manifest")
+ return nil, fmt.Errorf("unable to build kubernetes objects from release manifest: %w", err)
}
- // It is safe to use "force" here because these are resources currently rendered by the chart.
+ // It is safe to use "forceOwnership" here because these are resources currently rendered by the chart.
err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true))
if err != nil {
return nil, err
@@ -357,7 +382,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
}
if err != nil {
- return nil, errors.Wrap(err, "Unable to continue with install")
+ return nil, fmt.Errorf("unable to continue with install: %w", err)
}
}
@@ -388,7 +413,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
if err != nil {
return nil, err
}
- if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) {
+ if _, err := i.cfg.KubeClient.Create(
+ resourceList,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false)); err != nil && !apierrors.IsAlreadyExists(err) {
return nil, err
}
}
@@ -400,8 +427,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
}
}
- // Store the release in history before continuing (new in Helm 3). We always know
- // that this is a create operation.
+ // Store the release in history before continuing. We always know that this is a create operation
if err := i.cfg.Releases.Create(rel); err != nil {
// We could try to recover gracefully here, but since nothing has been installed
// yet, this is probably safer than trying to continue when we know storage is
@@ -424,8 +450,10 @@ func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, t
resultChan := make(chan Msg, 1)
go func() {
+ i.goroutineCount.Add(1)
rel, err := i.performInstall(rel, toBeAdopted, resources)
resultChan <- Msg{rel, err}
+ i.goroutineCount.Add(-1)
}()
select {
case <-ctx.Done():
@@ -436,6 +464,11 @@ func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, t
}
}
+// getGoroutineCount return the number of running routines
+func (i *Install) getGoroutineCount() int32 {
+ return i.goroutineCount.Load()
+}
+
// isDryRun returns true if Upgrade is set to run as a DryRun
func (i *Install) isDryRun() bool {
if i.DryRun || i.DryRunOption == "client" || i.DryRunOption == "server" || i.DryRunOption == "true" {
@@ -448,7 +481,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
var err error
// pre-install hooks
if !i.DisableHooks {
- if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+ if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil {
return rel, fmt.Errorf("failed pre-install: %s", err)
}
}
@@ -457,27 +490,39 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// do an update, but it's not clear whether we WANT to do an update if the reuse is set
// to true, since that is basically an upgrade operation.
if len(toBeAdopted) == 0 && len(resources) > 0 {
- _, err = i.cfg.KubeClient.Create(resources)
+ _, err = i.cfg.KubeClient.Create(
+ resources,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false))
} else if len(resources) > 0 {
- _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.Force)
+ updateThreeWayMergeForUnstructured := i.TakeOwnership && !i.ServerSideApply // Use three-way merge when taking ownership (and not using server-side apply)
+ _, err = i.cfg.KubeClient.Update(
+ toBeAdopted,
+ resources,
+ kube.ClientUpdateOptionForceReplace(i.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts),
+ kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(true))
}
if err != nil {
return rel, err
}
- if i.Wait {
- if i.WaitForJobs {
- err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout)
- } else {
- err = i.cfg.KubeClient.Wait(resources, i.Timeout)
- }
- if err != nil {
- return rel, err
- }
+ waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
+ if err != nil {
+ return rel, fmt.Errorf("failed to get waiter: %w", err)
+ }
+
+ if i.WaitForJobs {
+ err = waiter.WaitWithJobs(resources, i.Timeout)
+ } else {
+ err = waiter.Wait(resources, i.Timeout)
+ }
+ if err != nil {
+ return rel, err
}
if !i.DisableHooks {
- if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+ if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil {
return rel, fmt.Errorf("failed post-install: %s", err)
}
}
@@ -496,7 +541,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
// One possible strategy would be to do a timed retry to see if we can get
// this stored in the future.
if err := i.recordRelease(rel); err != nil {
- i.cfg.Log("failed to record the release: %s", err)
+ slog.Error("failed to record the release", slog.Any("error", err))
}
return rel, nil
@@ -504,16 +549,16 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
- if i.Atomic {
- i.cfg.Log("Install failed and atomic is set, uninstalling release")
+ if i.RollbackOnFailure {
+ slog.Debug("install failed and rollback-on-failure is set, uninstalling release", "release", i.ReleaseName)
uninstall := NewUninstall(i.cfg)
uninstall.DisableHooks = i.DisableHooks
uninstall.KeepHistory = false
uninstall.Timeout = i.Timeout
if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil {
- return rel, errors.Wrapf(uninstallErr, "an error occurred while uninstalling the release. original install error: %s", err)
+ return rel, fmt.Errorf("an error occurred while uninstalling the release. original install error: %w: %w", err, uninstallErr)
}
- return rel, errors.Wrapf(err, "release %s failed, and has been uninstalled due to atomic being set", i.ReleaseName)
+ return rel, fmt.Errorf("release %s failed, and has been uninstalled due to rollback-on-failure being set: %w", i.ReleaseName, err)
}
i.recordRelease(rel) // Ignore the error, since we have another error to deal with.
return rel, err
@@ -531,7 +576,7 @@ func (i *Install) availableName() error {
start := i.ReleaseName
if err := chartutil.ValidateReleaseName(start); err != nil {
- return errors.Wrapf(err, "release name %q", start)
+ return fmt.Errorf("release name %q: %w", start, err)
}
// On dry run, bail here
if i.isDryRun() {
@@ -554,7 +599,8 @@ func (i *Install) availableName() error {
// createRelease creates a new release object
func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release {
ts := i.cfg.Now()
- return &release.Release{
+
+ r := &release.Release{
Name: i.ReleaseName,
Namespace: i.Namespace,
Chart: chrt,
@@ -564,9 +610,12 @@ func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{
LastDeployed: ts,
Status: release.StatusUnknown,
},
- Version: 1,
- Labels: labels,
+ Version: 1,
+ Labels: labels,
+ ApplyMethod: string(determineReleaseSSApplyMethod(i.ServerSideApply)),
}
+
+ return r
}
// recordRelease with an update operation in case reuse has been set.
@@ -618,7 +667,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
defer f.Close()
- _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+ _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
if err != nil {
return err
@@ -639,7 +688,7 @@ func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
func ensureDirectoryForFile(file string) error {
baseDir := path.Dir(file)
_, err := os.Stat(baseDir)
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
@@ -661,7 +710,7 @@ func (i *Install) NameAndChart(args []string) (string, string, error) {
}
if len(args) > 2 {
- return args[0], args[1], errors.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+ return args[0], args[1], fmt.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
}
if len(args) == 2 {
@@ -726,11 +775,30 @@ OUTER:
}
if len(missing) > 0 {
- return errors.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+ return fmt.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
}
return nil
}
+func portOrDefault(u *url.URL) string {
+ if p := u.Port(); p != "" {
+ return p
+ }
+
+ switch u.Scheme {
+ case "http":
+ return "80"
+ case "https":
+ return "443"
+ default:
+ return ""
+ }
+}
+
+func urlEqual(u1, u2 *url.URL) bool {
+ return u1.Scheme == u2.Scheme && u1.Hostname() == u2.Hostname() && portOrDefault(u1) == portOrDefault(u2)
+}
+
// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
//
// This does not ensure that the chart is well-formed; only that the requested filename exists.
@@ -755,14 +823,14 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (
return abs, err
}
if c.Verify {
- if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil {
+ if _, err := downloader.VerifyChart(abs, abs+".prov", c.Keyring); err != nil {
return "", err
}
}
return abs, nil
}
if filepath.IsAbs(name) || strings.HasPrefix(name, ".") {
- return name, errors.Errorf("path %q not found", name)
+ return name, fmt.Errorf("path %q not found", name)
}
dl := downloader.ChartDownloader{
@@ -778,6 +846,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (
},
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
RegistryClient: c.registryClient,
}
@@ -818,7 +887,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (
// Host on URL (returned from url.Parse) contains the port if present.
// This check ensures credentials are not passed between different
// services on different ports.
- if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) {
+ if c.PassCredentialsAll || urlEqual(u1, u2) {
dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
} else {
dl.Options = append(dl.Options, getter.WithBasicAuth("", ""))
@@ -831,7 +900,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (
return "", err
}
- filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache)
+ filename, _, err := dl.DownloadToCache(name, version)
if err != nil {
return "", err
}
diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go
index 869055657..b2b1508be 100644
--- a/pkg/action/install_test.go
+++ b/pkg/action/install_test.go
@@ -19,22 +19,33 @@ package action
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
+ "io/fs"
+ "net/http"
+ "net/url"
"os"
"path/filepath"
"regexp"
- "runtime"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kuberuntime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest/fake"
"helm.sh/helm/v4/internal/test"
- chart "helm.sh/helm/v4/pkg/chart/v2"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage/driver"
@@ -47,7 +58,64 @@ type nameTemplateTestCase struct {
expectedErrorStr string
}
+func createDummyResourceList(owned bool) kube.ResourceList {
+ obj := &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dummyName",
+ Namespace: "spaced",
+ },
+ }
+
+ if owned {
+ obj.Labels = map[string]string{
+ "app.kubernetes.io/managed-by": "Helm",
+ }
+ obj.Annotations = map[string]string{
+ "meta.helm.sh/release-name": "test-install-release",
+ "meta.helm.sh/release-namespace": "spaced",
+ }
+ }
+
+ resInfo := resource.Info{
+ Name: "dummyName",
+ Namespace: "spaced",
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
+ GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+ Scope: meta.RESTScopeNamespace,
+ },
+ Object: obj,
+ }
+ body := io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(appsv1Codec, obj))))
+
+ resInfo.Client = &fake.RESTClient{
+ GroupVersion: schema.GroupVersion{Group: "apps", Version: "v1"},
+ NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
+ Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) {
+ header := http.Header{}
+ header.Set("Content-Type", kuberuntime.ContentTypeJSON)
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Header: header,
+ Body: body,
+ }, nil
+ }),
+ }
+ var resourceList kube.ResourceList
+ resourceList.Append(&resInfo)
+ return resourceList
+}
+
+func installActionWithConfig(config *Configuration) *Install {
+ instAction := NewInstall(config)
+ instAction.Namespace = "spaced"
+ instAction.ReleaseName = "test-install-release"
+
+ return instAction
+}
+
func installAction(t *testing.T) *Install {
+ t.Helper()
config := actionConfigFixture(t)
instAction := NewInstall(config)
instAction.Namespace = "spaced"
@@ -62,7 +130,7 @@ func TestInstallRelease(t *testing.T) {
instAction := installAction(t)
vals := map[string]interface{}{}
- ctx, done := context.WithCancel(context.Background())
+ ctx, done := context.WithCancel(t.Context())
res, err := instAction.RunWithContext(ctx, buildChart(), vals)
if err != nil {
t.Fatalf("Failed install: %s", err)
@@ -92,6 +160,61 @@ func TestInstallRelease(t *testing.T) {
is.Equal(lastRelease.Info.Status, release.StatusDeployed)
}
+func TestInstallReleaseWithTakeOwnership_ResourceNotOwned(t *testing.T) {
+ // This test will test checking ownership of a resource
+ // returned by the fake client. If the resource is not
+ // owned by the chart, ownership is taken.
+ // To verify ownership has been taken, the fake client
+ // needs to store state which is a bigger rewrite.
+ // TODO: Ensure fake kube client stores state. Maybe using
+ // "k8s.io/client-go/kubernetes/fake" could be sufficient? i.e
+ // "Client{Namespace: namespace, kubeClient: k8sfake.NewClientset()}"
+
+ is := assert.New(t)
+
+ // Resource list from cluster is NOT owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(false))
+ instAction := installActionWithConfig(config)
+ instAction.TakeOwnership = true
+ res, err := instAction.Run(buildChart(), nil)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+
+ rel, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallReleaseWithTakeOwnership_ResourceOwned(t *testing.T) {
+ is := assert.New(t)
+
+ // Resource list from cluster is owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(true))
+ instAction := installActionWithConfig(config)
+ instAction.TakeOwnership = false
+ res, err := instAction.Run(buildChart(), nil)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ rel, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallReleaseWithTakeOwnership_ResourceOwnedNoFlag(t *testing.T) {
+ is := assert.New(t)
+
+ // Resource list from cluster is NOT owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(false))
+ instAction := installActionWithConfig(config)
+ _, err := instAction.Run(buildChart(), nil)
+ is.Error(err)
+ is.Contains(err.Error(), "unable to continue with install")
+}
+
func TestInstallReleaseWithValues(t *testing.T) {
is := assert.New(t)
instAction := installAction(t)
@@ -133,7 +256,7 @@ func TestInstallReleaseClientOnly(t *testing.T) {
instAction.ClientOnly = true
instAction.Run(buildChart(), nil) // disregard output
- is.Equal(instAction.cfg.Capabilities, chartutil.DefaultCapabilities)
+ is.Equal(instAction.cfg.Capabilities, common.DefaultCapabilities)
is.Equal(instAction.cfg.KubeClient, &kubefake.PrintingKubeClient{Out: io.Discard})
}
@@ -205,8 +328,8 @@ func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) {
}
rel, err := instAction.cfg.Releases.Get(res.Name, res.Version)
- is.Equal("with-notes", rel.Name)
is.NoError(err)
+ is.Equal("with-notes", rel.Name)
is.Equal("parent", rel.Info.Notes)
is.Equal(rel.Info.Description, "Install complete")
}
@@ -224,8 +347,8 @@ func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) {
}
rel, err := instAction.cfg.Releases.Get(res.Name, res.Version)
- is.Equal("with-notes", rel.Name)
is.NoError(err)
+ is.Equal("with-notes", rel.Name)
// test run can return as either 'parent\nchild' or 'child\nparent'
if !strings.Contains(rel.Info.Notes, "parent") && !strings.Contains(rel.Info.Notes, "child") {
t.Fatalf("Expected 'parent\nchild' or 'child\nparent', got '%s'", rel.Info.Notes)
@@ -304,7 +427,7 @@ func TestInstallRelease_DryRun_Lookup(t *testing.T) {
vals := map[string]interface{}{}
mockChart := buildChart(withSampleTemplates())
- mockChart.Templates = append(mockChart.Templates, &chart.File{
+ mockChart.Templates = append(mockChart.Templates, &common.File{
Name: "templates/lookup",
Data: []byte(`goodbye: {{ lookup "v1" "Namespace" "" "___" }}`),
})
@@ -323,13 +446,13 @@ func TestInstallReleaseIncorrectTemplate_DryRun(t *testing.T) {
instAction.DryRun = true
vals := map[string]interface{}{}
_, err := instAction.Run(buildChart(withSampleIncludingIncorrectTemplates()), vals)
- expectedErr := "\"hello/templates/incorrect\" at <.Values.bad.doh>: nil pointer evaluating interface {}.doh"
+ expectedErr := `hello/templates/incorrect:1:10
+ executing "hello/templates/incorrect" at <.Values.bad.doh>:
+ nil pointer evaluating interface {}.doh`
if err == nil {
t.Fatalf("Install should fail containing error: %s", expectedErr)
}
- if err != nil {
- is.Contains(err.Error(), expectedErr)
- }
+ is.Contains(err.Error(), expectedErr)
}
func TestInstallRelease_NoHooks(t *testing.T) {
@@ -411,17 +534,17 @@ func TestInstallRelease_Wait(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer
- instAction.Wait = true
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
- goroutines := runtime.NumGoroutine()
+ goroutines := instAction.getGoroutineCount()
res, err := instAction.Run(buildChart(), vals)
is.Error(err)
is.Contains(res.Info.Description, "I timed out")
is.Equal(res.Info.Status, release.StatusFailed)
- is.Equal(goroutines, runtime.NumGoroutine())
+ is.Equal(goroutines, instAction.getGoroutineCount())
}
func TestInstallRelease_Wait_Interrupted(t *testing.T) {
is := assert.New(t)
@@ -430,21 +553,21 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second
instAction.cfg.KubeClient = failer
- instAction.Wait = true
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(t.Context())
time.AfterFunc(time.Second, cancel)
- goroutines := runtime.NumGoroutine()
+ goroutines := instAction.getGoroutineCount()
_, err := instAction.RunWithContext(ctx, buildChart(), vals)
is.Error(err)
is.Contains(err.Error(), "context canceled")
- is.Equal(goroutines+1, runtime.NumGoroutine()) // installation goroutine still is in background
- time.Sleep(10 * time.Second) // wait for goroutine to finish
- is.Equal(goroutines, runtime.NumGoroutine())
+ is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background
+ time.Sleep(10 * time.Second) // wait for goroutine to finish
+ is.Equal(goroutines, instAction.getGoroutineCount())
}
func TestInstallRelease_WaitForJobs(t *testing.T) {
is := assert.New(t)
@@ -453,7 +576,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer
- instAction.Wait = true
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
instAction.WaitForJobs = true
vals := map[string]interface{}{}
@@ -463,16 +586,16 @@ func TestInstallRelease_WaitForJobs(t *testing.T) {
is.Equal(res.Info.Status, release.StatusFailed)
}
-func TestInstallRelease_Atomic(t *testing.T) {
+func TestInstallRelease_RollbackOnFailure(t *testing.T) {
is := assert.New(t)
- t.Run("atomic uninstall succeeds", func(t *testing.T) {
+ t.Run("rollback-on-failure uninstall succeeds", func(t *testing.T) {
instAction := installAction(t)
instAction.ReleaseName = "come-fail-away"
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer
- instAction.Atomic = true
+ instAction.RollbackOnFailure = true
// disabling hooks to avoid an early fail when
// WaitForDelete is called on the pre-delete hook execution
instAction.DisableHooks = true
@@ -481,7 +604,7 @@ func TestInstallRelease_Atomic(t *testing.T) {
res, err := instAction.Run(buildChart(), vals)
is.Error(err)
is.Contains(err.Error(), "I timed out")
- is.Contains(err.Error(), "atomic")
+ is.Contains(err.Error(), "rollback-on-failure")
// Now make sure it isn't in storage anymore
_, err = instAction.cfg.Releases.Get(res.Name, res.Version)
@@ -489,14 +612,14 @@ func TestInstallRelease_Atomic(t *testing.T) {
is.Equal(err, driver.ErrReleaseNotFound)
})
- t.Run("atomic uninstall fails", func(t *testing.T) {
+ t.Run("rollback-on-failure uninstall fails", func(t *testing.T) {
instAction := installAction(t)
instAction.ReleaseName = "come-fail-away-with-me"
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
failer.DeleteError = fmt.Errorf("uninstall fail")
instAction.cfg.KubeClient = failer
- instAction.Atomic = true
+ instAction.RollbackOnFailure = true
vals := map[string]interface{}{}
_, err := instAction.Run(buildChart(), vals)
@@ -506,7 +629,7 @@ func TestInstallRelease_Atomic(t *testing.T) {
is.Contains(err.Error(), "an error occurred while uninstalling the release")
})
}
-func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
+func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) {
is := assert.New(t)
instAction := installAction(t)
@@ -514,22 +637,27 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second
instAction.cfg.KubeClient = failer
- instAction.Atomic = true
+ instAction.RollbackOnFailure = true
vals := map[string]interface{}{}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(t.Context())
time.AfterFunc(time.Second, cancel)
+ goroutines := instAction.getGoroutineCount()
+
res, err := instAction.RunWithContext(ctx, buildChart(), vals)
is.Error(err)
is.Contains(err.Error(), "context canceled")
- is.Contains(err.Error(), "atomic")
+ is.Contains(err.Error(), "rollback-on-failure")
is.Contains(err.Error(), "uninstalled")
// Now make sure it isn't in storage anymore
_, err = instAction.cfg.Releases.Get(res.Name, res.Version)
is.Error(err)
is.Equal(err, driver.ErrReleaseNotFound)
+ is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background
+ time.Sleep(10 * time.Second) // wait for goroutine to finish
+ is.Equal(goroutines, instAction.getGoroutineCount())
}
func TestNameTemplate(t *testing.T) {
@@ -630,7 +758,7 @@ func TestInstallReleaseOutputDir(t *testing.T) {
test.AssertGoldenFile(t, filepath.Join(dir, "hello/templates/rbac"), "rbac.txt")
_, err = os.Stat(filepath.Join(dir, "hello/templates/empty"))
- is.True(os.IsNotExist(err))
+ is.True(errors.Is(err, fs.ErrNotExist))
}
func TestInstallOutputDirWithReleaseName(t *testing.T) {
@@ -666,7 +794,7 @@ func TestInstallOutputDirWithReleaseName(t *testing.T) {
test.AssertGoldenFile(t, filepath.Join(newDir, "hello/templates/rbac"), "rbac.txt")
_, err = os.Stat(filepath.Join(newDir, "hello/templates/empty"))
- is.True(os.IsNotExist(err))
+ is.True(errors.Is(err, fs.ErrNotExist))
}
func TestNameAndChart(t *testing.T) {
@@ -758,7 +886,6 @@ func TestNameAndChartGenerateName(t *testing.T) {
}
for _, tc := range tests {
- tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
@@ -802,3 +929,84 @@ func TestInstallWithSystemLabels(t *testing.T) {
is.Equal(fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()), err)
}
+
+func TestUrlEqual(t *testing.T) {
+ is := assert.New(t)
+
+ tests := []struct {
+ name string
+ url1 string
+ url2 string
+ expected bool
+ }{
+ {
+ name: "identical URLs",
+ url1: "https://example.com:443",
+ url2: "https://example.com:443",
+ expected: true,
+ },
+ {
+ name: "same host, scheme, default HTTPS port vs explicit",
+ url1: "https://example.com",
+ url2: "https://example.com:443",
+ expected: true,
+ },
+ {
+ name: "same host, scheme, default HTTP port vs explicit",
+ url1: "http://example.com",
+ url2: "http://example.com:80",
+ expected: true,
+ },
+ {
+ name: "different schemes",
+ url1: "http://example.com",
+ url2: "https://example.com",
+ expected: false,
+ },
+ {
+ name: "different hosts",
+ url1: "https://example.com",
+ url2: "https://www.example.com",
+ expected: false,
+ },
+ {
+ name: "different ports",
+ url1: "https://example.com:8080",
+ url2: "https://example.com:9090",
+ expected: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ u1, err := url.Parse(tc.url1)
+ if err != nil {
+ t.Fatalf("Failed to parse URL1 %s: %v", tc.url1, err)
+ }
+ u2, err := url.Parse(tc.url2)
+ if err != nil {
+ t.Fatalf("Failed to parse URL2 %s: %v", tc.url2, err)
+ }
+
+ is.Equal(tc.expected, urlEqual(u1, u2))
+ })
+ }
+}
+
+func TestInstallRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ instAction := NewInstall(config)
+ instAction.ClientOnly = false
+ ctx, done := context.WithCancel(t.Context())
+ res, err := instAction.RunWithContext(ctx, nil, nil)
+
+ done()
+ assert.Nil(t, res)
+ assert.ErrorContains(t, err, "connection refused")
+}
diff --git a/pkg/action/lint.go b/pkg/action/lint.go
index 451eb65b0..208fd4637 100644
--- a/pkg/action/lint.go
+++ b/pkg/action/lint.go
@@ -17,15 +17,15 @@ limitations under the License.
package action
import (
+ "fmt"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint"
- "helm.sh/helm/v4/pkg/lint/support"
)
// Lint is the action for checking that the semantics of a chart are well-formed.
@@ -37,7 +37,7 @@ type Lint struct {
WithSubcharts bool
Quiet bool
SkipSchemaValidation bool
- KubeVersion *chartutil.KubeVersion
+ KubeVersion *common.KubeVersion
}
// LintResult is the result of Lint
@@ -87,33 +87,33 @@ func HasWarningsOrErrors(result *LintResult) bool {
return len(result.Errors) > 0
}
-func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) (support.Linter, error) {
+func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) (support.Linter, error) {
var chartPath string
linter := support.Linter{}
if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
tempDir, err := os.MkdirTemp("", "helm-lint")
if err != nil {
- return linter, errors.Wrap(err, "unable to create temp dir to extract tarball")
+ return linter, fmt.Errorf("unable to create temp dir to extract tarball: %w", err)
}
defer os.RemoveAll(tempDir)
file, err := os.Open(path)
if err != nil {
- return linter, errors.Wrap(err, "unable to open tarball")
+ return linter, fmt.Errorf("unable to open tarball: %w", err)
}
defer file.Close()
if err = chartutil.Expand(tempDir, file); err != nil {
- return linter, errors.Wrap(err, "unable to extract tarball")
+ return linter, fmt.Errorf("unable to extract tarball: %w", err)
}
files, err := os.ReadDir(tempDir)
if err != nil {
- return linter, errors.Wrapf(err, "unable to read temporary output directory %s", tempDir)
+ return linter, fmt.Errorf("unable to read temporary output directory %s: %w", tempDir, err)
}
if !files[0].IsDir() {
- return linter, errors.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
+ return linter, fmt.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
}
chartPath = filepath.Join(tempDir, files[0].Name())
@@ -123,7 +123,7 @@ func lintChart(path string, vals map[string]interface{}, namespace string, kubeV
// Guard: Error out if this is not a chart.
if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil {
- return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart")
+ return linter, fmt.Errorf("unable to check Chart.yaml file in chart: %w", err)
}
return lint.RunAll(
diff --git a/pkg/action/lint_test.go b/pkg/action/lint_test.go
index a01580b0a..613149a4d 100644
--- a/pkg/action/lint_test.go
+++ b/pkg/action/lint_test.go
@@ -154,12 +154,12 @@ func TestLint_ChartWithWarnings(t *testing.T) {
}
})
- t.Run("should pass with no errors when strict", func(t *testing.T) {
+ t.Run("should fail with one error when strict", func(t *testing.T) {
testCharts := []string{chartWithNoTemplatesDir}
testLint := NewLint()
testLint.Strict = true
- if result := testLint.Run(testCharts, values); len(result.Errors) != 0 {
- t.Error("expected no errors, but got", len(result.Errors))
+ if result := testLint.Run(testCharts, values); len(result.Errors) != 1 {
+ t.Error("expected one error, but got", len(result.Errors))
}
})
}
diff --git a/pkg/action/list.go b/pkg/action/list.go
index 82500582f..c6d6f2037 100644
--- a/pkg/action/list.go
+++ b/pkg/action/list.go
@@ -22,8 +22,8 @@ import (
"k8s.io/apimachinery/pkg/labels"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
)
// ListStates represents zero or more status codes that a list item may have set
diff --git a/pkg/action/list_test.go b/pkg/action/list_test.go
index e41949310..75737d635 100644
--- a/pkg/action/list_test.go
+++ b/pkg/action/list_test.go
@@ -17,10 +17,13 @@ limitations under the License.
package action
import (
+ "errors"
+ "io"
"testing"
"github.com/stretchr/testify/assert"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage"
)
@@ -64,13 +67,14 @@ func TestList_Empty(t *testing.T) {
}
func newListFixture(t *testing.T) *List {
+ t.Helper()
return NewList(actionConfigFixture(t))
}
func TestList_OneNamespace(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 3)
@@ -79,7 +83,7 @@ func TestList_OneNamespace(t *testing.T) {
func TestList_AllNamespaces(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
lister.AllNamespaces = true
lister.SetStateMask()
list, err := lister.Run()
@@ -91,7 +95,7 @@ func TestList_Sort(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
lister.Sort = ByNameDesc // Other sorts are tested elsewhere
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 3)
@@ -104,7 +108,7 @@ func TestList_Limit(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
lister.Limit = 2
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 2)
@@ -117,7 +121,7 @@ func TestList_BigLimit(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
lister.Limit = 20
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 3)
@@ -133,7 +137,7 @@ func TestList_LimitOffset(t *testing.T) {
lister := newListFixture(t)
lister.Limit = 2
lister.Offset = 1
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 2)
@@ -148,7 +152,7 @@ func TestList_LimitOffsetOutOfBounds(t *testing.T) {
lister := newListFixture(t)
lister.Limit = 2
lister.Offset = 3 // Last item is index 2
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
list, err := lister.Run()
is.NoError(err)
is.Len(list, 0)
@@ -163,7 +167,7 @@ func TestList_LimitOffsetOutOfBounds(t *testing.T) {
func TestList_StateMask(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
one, err := lister.cfg.Releases.Get("one", 1)
is.NoError(err)
one.SetStatus(release.StatusUninstalled, "uninstalled")
@@ -193,7 +197,7 @@ func TestList_StateMaskWithStaleRevisions(t *testing.T) {
lister := newListFixture(t)
lister.StateMask = ListFailed
- makeMeSomeReleasesWithStaleFailure(lister.cfg.Releases, t)
+ makeMeSomeReleasesWithStaleFailure(t, lister.cfg.Releases)
res, err := lister.Run()
@@ -205,7 +209,7 @@ func TestList_StateMaskWithStaleRevisions(t *testing.T) {
is.Equal("failed", res[0].Name)
}
-func makeMeSomeReleasesWithStaleFailure(store *storage.Storage, t *testing.T) {
+func makeMeSomeReleasesWithStaleFailure(t *testing.T, store *storage.Storage) {
t.Helper()
one := namedReleaseStub("clean", release.StatusDeployed)
one.Namespace = "default"
@@ -242,7 +246,7 @@ func TestList_Filter(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
lister.Filter = "th."
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
res, err := lister.Run()
is.NoError(err)
@@ -254,13 +258,13 @@ func TestList_FilterFailsCompile(t *testing.T) {
is := assert.New(t)
lister := newListFixture(t)
lister.Filter = "t[h.{{{"
- makeMeSomeReleases(lister.cfg.Releases, t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
_, err := lister.Run()
is.Error(err)
}
-func makeMeSomeReleases(store *storage.Storage, t *testing.T) {
+func makeMeSomeReleases(t *testing.T, store *storage.Storage) {
t.Helper()
one := releaseStub()
one.Name = "one"
@@ -366,3 +370,16 @@ func TestSelectorList(t *testing.T) {
assert.ElementsMatch(t, expectedFilteredList, res)
})
}
+
+func TestListRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ lister := NewList(config)
+ result, err := lister.Run()
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
diff --git a/pkg/action/package.go b/pkg/action/package.go
index 9ffe1722e..6e762b507 100644
--- a/pkg/action/package.go
+++ b/pkg/action/package.go
@@ -18,13 +18,15 @@ package action
import (
"bufio"
+ "errors"
"fmt"
"os"
+ "path/filepath"
"syscall"
"github.com/Masterminds/semver/v3"
- "github.com/pkg/errors"
"golang.org/x/term"
+ "sigs.k8s.io/yaml"
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@@ -39,6 +41,7 @@ type Package struct {
Key string
Keyring string
PassphraseFile string
+ cachedPassphrase []byte
Version string
AppVersion string
Destination string
@@ -55,6 +58,10 @@ type Package struct {
InsecureSkipTLSverify bool
}
+const (
+ passPhraseFileStdin = "-"
+)
+
// NewPackage creates a new Package object with the given configuration.
func NewPackage() *Package {
return &Package{}
@@ -100,7 +107,7 @@ func (p *Package) Run(path string, _ map[string]interface{}) (string, error) {
name, err := chartutil.Save(ch, dest)
if err != nil {
- return "", errors.Wrap(err, "failed to save")
+ return "", fmt.Errorf("failed to save: %w", err)
}
if p.Sign {
@@ -128,7 +135,7 @@ func (p *Package) Clearsign(filename string) error {
passphraseFetcher := promptUser
if p.PassphraseFile != "" {
- passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+ passphraseFetcher, err = p.passphraseFileFetcher(p.PassphraseFile, os.Stdin)
if err != nil {
return err
}
@@ -138,7 +145,26 @@ func (p *Package) Clearsign(filename string) error {
return err
}
- sig, err := signer.ClearSign(filename)
+ // Load the chart archive to extract metadata
+ chart, err := loader.LoadFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to load chart for signing: %w", err)
+ }
+
+ // Marshal chart metadata to YAML bytes
+ metadataBytes, err := yaml.Marshal(chart.Metadata)
+ if err != nil {
+ return fmt.Errorf("failed to marshal chart metadata: %w", err)
+ }
+
+ // Read the chart archive file
+ archiveData, err := os.ReadFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to read chart archive: %w", err)
+ }
+
+ // Use the generic provenance signing function
+ sig, err := signer.ClearSign(archiveData, filepath.Base(filename), metadataBytes)
if err != nil {
return err
}
@@ -156,25 +182,42 @@ func promptUser(name string) ([]byte, error) {
return pw, err
}
-func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
- file, err := openPassphraseFile(passphraseFile, stdin)
- if err != nil {
- return nil, err
- }
- defer file.Close()
+func (p *Package) passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+ // When reading from stdin we cache the passphrase here. If we are
+ // packaging multiple charts, we reuse the cached passphrase. This
+ // allows giving the passphrase once on stdin without failing with
+ // complaints about stdin already being closed.
+ //
+ // An alternative to this would be to omit file.Close() for stdin
+ // below and require the user to provide the same passphrase once
+ // per chart on stdin, but that does not seem very user-friendly.
+
+ if p.cachedPassphrase == nil {
+ file, err := openPassphraseFile(passphraseFile, stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
- reader := bufio.NewReader(file)
- passphrase, _, err := reader.ReadLine()
- if err != nil {
- return nil, err
+ reader := bufio.NewReader(file)
+ passphrase, _, err := reader.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ p.cachedPassphrase = passphrase
+
+ return func(_ string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
}
+
return func(_ string) ([]byte, error) {
- return passphrase, nil
+ return p.cachedPassphrase, nil
}, nil
}
func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
- if passphraseFile == "-" {
+ if passphraseFile == passPhraseFileStdin {
stat, err := stdin.Stat()
if err != nil {
return nil, err
diff --git a/pkg/action/package_test.go b/pkg/action/package_test.go
index 26eeb1a2b..12bea10dd 100644
--- a/pkg/action/package_test.go
+++ b/pkg/action/package_test.go
@@ -29,8 +29,9 @@ import (
func TestPassphraseFileFetcher(t *testing.T) {
secret := "secret"
directory := ensure.TempFile(t, "passphrase-file", []byte(secret))
+ testPkg := NewPackage()
- fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+ fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
if err != nil {
t.Fatal("Unable to create passphraseFileFetcher", err)
}
@@ -48,8 +49,9 @@ func TestPassphraseFileFetcher(t *testing.T) {
func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
secret := "secret"
directory := ensure.TempFile(t, "passphrase-file", []byte(secret+"\n\n."))
+ testPkg := NewPackage()
- fetcher, err := passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+ fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
if err != nil {
t.Fatal("Unable to create passphraseFileFetcher", err)
}
@@ -66,17 +68,48 @@ func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
func TestPassphraseFileFetcher_WithInvalidStdin(t *testing.T) {
directory := t.TempDir()
+ testPkg := NewPackage()
stdin, err := os.CreateTemp(directory, "non-existing")
if err != nil {
t.Fatal("Unable to create test file", err)
}
- if _, err := passphraseFileFetcher("-", stdin); err == nil {
+ if _, err := testPkg.passphraseFileFetcher("-", stdin); err == nil {
t.Error("Expected passphraseFileFetcher returning an error")
}
}
+func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) {
+ testPkg := NewPackage()
+ stdin, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal("Unable to create pipe", err)
+ }
+
+ passphrase := "secret-from-stdin"
+
+ go func() {
+ w.Write([]byte(passphrase + "\n"))
+ }()
+
+ for i := 0; i < 4; i++ {
+ fetcher, err := testPkg.passphraseFileFetcher("-", stdin)
+ if err != nil {
+ t.Errorf("Expected passphraseFileFetcher to not return an error, but got %v", err)
+ }
+
+ pass, err := fetcher("key")
+ if err != nil {
+ t.Errorf("Expected passphraseFileFetcher invocation to succeed, failed with %v", err)
+ }
+
+ if string(pass) != string(passphrase) {
+ t.Errorf("Expected multiple passphrase fetch to return %q, got %q", passphrase, pass)
+ }
+ }
+}
+
func TestValidateVersion(t *testing.T) {
type args struct {
ver string
diff --git a/pkg/action/pull.go b/pkg/action/pull.go
index eb208ca7b..be71d0ed0 100644
--- a/pkg/action/pull.go
+++ b/pkg/action/pull.go
@@ -22,14 +22,12 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/downloader"
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/registry"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// Pull is the action for checking a given release's information.
@@ -90,6 +88,7 @@ func (p *Pull) Run(chartRef string) (string, error) {
RegistryClient: p.cfg.RegistryClient,
RepositoryConfig: p.Settings.RepositoryConfig,
RepositoryCache: p.Settings.RepositoryCache,
+ ContentCache: p.Settings.ContentCache,
}
if registry.IsOCI(chartRef) {
@@ -111,11 +110,12 @@ func (p *Pull) Run(chartRef string) (string, error) {
var err error
dest, err = os.MkdirTemp("", "helm-")
if err != nil {
- return out.String(), errors.Wrap(err, "failed to untar")
+ return out.String(), fmt.Errorf("failed to untar: %w", err)
}
defer os.RemoveAll(dest)
}
+ downloadSourceRef := chartRef
if p.RepoURL != "" {
chartURL, err := repo.FindChartInRepoURL(
p.RepoURL,
@@ -130,10 +130,10 @@ func (p *Pull) Run(chartRef string) (string, error) {
if err != nil {
return out.String(), err
}
- chartRef = chartURL
+ downloadSourceRef = chartURL
}
- saved, v, err := c.DownloadTo(chartRef, p.Version, dest)
+ saved, v, err := c.DownloadTo(downloadSourceRef, p.Version, dest)
if err != nil {
return out.String(), err
}
@@ -163,11 +163,10 @@ func (p *Pull) Run(chartRef string) (string, error) {
if _, err := os.Stat(udCheck); err != nil {
if err := os.MkdirAll(udCheck, 0755); err != nil {
- return out.String(), errors.Wrap(err, "failed to untar (mkdir)")
+ return out.String(), fmt.Errorf("failed to untar (mkdir): %w", err)
}
-
} else {
- return out.String(), errors.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck)
+ return out.String(), fmt.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck)
}
return out.String(), chartutil.ExpandFile(ud, saved)
diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go
index c6374523e..009f4d793 100644
--- a/pkg/action/release_testing.go
+++ b/pkg/action/release_testing.go
@@ -24,10 +24,10 @@ import (
"sort"
"time"
- "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1"
)
@@ -63,7 +63,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
}
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, errors.Errorf("releaseTest: Release name is invalid: %s", name)
+ return nil, fmt.Errorf("releaseTest: Release name is invalid: %s", name)
}
// finds the non-deleted release with the given name
@@ -96,7 +96,8 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
rel.Hooks = executingHooks
}
- if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
+ serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply)
+ if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply); err != nil {
rel.Hooks = append(skippedHooks, rel.Hooks...)
r.cfg.Releases.Update(rel)
return rel, err
@@ -112,7 +113,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
client, err := r.cfg.KubernetesClientSet()
if err != nil {
- return errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+ return fmt.Errorf("unable to get kubernetes client to fetch pod logs: %w", err)
}
hooksByWight := append([]*release.Hook{}, rel.Hooks...)
@@ -129,14 +130,14 @@ func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
logReader, err := req.Stream(context.Background())
if err != nil {
- return errors.Wrapf(err, "unable to get pod logs for %s", h.Name)
+ return fmt.Errorf("unable to get pod logs for %s: %w", h.Name, err)
}
fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
_, err = io.Copy(out, logReader)
fmt.Fprintln(out)
if err != nil {
- return errors.Wrapf(err, "unable to write pod logs for %s", h.Name)
+ return fmt.Errorf("unable to write pod logs for %s: %w", h.Name, err)
}
}
}
diff --git a/pkg/action/resource_policy.go b/pkg/action/resource_policy.go
index b72e94124..fcea98ad6 100644
--- a/pkg/action/resource_policy.go
+++ b/pkg/action/resource_policy.go
@@ -20,7 +20,7 @@ import (
"strings"
"helm.sh/helm/v4/pkg/kube"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
)
func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go
index 4006f565f..adaf22615 100644
--- a/pkg/action/rollback.go
+++ b/pkg/action/rollback.go
@@ -19,12 +19,12 @@ package action
import (
"bytes"
"fmt"
+ "log/slog"
"strings"
"time"
- "github.com/pkg/errors"
-
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1"
helmtime "helm.sh/helm/v4/pkg/time"
)
@@ -35,16 +35,26 @@ import (
type Rollback struct {
cfg *Configuration
- Version int
- Timeout time.Duration
- Wait bool
- WaitForJobs bool
- DisableHooks bool
- DryRun bool
- Recreate bool // will (if true) recreate pods after a rollback.
- Force bool // will (if true) force resource upgrade through uninstall/recreate if needed
- CleanupOnFail bool
- MaxHistory int // MaxHistory limits the maximum number of revisions saved per release
+ Version int
+ Timeout time.Duration
+ WaitStrategy kube.WaitStrategy
+ WaitForJobs bool
+ DisableHooks bool
+ DryRun bool
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the rollback anyway.
+ //
+ // This should be used with caution.
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply enables changes to be applied via Kubernetes server-side apply
+ // Can be the string: "true", "false" or "auto"
+ // When "auto", sever-side usage will be based upon the releases previous usage
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply string
+ CleanupOnFail bool
+ MaxHistory int // MaxHistory limits the maximum number of revisions saved per release
}
// NewRollback creates a new Rollback object with the given configuration.
@@ -62,26 +72,26 @@ func (r *Rollback) Run(name string) error {
r.cfg.Releases.MaxHistory = r.MaxHistory
- r.cfg.Log("preparing rollback of %s", name)
- currentRelease, targetRelease, err := r.prepareRollback(name)
+ slog.Debug("preparing rollback", "name", name)
+ currentRelease, targetRelease, serverSideApply, err := r.prepareRollback(name)
if err != nil {
return err
}
if !r.DryRun {
- r.cfg.Log("creating rolled back release for %s", name)
+ slog.Debug("creating rolled back release", "name", name)
if err := r.cfg.Releases.Create(targetRelease); err != nil {
return err
}
}
- r.cfg.Log("performing rollback of %s", name)
- if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
+ slog.Debug("performing rollback", "name", name)
+ if _, err := r.performRollback(currentRelease, targetRelease, serverSideApply); err != nil {
return err
}
if !r.DryRun {
- r.cfg.Log("updating status for rolled back release for %s", name)
+ slog.Debug("updating status for rolled back release", "name", name)
if err := r.cfg.Releases.Update(targetRelease); err != nil {
return err
}
@@ -91,18 +101,18 @@ func (r *Rollback) Run(name string) error {
// prepareRollback finds the previous release and prepares a new release object with
// the previous release's configuration
-func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) {
+func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, bool, error) {
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, nil, errors.Errorf("prepareRollback: Release name is invalid: %s", name)
+ return nil, nil, false, fmt.Errorf("prepareRollback: Release name is invalid: %s", name)
}
if r.Version < 0 {
- return nil, nil, errInvalidRevision
+ return nil, nil, false, errInvalidRevision
}
currentRelease, err := r.cfg.Releases.Last(name)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
previousVersion := r.Version
@@ -112,7 +122,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
historyReleases, err := r.cfg.Releases.History(name)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
// Check if the history version to be rolled back exists
@@ -125,14 +135,19 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
}
}
if !previousVersionExist {
- return nil, nil, errors.Errorf("release has no %d version", previousVersion)
+ return nil, nil, false, fmt.Errorf("release has no %d version", previousVersion)
}
- r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion)
+ slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
+ }
+
+ serverSideApply, err := getUpgradeServerSideValue(r.ServerSideApply, previousRelease.ApplyMethod)
+ if err != nil {
+ return nil, nil, false, err
}
// Store a new release object with previous release's configuration
@@ -150,100 +165,98 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele
// message here, and only override it later if we experience failure.
Description: fmt.Sprintf("Rollback to %d", previousVersion),
},
- Version: currentRelease.Version + 1,
- Labels: previousRelease.Labels,
- Manifest: previousRelease.Manifest,
- Hooks: previousRelease.Hooks,
+ Version: currentRelease.Version + 1,
+ Labels: previousRelease.Labels,
+ Manifest: previousRelease.Manifest,
+ Hooks: previousRelease.Hooks,
+ ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)),
}
- return currentRelease, targetRelease, nil
+ return currentRelease, targetRelease, serverSideApply, nil
}
-func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
+func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release, serverSideApply bool) (*release.Release, error) {
if r.DryRun {
- r.cfg.Log("dry run for %s", targetRelease.Name)
+ slog.Debug("dry run", "name", targetRelease.Name)
return targetRelease, nil
}
current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false)
if err != nil {
- return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+ return targetRelease, fmt.Errorf("unable to build kubernetes objects from current release manifest: %w", err)
}
target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false)
if err != nil {
- return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+ return targetRelease, fmt.Errorf("unable to build kubernetes objects from new release manifest: %w", err)
}
// pre-rollback hooks
+
if !r.DisableHooks {
- if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+ if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil {
return targetRelease, err
}
} else {
- r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
+ slog.Debug("rollback hooks disabled", "name", targetRelease.Name)
}
- // It is safe to use "force" here because these are resources currently rendered by the chart.
+ // It is safe to use "forceOwnership" here because these are resources currently rendered by the chart.
err = target.Visit(setMetadataVisitor(targetRelease.Name, targetRelease.Namespace, true))
if err != nil {
- return targetRelease, errors.Wrap(err, "unable to set metadata visitor from target release")
+ return targetRelease, fmt.Errorf("unable to set metadata visitor from target release: %w", err)
}
- results, err := r.cfg.KubeClient.Update(current, target, r.Force)
+ results, err := r.cfg.KubeClient.Update(
+ current,
+ target,
+ kube.ClientUpdateOptionForceReplace(r.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(serverSideApply, r.ForceConflicts),
+ kube.ClientUpdateOptionThreeWayMergeForUnstructured(false),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(true))
if err != nil {
msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
- r.cfg.Log("warning: %s", msg)
+ slog.Warn(msg)
currentRelease.Info.Status = release.StatusSuperseded
targetRelease.Info.Status = release.StatusFailed
targetRelease.Info.Description = msg
r.cfg.recordRelease(currentRelease)
r.cfg.recordRelease(targetRelease)
if r.CleanupOnFail {
- r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created))
+ slog.Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
_, errs := r.cfg.KubeClient.Delete(results.Created)
if errs != nil {
- var errorList []string
- for _, e := range errs {
- errorList = append(errorList, e.Error())
- }
- return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
+ return targetRelease, fmt.Errorf(
+ "an error occurred while cleaning up resources. original rollback error: %w",
+ fmt.Errorf("unable to cleanup resources: %w", joinErrors(errs, ", ")))
}
- r.cfg.Log("Resource cleanup complete")
+ slog.Debug("resource cleanup complete")
}
return targetRelease, err
}
- if r.Recreate {
- // NOTE: Because this is not critical for a release to succeed, we just
- // log if an error occurs and continue onward. If we ever introduce log
- // levels, we should make these error level logs so users are notified
- // that they'll need to go do the cleanup on their own
- if err := recreate(r.cfg, results.Updated); err != nil {
- r.cfg.Log(err.Error())
- }
+ waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
+ if err != nil {
+ return nil, fmt.Errorf("unable to set metadata visitor from target release: %w", err)
}
-
- if r.Wait {
- if r.WaitForJobs {
- if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil {
- targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
- r.cfg.recordRelease(currentRelease)
- r.cfg.recordRelease(targetRelease)
- return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
- }
- } else {
- if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil {
- targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
- r.cfg.recordRelease(currentRelease)
- r.cfg.recordRelease(targetRelease)
- return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
- }
+ if r.WaitForJobs {
+ if err := waiter.WaitWithJobs(target, r.Timeout); err != nil {
+ targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+ r.cfg.recordRelease(currentRelease)
+ r.cfg.recordRelease(targetRelease)
+ return targetRelease, fmt.Errorf("release %s failed: %w", targetRelease.Name, err)
+ }
+ } else {
+ if err := waiter.Wait(target, r.Timeout); err != nil {
+ targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+ r.cfg.recordRelease(currentRelease)
+ r.cfg.recordRelease(targetRelease)
+ return targetRelease, fmt.Errorf("release %s failed: %w", targetRelease.Name, err)
}
}
// post-rollback hooks
if !r.DisableHooks {
- if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+ if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil {
return targetRelease, err
}
}
@@ -254,7 +267,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
}
// Supersede all previous deployments, see issue #2941.
for _, rel := range deployed {
- r.cfg.Log("superseding previous deployment %d", rel.Version)
+ slog.Debug("superseding previous deployment", "version", rel.Version)
rel.Info.Status = release.StatusSuperseded
r.cfg.recordRelease(rel)
}
diff --git a/pkg/action/show.go b/pkg/action/show.go
index 8f9da58e9..4195d69a5 100644
--- a/pkg/action/show.go
+++ b/pkg/action/show.go
@@ -21,10 +21,10 @@ import (
"fmt"
"strings"
- "github.com/pkg/errors"
"k8s.io/cli-runtime/pkg/printers"
"sigs.k8s.io/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@@ -69,14 +69,14 @@ func NewShow(output ShowOutputFormat, cfg *Configuration) *Show {
sh := &Show{
OutputFormat: output,
}
- sh.ChartPathOptions.registryClient = cfg.RegistryClient
+ sh.registryClient = cfg.RegistryClient
return sh
}
// SetRegistryClient sets the registry client to use when pulling a chart from a registry.
func (s *Show) SetRegistryClient(client *registry.Client) {
- s.ChartPathOptions.registryClient = client
+ s.registryClient = client
}
// Run executes 'helm show' against the given release.
@@ -105,7 +105,7 @@ func (s *Show) Run(chartpath string) (string, error) {
if s.JSONPathTemplate != "" {
printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate)
if err != nil {
- return "", errors.Wrapf(err, "error parsing jsonpath %s", s.JSONPathTemplate)
+ return "", fmt.Errorf("error parsing jsonpath %s: %w", s.JSONPathTemplate, err)
}
printer.Execute(&out, s.chart.Values)
} else {
@@ -130,10 +130,10 @@ func (s *Show) Run(chartpath string) (string, error) {
if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll {
crds := s.chart.CRDObjects()
if len(crds) > 0 {
- if s.OutputFormat == ShowAll && !bytes.HasPrefix(crds[0].File.Data, []byte("---")) {
- fmt.Fprintln(&out, "---")
- }
for _, crd := range crds {
+ if !bytes.HasPrefix(crd.File.Data, []byte("---")) {
+ fmt.Fprintln(&out, "---")
+ }
fmt.Fprintf(&out, "%s\n", string(crd.File.Data))
}
}
@@ -141,7 +141,7 @@ func (s *Show) Run(chartpath string) (string, error) {
return out.String(), nil
}
-func findReadme(files []*chart.File) (file *chart.File) {
+func findReadme(files []*common.File) (file *common.File) {
for _, file := range files {
for _, n := range readmeFileNames {
if file == nil {
diff --git a/pkg/action/show_test.go b/pkg/action/show_test.go
index b1c5d6164..faf306f2a 100644
--- a/pkg/action/show_test.go
+++ b/pkg/action/show_test.go
@@ -19,6 +19,7 @@ package action
import (
"testing"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@@ -27,13 +28,14 @@ func TestShow(t *testing.T) {
client := NewShow(ShowAll, config)
client.chart = &chart.Chart{
Metadata: &chart.Metadata{Name: "alpine"},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "README.md", Data: []byte("README\n")},
{Name: "crds/ignoreme.txt", Data: []byte("error")},
{Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")},
{Name: "crds/bar.json", Data: []byte("---\nbar\n")},
+ {Name: "crds/baz.yaml", Data: []byte("baz\n")},
},
- Raw: []*chart.File{
+ Raw: []*common.File{
{Name: "values.yaml", Data: []byte("VALUES\n")},
},
Values: map[string]interface{}{},
@@ -58,6 +60,9 @@ foo
---
bar
+---
+baz
+
`
if output != expect {
t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
@@ -101,10 +106,11 @@ func TestShowCRDs(t *testing.T) {
client := NewShow(ShowCRDs, config)
client.chart = &chart.Chart{
Metadata: &chart.Metadata{Name: "alpine"},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "crds/ignoreme.txt", Data: []byte("error")},
{Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")},
{Name: "crds/bar.json", Data: []byte("---\nbar\n")},
+ {Name: "crds/baz.yaml", Data: []byte("baz\n")},
},
}
@@ -119,6 +125,9 @@ foo
---
bar
+---
+baz
+
`
if output != expect {
t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
@@ -130,7 +139,7 @@ func TestShowNoReadme(t *testing.T) {
client := NewShow(ShowAll, config)
client.chart = &chart.Chart{
Metadata: &chart.Metadata{Name: "alpine"},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "crds/ignoreme.txt", Data: []byte("error")},
{Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")},
{Name: "crds/bar.json", Data: []byte("---\nbar\n")},
diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go
index fdbeb5dc8..866be5d54 100644
--- a/pkg/action/uninstall.go
+++ b/pkg/action/uninstall.go
@@ -17,17 +17,19 @@ limitations under the License.
package action
import (
+ "errors"
+ "fmt"
+ "log/slog"
"strings"
"time"
- "github.com/pkg/errors"
-
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/storage/driver"
helmtime "helm.sh/helm/v4/pkg/time"
)
@@ -41,7 +43,7 @@ type Uninstall struct {
DryRun bool
IgnoreNotFound bool
KeepHistory bool
- Wait bool
+ WaitStrategy kube.WaitStrategy
DeletionPropagation string
Timeout time.Duration
Description string
@@ -60,17 +62,24 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, err
}
+ waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+ if err != nil {
+ return nil, err
+ }
+
if u.DryRun {
- // In the dry run case, just see if the release exists
r, err := u.cfg.releaseContent(name, 0)
if err != nil {
+ if u.IgnoreNotFound && errors.Is(err, driver.ErrReleaseNotFound) {
+ return nil, nil
+ }
return &release.UninstallReleaseResponse{}, err
}
return &release.UninstallReleaseResponse{Release: r}, nil
}
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, errors.Errorf("uninstall: Release name is invalid: %s", name)
+ return nil, fmt.Errorf("uninstall: Release name is invalid: %s", name)
}
rels, err := u.cfg.Releases.History(name)
@@ -78,7 +87,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
if u.IgnoreNotFound {
return nil, nil
}
- return nil, errors.Wrapf(err, "uninstall: Release not loaded: %s", name)
+ return nil, fmt.Errorf("uninstall: Release not loaded: %s: %w", name, err)
}
if len(rels) < 1 {
return nil, errMissingRelease
@@ -92,37 +101,38 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
if rel.Info.Status == release.StatusUninstalled {
if !u.KeepHistory {
if err := u.purgeReleases(rels...); err != nil {
- return nil, errors.Wrap(err, "uninstall: Failed to purge the release")
+ return nil, fmt.Errorf("uninstall: Failed to purge the release: %w", err)
}
return &release.UninstallReleaseResponse{Release: rel}, nil
}
- return nil, errors.Errorf("the release named %q is already deleted", name)
+ return nil, fmt.Errorf("the release named %q is already deleted", name)
}
- u.cfg.Log("uninstall: Deleting %s", name)
+ slog.Debug("uninstall: deleting release", "name", name)
rel.Info.Status = release.StatusUninstalling
rel.Info.Deleted = helmtime.Now()
rel.Info.Description = "Deletion in progress (or silently failed)"
res := &release.UninstallReleaseResponse{Release: rel}
if !u.DisableHooks {
- if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
+ serverSideApply := true
+ if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
return res, err
}
} else {
- u.cfg.Log("delete hooks disabled for %s", name)
+ slog.Debug("delete hooks disabled", "release", name)
}
// From here on out, the release is currently considered to be in StatusUninstalling
// state.
if err := u.cfg.Releases.Update(rel); err != nil {
- u.cfg.Log("uninstall: Failed to store updated release: %s", err)
+ slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
deletedResources, kept, errs := u.deleteRelease(rel)
if errs != nil {
- u.cfg.Log("uninstall: Failed to delete release: %s", errs)
- return nil, errors.Errorf("failed to delete release: %s", name)
+ slog.Debug("uninstall: Failed to delete release", slog.Any("error", errs))
+ return nil, fmt.Errorf("failed to delete release: %s", name)
}
if kept != "" {
@@ -130,16 +140,13 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
res.Info = kept
- if u.Wait {
- if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok {
- if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil {
- errs = append(errs, err)
- }
- }
+ if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil {
+ errs = append(errs, err)
}
if !u.DisableHooks {
- if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
+ serverSideApply := true
+ if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
errs = append(errs, err)
}
}
@@ -152,26 +159,26 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if !u.KeepHistory {
- u.cfg.Log("purge requested for %s", name)
+ slog.Debug("purge requested", "release", name)
err := u.purgeReleases(rels...)
if err != nil {
- errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
+ errs = append(errs, fmt.Errorf("uninstall: Failed to purge the release: %w", err))
}
// Return the errors that occurred while deleting the release, if any
if len(errs) > 0 {
- return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+ return res, fmt.Errorf("uninstallation completed with %d error(s): %w", len(errs), joinErrors(errs, "; "))
}
return res, nil
}
if err := u.cfg.Releases.Update(rel); err != nil {
- u.cfg.Log("uninstall: Failed to store updated release: %s", err)
+ slog.Debug("uninstall: Failed to store updated release", slog.Any("error", err))
}
if len(errs) > 0 {
- return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+ return res, fmt.Errorf("uninstallation completed with %d error(s): %w", len(errs), joinErrors(errs, "; "))
}
return res, nil
}
@@ -185,12 +192,28 @@ func (u *Uninstall) purgeReleases(rels ...*release.Release) error {
return nil
}
-func joinErrors(errs []error) string {
- es := make([]string, 0, len(errs))
- for _, e := range errs {
- es = append(es, e.Error())
+type joinedErrors struct {
+ errs []error
+ sep string
+}
+
+func joinErrors(errs []error, sep string) error {
+ return &joinedErrors{
+ errs: errs,
+ sep: sep,
}
- return strings.Join(es, "; ")
+}
+
+func (e *joinedErrors) Error() string {
+ errs := make([]string, 0, len(e.errs))
+ for _, err := range e.errs {
+ errs = append(errs, err.Error())
+ }
+ return strings.Join(errs, e.sep)
+}
+
+func (e *joinedErrors) Unwrap() []error {
+ return e.errs
}
// deleteRelease deletes the release and returns list of delete resources and manifests that were kept in the deletion process
@@ -204,7 +227,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
// FIXME: One way to delete at this point would be to try a label-based
// deletion. The problem with this is that we could get a false positive
// and delete something that was not legitimately part of this release.
- return nil, rel.Manifest, []error{errors.Wrap(err, "corrupted release record. You must manually delete the resources")}
+ return nil, rel.Manifest, []error{fmt.Errorf("corrupted release record. You must manually delete the resources: %w", err)}
}
filesToKeep, filesToDelete := filterManifestsToKeep(files)
@@ -220,11 +243,11 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false)
if err != nil {
- return nil, "", []error{errors.Wrap(err, "unable to build kubernetes objects for delete")}
+ return nil, "", []error{fmt.Errorf("unable to build kubernetes objects for delete: %w", err)}
}
if len(resources) > 0 {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok {
- _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation))
+ _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation))
return resources, kept, errs
}
_, errs = u.cfg.KubeClient.Delete(resources)
@@ -232,7 +255,7 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
return resources, kept, errs
}
-func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation {
+func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
switch cascadingFlag {
case "orphan":
return v1.DeletePropagationOrphan
@@ -241,7 +264,7 @@ func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPro
case "background":
return v1.DeletePropagationBackground
default:
- cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag)
+ slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
return v1.DeletePropagationBackground
}
}
diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go
index 071b76943..7c7344383 100644
--- a/pkg/action/uninstall_test.go
+++ b/pkg/action/uninstall_test.go
@@ -17,21 +17,37 @@ limitations under the License.
package action
import (
+ "errors"
"fmt"
+ "io"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
)
func uninstallAction(t *testing.T) *Uninstall {
+ t.Helper()
config := actionConfigFixture(t)
unAction := NewUninstall(config)
return unAction
}
+func TestUninstallRelease_dryRun_ignoreNotFound(t *testing.T) {
+ unAction := uninstallAction(t)
+ unAction.DryRun = true
+ unAction.IgnoreNotFound = true
+
+ is := assert.New(t)
+ res, err := unAction.Run("release-non-exist")
+ is.Nil(res)
+ is.NoError(err)
+}
+
func TestUninstallRelease_ignoreNotFound(t *testing.T) {
unAction := uninstallAction(t)
unAction.DryRun = false
@@ -42,7 +58,6 @@ func TestUninstallRelease_ignoreNotFound(t *testing.T) {
is.Nil(res)
is.NoError(err)
}
-
func TestUninstallRelease_deleteRelease(t *testing.T) {
is := assert.New(t)
@@ -82,7 +97,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
unAction := uninstallAction(t)
unAction.DisableHooks = true
unAction.DryRun = false
- unAction.Wait = true
+ unAction.WaitStrategy = kube.StatusWatcherStrategy
rel := releaseStub()
rel.Name = "come-fail-away"
@@ -99,7 +114,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
}`
unAction.cfg.Releases.Create(rel)
failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
- failer.WaitError = fmt.Errorf("U timed out")
+ failer.WaitForDeleteError = fmt.Errorf("U timed out")
unAction.cfg.KubeClient = failer
res, err := unAction.Run(rel.Name)
is.Error(err)
@@ -113,7 +128,7 @@ func TestUninstallRelease_Cascade(t *testing.T) {
unAction := uninstallAction(t)
unAction.DisableHooks = true
unAction.DryRun = false
- unAction.Wait = false
+ unAction.WaitStrategy = kube.HookOnlyStrategy
unAction.DeletionPropagation = "foreground"
rel := releaseStub()
@@ -135,6 +150,20 @@ func TestUninstallRelease_Cascade(t *testing.T) {
failer.BuildDummy = true
unAction.cfg.KubeClient = failer
_, err := unAction.Run(rel.Name)
- is.Error(err)
+ require.Error(t, err)
is.Contains(err.Error(), "failed to delete release: come-fail-away")
}
+
+func TestUninstallRun_UnreachableKubeClient(t *testing.T) {
+ t.Helper()
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewUninstall(config)
+ result, err := client.Run("")
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go
index e32c8dcaf..3688adf0e 100644
--- a/pkg/action/upgrade.go
+++ b/pkg/action/upgrade.go
@@ -19,22 +19,24 @@ package action
import (
"bytes"
"context"
+ "errors"
"fmt"
+ "log/slog"
"strings"
"sync"
"time"
- "github.com/pkg/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/resource"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
- "helm.sh/helm/v4/pkg/postrender"
+ "helm.sh/helm/v4/pkg/postrenderer"
"helm.sh/helm/v4/pkg/registry"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
"helm.sh/helm/v4/pkg/storage/driver"
)
@@ -64,8 +66,8 @@ type Upgrade struct {
SkipCRDs bool
// Timeout is the timeout for this operation
Timeout time.Duration
- // Wait determines whether the wait operation should be performed after the upgrade is requested.
- Wait bool
+ // WaitStrategy determines what type of waiting should be done
+ WaitStrategy kube.WaitStrategy
// WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
WaitForJobs bool
// DisableHooks disables hook processing if set to true.
@@ -77,22 +79,28 @@ type Upgrade struct {
// HideSecret can be set to true when DryRun is enabled in order to hide
// Kubernetes Secrets in the output. It cannot be used outside of DryRun.
HideSecret bool
- // Force will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
//
// This should be used with caution.
- Force bool
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply enables changes to be applied via Kubernetes server-side apply
+ // Can be the string: "true", "false" or "auto"
+ // When "auto", sever-side usage will be based upon the releases previous usage
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply string
// ResetValues will reset the values to the chart's built-ins rather than merging with existing.
ResetValues bool
// ReuseValues will reuse the user's last supplied values.
ReuseValues bool
// ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values.
ResetThenReuseValues bool
- // Recreate will (if true) recreate pods after a rollback.
- Recreate bool
// MaxHistory limits the maximum number of revisions saved per release
MaxHistory int
- // Atomic, if true, will roll back on failure.
- Atomic bool
+ // RollbackOnFailure enables rolling back the upgraded release on failure
+ RollbackOnFailure bool
// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
CleanupOnFail bool
// SubNotes determines whether sub-notes are rendered in the chart.
@@ -108,7 +116,7 @@ type Upgrade struct {
//
// If this is non-nil, then after templates are rendered, they will be sent to the
// post renderer before sending to the Kubernetes API server.
- PostRenderer postrender.PostRenderer
+ PostRenderer postrenderer.PostRenderer
// DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
DisableOpenAPIValidation bool
// Get missing dependencies
@@ -129,16 +137,17 @@ type resultMessage struct {
// NewUpgrade creates a new Upgrade object with the given configuration.
func NewUpgrade(cfg *Configuration) *Upgrade {
up := &Upgrade{
- cfg: cfg,
+ cfg: cfg,
+ ServerSideApply: "auto",
}
- up.ChartPathOptions.registryClient = cfg.RegistryClient
+ up.registryClient = cfg.RegistryClient
return up
}
// SetRegistryClient sets the registry client to use when fetching charts.
func (u *Upgrade) SetRegistryClient(client *registry.Client) {
- u.ChartPathOptions.registryClient = client
+ u.registryClient = client
}
// Run executes the upgrade on the given release.
@@ -153,31 +162,33 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
return nil, err
}
- // Make sure if Atomic is set, that wait is set as well. This makes it so
+ // Make sure wait is set if RollbackOnFailure. This makes it so
// the user doesn't have to specify both
- u.Wait = u.Wait || u.Atomic
+ if u.WaitStrategy == kube.HookOnlyStrategy && u.RollbackOnFailure {
+ u.WaitStrategy = kube.StatusWatcherStrategy
+ }
if err := chartutil.ValidateReleaseName(name); err != nil {
- return nil, errors.Errorf("release name is invalid: %s", name)
+ return nil, fmt.Errorf("release name is invalid: %s", name)
}
- u.cfg.Log("preparing upgrade for %s", name)
- currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
+ slog.Debug("preparing upgrade", "name", name)
+ currentRelease, upgradedRelease, serverSideApply, err := u.prepareUpgrade(name, chart, vals)
if err != nil {
return nil, err
}
u.cfg.Releases.MaxHistory = u.MaxHistory
- u.cfg.Log("performing update for %s", name)
- res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
+ slog.Debug("performing update", "name", name)
+ res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease, serverSideApply)
if err != nil {
return res, err
}
// Do not update for dry runs
if !u.isDryRun() {
- u.cfg.Log("updating status for upgraded release for %s", name)
+ slog.Debug("updating status for upgraded release", "name", name)
if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
return res, err
}
@@ -195,14 +206,14 @@ func (u *Upgrade) isDryRun() bool {
}
// prepareUpgrade builds an upgraded release for an upgrade operation.
-func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) {
+func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, bool, error) {
if chart == nil {
- return nil, nil, errMissingChart
+ return nil, nil, false, errMissingChart
}
// HideSecret must be used with dry run. Otherwise, return an error.
if !u.isDryRun() && u.HideSecret {
- return nil, nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
+ return nil, nil, false, errors.New("hiding Kubernetes secrets requires a dry-run mode")
}
// finds the last non-deleted release with the given name
@@ -210,14 +221,14 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
if err != nil {
// to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist
if errors.Is(err, driver.ErrReleaseNotFound) {
- return nil, nil, driver.NewErrNoDeployedReleases(name)
+ return nil, nil, false, driver.NewErrNoDeployedReleases(name)
}
- return nil, nil, err
+ return nil, nil, false, err
}
// Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock.
if lastRelease.Info.Status.IsPending() {
- return nil, nil, errPending
+ return nil, nil, false, errPending
}
var currentRelease *release.Release
@@ -232,7 +243,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
(lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) {
currentRelease = lastRelease
} else {
- return nil, nil, err
+ return nil, nil, false, err
}
}
}
@@ -240,18 +251,18 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
// determine if values will be reused
vals, err = u.reuseValues(chart, currentRelease, vals)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
if err := chartutil.ProcessDependencies(chart, vals); err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
// Increment revision count. This is passed to templates, and also stored on
// the release object.
revision := lastRelease.Version + 1
- options := chartutil.ReleaseOptions{
+ options := common.ReleaseOptions{
Name: name,
Namespace: currentRelease.Namespace,
Revision: revision,
@@ -260,11 +271,11 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
caps, err := u.cfg.getCapabilities()
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
- valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation)
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
// Determine whether or not to interact with remote
@@ -275,13 +286,20 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithRemote, u.EnableDNS, u.HideSecret)
if err != nil {
- return nil, nil, err
+ return nil, nil, false, err
}
if driver.ContainsSystemLabels(u.Labels) {
- return nil, nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ return nil, nil, false, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ }
+
+ serverSideApply, err := getUpgradeServerSideValue(u.ServerSideApply, lastRelease.ApplyMethod)
+ if err != nil {
+ return nil, nil, false, err
}
+ slog.Debug("determined release apply method", slog.Bool("server_side_apply", serverSideApply), slog.String("previous_release_apply_method", lastRelease.ApplyMethod))
+
// Store an upgraded release.
upgradedRelease := &release.Release{
Name: name,
@@ -294,34 +312,35 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
Status: release.StatusPendingUpgrade,
Description: "Preparing upgrade", // This should be overwritten later.
},
- Version: revision,
- Manifest: manifestDoc.String(),
- Hooks: hooks,
- Labels: mergeCustomLabels(lastRelease.Labels, u.Labels),
+ Version: revision,
+ Manifest: manifestDoc.String(),
+ Hooks: hooks,
+ Labels: mergeCustomLabels(lastRelease.Labels, u.Labels),
+ ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)),
}
if len(notesTxt) > 0 {
upgradedRelease.Info.Notes = notesTxt
}
err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
- return currentRelease, upgradedRelease, err
+ return currentRelease, upgradedRelease, serverSideApply, err
}
-func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release) (*release.Release, error) {
+func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release, serverSideApply bool) (*release.Release, error) {
current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false)
if err != nil {
// Checking for removed Kubernetes API error so can provide a more informative error message to the user
// Ref: https://github.com/helm/helm/issues/7219
if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") {
- return upgradedRelease, errors.Wrap(err, "current release manifest contains removed kubernetes api(s) for this "+
+ return upgradedRelease, fmt.Errorf("current release manifest contains removed kubernetes api(s) for this "+
"kubernetes version and it is therefore unable to build the kubernetes "+
- "objects for performing the diff. error from kubernetes")
+ "objects for performing the diff. error from kubernetes: %w", err)
}
- return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+ return upgradedRelease, fmt.Errorf("unable to build kubernetes objects from current release manifest: %w", err)
}
target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation)
if err != nil {
- return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+ return upgradedRelease, fmt.Errorf("unable to build kubernetes objects from new release manifest: %w", err)
}
// It is safe to use force only on target because these are resources currently rendered by the chart.
@@ -350,7 +369,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
toBeUpdated, err = existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
}
if err != nil {
- return nil, errors.Wrap(err, "Unable to continue with update")
+ return nil, fmt.Errorf("unable to continue with update: %w", err)
}
toBeUpdated.Visit(func(r *resource.Info, err error) error {
@@ -363,7 +382,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
// Run if it is a dry run
if u.isDryRun() {
- u.cfg.Log("dry run for %s", upgradedRelease.Name)
+ slog.Debug("dry run for release", "name", upgradedRelease.Name)
if len(u.Description) > 0 {
upgradedRelease.Info.Description = u.Description
} else {
@@ -372,7 +391,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
return upgradedRelease, nil
}
- u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
+ slog.Debug("creating upgraded release", "name", upgradedRelease.Name)
if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
return nil, err
}
@@ -380,8 +399,9 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
ctxChan := make(chan resultMessage)
doneChan := make(chan interface{})
defer close(doneChan)
- go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease)
+ go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease, serverSideApply)
go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease)
+
select {
case result := <-rChan:
return result.r, result.e
@@ -390,7 +410,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
}
}
-// Function used to lock the Mutex, this is important for the case when the atomic flag is set.
+// Function used to lock the Mutex, this is important for the case when RollbackOnFailure is set.
// In that case the upgrade will finish before the rollback is finished so it is necessary to wait for the rollback to finish.
// The rollback will be trigger by the function failRelease
func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) {
@@ -408,63 +428,65 @@ func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c ch
case <-ctx.Done():
err := ctx.Err()
- // when the atomic flag is set the ongoing release finish first and doesn't give time for the rollback happens.
+ // when RollbackOnFailure is set, the ongoing release finish first and doesn't give time for the rollback happens.
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err)
case <-done:
return
}
}
-func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) {
+
+func isReleaseApplyMethodClientSideApply(applyMethod string) bool {
+ return applyMethod == "" || applyMethod == string(release.ApplyMethodClientSideApply)
+}
+
+func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release, serverSideApply bool) {
// pre-upgrade hooks
if !u.DisableHooks {
- if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+ if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
return
}
} else {
- u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
+ slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
}
- results, err := u.cfg.KubeClient.Update(current, target, u.Force)
+ upgradeClientSideFieldManager := isReleaseApplyMethodClientSideApply(originalRelease.ApplyMethod) && serverSideApply // Update client-side field manager if transitioning from client-side to server-side apply
+ results, err := u.cfg.KubeClient.Update(
+ current,
+ target,
+ kube.ClientUpdateOptionForceReplace(u.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(serverSideApply, u.ForceConflicts),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager))
if err != nil {
u.cfg.recordRelease(originalRelease)
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
return
}
- if u.Recreate {
- // NOTE: Because this is not critical for a release to succeed, we just
- // log if an error occurs and continue onward. If we ever introduce log
- // levels, we should make these error level logs so users are notified
- // that they'll need to go do the cleanup on their own
- if err := recreate(u.cfg, results.Updated); err != nil {
- u.cfg.Log(err.Error())
- }
+ waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+ if err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
}
-
- if u.Wait {
- u.cfg.Log(
- "waiting for release %s resources (created: %d updated: %d deleted: %d)",
- upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted))
- if u.WaitForJobs {
- if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil {
- u.cfg.recordRelease(originalRelease)
- u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
- return
- }
- } else {
- if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil {
- u.cfg.recordRelease(originalRelease)
- u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
- return
- }
+ if u.WaitForJobs {
+ if err := waiter.WaitWithJobs(target, u.Timeout); err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
+ }
+ } else {
+ if err := waiter.Wait(target, u.Timeout); err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
}
}
// post-upgrade hooks
if !u.DisableHooks {
- if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+ if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
return
}
@@ -484,32 +506,36 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
- u.cfg.Log("warning: %s", msg)
+ slog.Warn("upgrade failed", "name", rel.Name, slog.Any("error", err))
rel.Info.Status = release.StatusFailed
rel.Info.Description = msg
u.cfg.recordRelease(rel)
if u.CleanupOnFail && len(created) > 0 {
- u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
+ slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
_, errs := u.cfg.KubeClient.Delete(created)
if errs != nil {
- var errorList []string
- for _, e := range errs {
- errorList = append(errorList, e.Error())
- }
- return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
+ return rel, fmt.Errorf(
+ "an error occurred while cleaning up resources. original upgrade error: %w: %w",
+ err,
+ fmt.Errorf(
+ "unable to cleanup resources: %w",
+ joinErrors(errs, ", "),
+ ),
+ )
}
- u.cfg.Log("Resource cleanup complete")
+ slog.Debug("resource cleanup complete")
}
- if u.Atomic {
- u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")
+
+ if u.RollbackOnFailure {
+ slog.Debug("Upgrade failed and rollback-on-failure is set, rolling back to previous successful release")
// As a protection, get the last successful release before rollback.
// If there are no successful releases, bail out
hist := NewHistory(u.cfg)
fullHistory, herr := hist.Run(rel.Name)
if herr != nil {
- return rel, errors.Wrapf(herr, "an error occurred while finding last successful release. original upgrade error: %s", err)
+ return rel, fmt.Errorf("an error occurred while finding last successful release. original upgrade error: %w: %w", err, herr)
}
// There isn't a way to tell if a previous release was successful, but
@@ -519,23 +545,26 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
return r.Info.Status == release.StatusSuperseded || r.Info.Status == release.StatusDeployed
}).Filter(fullHistory)
if len(filteredHistory) == 0 {
- return rel, errors.Wrap(err, "unable to find a previously successful release when attempting to rollback. original upgrade error")
+ return rel, fmt.Errorf("unable to find a previously successful release when attempting to rollback. original upgrade error: %w", err)
}
releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision)
rollin := NewRollback(u.cfg)
rollin.Version = filteredHistory[0].Version
- rollin.Wait = true
+ if u.WaitStrategy == kube.HookOnlyStrategy {
+ rollin.WaitStrategy = kube.StatusWatcherStrategy
+ }
rollin.WaitForJobs = u.WaitForJobs
rollin.DisableHooks = u.DisableHooks
- rollin.Recreate = u.Recreate
- rollin.Force = u.Force
+ rollin.ForceReplace = u.ForceReplace
+ rollin.ForceConflicts = u.ForceConflicts
+ rollin.ServerSideApply = u.ServerSideApply
rollin.Timeout = u.Timeout
if rollErr := rollin.Run(rel.Name); rollErr != nil {
- return rel, errors.Wrapf(rollErr, "an error occurred while rolling back the release. original upgrade error: %s", err)
+ return rel, fmt.Errorf("an error occurred while rolling back the release. original upgrade error: %w: %w", err, rollErr)
}
- return rel, errors.Wrapf(err, "release %s failed, and has been rolled back due to atomic being set", rel.Name)
+ return rel, fmt.Errorf("release %s failed, and has been rolled back due to rollback-on-failure being set: %w", rel.Name, err)
}
return rel, err
@@ -552,21 +581,21 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
if u.ResetValues {
// If ResetValues is set, we completely ignore current.Config.
- u.cfg.Log("resetting values to the chart's original version")
+ slog.Debug("resetting values to the chart's original version")
return newVals, nil
}
// If the ReuseValues flag is set, we always copy the old values over the new config's values.
if u.ReuseValues {
- u.cfg.Log("reusing the old release's values")
+ slog.Debug("reusing the old release's values")
// We have to regenerate the old coalesced values:
- oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
+ oldVals, err := util.CoalesceValues(current.Chart, current.Config)
if err != nil {
- return nil, errors.Wrap(err, "failed to rebuild old values")
+ return nil, fmt.Errorf("failed to rebuild old values: %w", err)
}
- newVals = chartutil.CoalesceTables(newVals, current.Config)
+ newVals = util.CoalesceTables(newVals, current.Config)
chart.Values = oldVals
@@ -575,15 +604,15 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV
// If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
if u.ResetThenReuseValues {
- u.cfg.Log("merging values from old release to new values")
+ slog.Debug("merging values from old release to new values")
- newVals = chartutil.CoalesceTables(newVals, current.Config)
+ newVals = util.CoalesceTables(newVals, current.Config)
return newVals, nil
}
if len(newVals) == 0 && len(current.Config) > 0 {
- u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version)
+ slog.Debug("copying values from old release", "name", current.Name, "version", current.Version)
newVals = current.Config
}
return newVals, nil
@@ -594,42 +623,6 @@ func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool)
return err
}
-// recreate captures all the logic for recreating pods for both upgrade and
-// rollback. If we end up refactoring rollback to use upgrade, this can just be
-// made an unexported method on the upgrade action.
-func recreate(cfg *Configuration, resources kube.ResourceList) error {
- for _, res := range resources {
- versioned := kube.AsVersioned(res)
- selector, err := kube.SelectorsForObject(versioned)
- if err != nil {
- // If no selector is returned, it means this object is
- // definitely not a pod, so continue onward
- continue
- }
-
- client, err := cfg.KubernetesClientSet()
- if err != nil {
- return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
- }
-
- pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
- LabelSelector: selector.String(),
- })
- if err != nil {
- return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
- }
-
- // Restart pods
- for _, pod := range pods.Items {
- // Delete each pod for get them restarted with changed spec.
- if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
- return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
- }
- }
- }
- return nil
-}
-
func objectKey(r *resource.Info) string {
gvk := r.Object.GetObjectKind().GroupVersionKind()
return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
@@ -644,3 +637,16 @@ func mergeCustomLabels(current, desired map[string]string) map[string]string {
}
return labels
}
+
+func getUpgradeServerSideValue(serverSideOption string, releaseApplyMethod string) (bool, error) {
+ switch serverSideOption {
+ case "auto":
+ return releaseApplyMethod == "ssa", nil
+ case "false":
+ return false, nil
+ case "true":
+ return true, nil
+ default:
+ return false, fmt.Errorf("invalid/unknown release server-side apply method: %s", serverSideOption)
+ }
+}
diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go
index 303f49e70..d31804b87 100644
--- a/pkg/action/upgrade_test.go
+++ b/pkg/action/upgrade_test.go
@@ -18,12 +18,15 @@ package action
import (
"context"
+ "errors"
"fmt"
+ "io"
"reflect"
"testing"
"time"
chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/kube"
"helm.sh/helm/v4/pkg/storage/driver"
"github.com/stretchr/testify/assert"
@@ -35,6 +38,7 @@ import (
)
func upgradeAction(t *testing.T) *Upgrade {
+ t.Helper()
config := actionConfigFixture(t)
upAction := NewUpgrade(config)
upAction.Namespace = "spaced"
@@ -52,10 +56,10 @@ func TestUpgradeRelease_Success(t *testing.T) {
rel.Info.Status = release.StatusDeployed
req.NoError(upAction.cfg.Releases.Create(rel))
- upAction.Wait = true
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
- ctx, done := context.WithCancel(context.Background())
+ ctx, done := context.WithCancel(t.Context())
res, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
done()
req.NoError(err)
@@ -82,7 +86,7 @@ func TestUpgradeRelease_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer
- upAction.Wait = true
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
res, err := upAction.Run(rel.Name, buildChart(), vals)
@@ -104,7 +108,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer
- upAction.Wait = true
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.WaitForJobs = true
vals := map[string]interface{}{}
@@ -128,7 +132,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) {
failer.WaitError = fmt.Errorf("I timed out")
failer.DeleteError = fmt.Errorf("I tried to delete nil")
upAction.cfg.KubeClient = failer
- upAction.Wait = true
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.CleanupOnFail = true
vals := map[string]interface{}{}
@@ -139,11 +143,11 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) {
is.Equal(res.Info.Status, release.StatusFailed)
}
-func TestUpgradeRelease_Atomic(t *testing.T) {
+func TestUpgradeRelease_RollbackOnFailure(t *testing.T) {
is := assert.New(t)
req := require.New(t)
- t.Run("atomic rollback succeeds", func(t *testing.T) {
+ t.Run("rollback-on-failure rollback succeeds", func(t *testing.T) {
upAction := upgradeAction(t)
rel := releaseStub()
@@ -155,13 +159,13 @@ func TestUpgradeRelease_Atomic(t *testing.T) {
// We can't make Update error because then the rollback won't work
failer.WatchUntilReadyError = fmt.Errorf("arming key removed")
upAction.cfg.KubeClient = failer
- upAction.Atomic = true
+ upAction.RollbackOnFailure = true
vals := map[string]interface{}{}
res, err := upAction.Run(rel.Name, buildChart(), vals)
req.Error(err)
is.Contains(err.Error(), "arming key removed")
- is.Contains(err.Error(), "atomic")
+ is.Contains(err.Error(), "rollback-on-failure")
// Now make sure it is actually upgraded
updatedRes, err := upAction.cfg.Releases.Get(res.Name, 3)
@@ -170,7 +174,7 @@ func TestUpgradeRelease_Atomic(t *testing.T) {
is.Equal(updatedRes.Info.Status, release.StatusDeployed)
})
- t.Run("atomic uninstall fails", func(t *testing.T) {
+ t.Run("rollback-on-failure uninstall fails", func(t *testing.T) {
upAction := upgradeAction(t)
rel := releaseStub()
rel.Name = "fallout"
@@ -180,7 +184,7 @@ func TestUpgradeRelease_Atomic(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.UpdateError = fmt.Errorf("update fail")
upAction.cfg.KubeClient = failer
- upAction.Atomic = true
+ upAction.RollbackOnFailure = true
vals := map[string]interface{}{}
_, err := upAction.Run(rel.Name, buildChart(), vals)
@@ -382,7 +386,6 @@ func TestUpgradeRelease_Pending(t *testing.T) {
}
func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
-
is := assert.New(t)
req := require.New(t)
@@ -395,11 +398,10 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second
upAction.cfg.KubeClient = failer
- upAction.Wait = true
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{}
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
+ ctx, cancel := context.WithCancel(t.Context())
time.AfterFunc(time.Second, cancel)
res, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
@@ -407,10 +409,9 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
req.Error(err)
is.Contains(res.Info.Description, "Upgrade \"interrupted-release\" failed: context canceled")
is.Equal(res.Info.Status, release.StatusFailed)
-
}
-func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) {
+func TestUpgradeRelease_Interrupted_RollbackOnFailure(t *testing.T) {
is := assert.New(t)
req := require.New(t)
@@ -424,17 +425,16 @@ func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 5 * time.Second
upAction.cfg.KubeClient = failer
- upAction.Atomic = true
+ upAction.RollbackOnFailure = true
vals := map[string]interface{}{}
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
+ ctx, cancel := context.WithCancel(t.Context())
time.AfterFunc(time.Second, cancel)
res, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
req.Error(err)
- is.Contains(err.Error(), "release interrupted-release failed, and has been rolled back due to atomic being set: context canceled")
+ is.Contains(err.Error(), "release interrupted-release failed, and has been rolled back due to rollback-on-failure being set: context canceled")
// Now make sure it is actually upgraded
updatedRes, err := upAction.cfg.Releases.Get(res.Name, 3)
@@ -444,7 +444,7 @@ func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) {
}
func TestMergeCustomLabels(t *testing.T) {
- var tests = [][3]map[string]string{
+ tests := [][3]map[string]string{
{nil, nil, map[string]string{}},
{map[string]string{}, map[string]string{}, map[string]string{}},
{map[string]string{"k1": "v1", "k2": "v2"}, nil, map[string]string{"k1": "v1", "k2": "v2"}},
@@ -549,7 +549,7 @@ func TestUpgradeRelease_DryRun(t *testing.T) {
upAction.DryRun = true
vals := map[string]interface{}{}
- ctx, done := context.WithCancel(context.Background())
+ ctx, done := context.WithCancel(t.Context())
res, err := upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
done()
req.NoError(err)
@@ -565,7 +565,7 @@ func TestUpgradeRelease_DryRun(t *testing.T) {
upAction.HideSecret = true
vals = map[string]interface{}{}
- ctx, done = context.WithCancel(context.Background())
+ ctx, done = context.WithCancel(t.Context())
res, err = upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
done()
req.NoError(err)
@@ -581,8 +581,129 @@ func TestUpgradeRelease_DryRun(t *testing.T) {
upAction.DryRun = false
vals = map[string]interface{}{}
- ctx, done = context.WithCancel(context.Background())
+ ctx, done = context.WithCancel(t.Context())
_, err = upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
done()
req.Error(err)
}
+
+func TestGetUpgradeServerSideValue(t *testing.T) {
+ tests := []struct {
+ name string
+ actionServerSideOption string
+ releaseApplyMethod string
+ expectedServerSideApply bool
+ }{
+ {
+ name: "action ssa auto / release csa",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "csa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa auto / release ssa",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa auto / release empty",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa true / release csa",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "csa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release ssa",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release 'unknown'",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "foo",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release empty",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa false / release csa",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release ssa",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release 'unknown'",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "foo",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release empty",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ serverSideApply, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod)
+ assert.Nil(t, err)
+ assert.Equal(t, tt.expectedServerSideApply, serverSideApply)
+ })
+ }
+
+ testsError := []struct {
+ name string
+ actionServerSideOption string
+ releaseApplyMethod string
+ expectedErrorMsg string
+ }{
+ {
+ name: "action invalid option",
+ actionServerSideOption: "invalid",
+ releaseApplyMethod: "ssa",
+ expectedErrorMsg: "invalid/unknown release server-side apply method: invalid",
+ },
+ }
+
+ for _, tt := range testsError {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod)
+ assert.ErrorContains(t, err, tt.expectedErrorMsg)
+ })
+ }
+
+}
+
+func TestUpgradeRun_UnreachableKubeClient(t *testing.T) {
+ t.Helper()
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewUpgrade(config)
+ vals := map[string]interface{}{}
+ result, err := client.Run("", buildChart(), vals)
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
diff --git a/pkg/action/validate.go b/pkg/action/validate.go
index 4bc70e90c..761ccba47 100644
--- a/pkg/action/validate.go
+++ b/pkg/action/validate.go
@@ -18,8 +18,8 @@ package action
import (
"fmt"
+ "maps"
- "github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
@@ -52,7 +52,7 @@ func requireAdoption(resources kube.ResourceList) (kube.ResourceList, error) {
if apierrors.IsNotFound(err) {
return nil
}
- return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info))
+ return fmt.Errorf("could not get information about the resource %s: %w", resourceString(info), err)
}
requireUpdate.Append(info)
@@ -76,7 +76,7 @@ func existingResourceConflict(resources kube.ResourceList, releaseName, releaseN
if apierrors.IsNotFound(err) {
return nil
}
- return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info))
+ return fmt.Errorf("could not get information about the resource %s: %w", resourceString(info), err)
}
// Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace.
@@ -113,11 +113,7 @@ func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) er
}
if len(errs) > 0 {
- err := errors.New("invalid ownership metadata")
- for _, e := range errs {
- err = fmt.Errorf("%w; %s", err, e)
- }
- return err
+ return fmt.Errorf("invalid ownership metadata; %w", joinErrors(errs, "; "))
}
return nil
@@ -134,16 +130,16 @@ func requireValue(meta map[string]string, k, v string) error {
return nil
}
-// setMetadataVisitor adds release tracking metadata to all resources. If force is enabled, existing
+// setMetadataVisitor adds release tracking metadata to all resources. If forceOwnership is enabled, existing
// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an
// existing and conflicting value for the managed by label or Helm release/namespace annotations.
-func setMetadataVisitor(releaseName, releaseNamespace string, force bool) resource.VisitorFunc {
+func setMetadataVisitor(releaseName, releaseNamespace string, forceOwnership bool) resource.VisitorFunc {
return func(info *resource.Info, err error) error {
if err != nil {
return err
}
- if !force {
+ if !forceOwnership {
if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil {
return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err)
}
@@ -199,11 +195,7 @@ func mergeAnnotations(obj runtime.Object, annotations map[string]string) error {
// merge two maps, always taking the value on the right
func mergeStrStrMaps(current, desired map[string]string) map[string]string {
result := make(map[string]string)
- for k, v := range current {
- result[k] = v
- }
- for k, desiredVal := range desired {
- result[k] = desiredVal
- }
+ maps.Copy(result, current)
+ maps.Copy(result, desired)
return result
}
diff --git a/pkg/action/verify.go b/pkg/action/verify.go
index 68a5e2d88..ca2f4fa63 100644
--- a/pkg/action/verify.go
+++ b/pkg/action/verify.go
@@ -39,7 +39,7 @@ func NewVerify() *Verify {
// Run executes 'helm verify'.
func (v *Verify) Run(chartfile string) error {
var out strings.Builder
- p, err := downloader.VerifyChart(chartfile, v.Keyring)
+ p, err := downloader.VerifyChart(chartfile, chartfile+".prov", v.Keyring)
if err != nil {
return err
}
diff --git a/pkg/chart/common.go b/pkg/chart/common.go
new file mode 100644
index 000000000..8b1dd58c3
--- /dev/null
+++ b/pkg/chart/common.go
@@ -0,0 +1,219 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "reflect"
+ "strings"
+
+ v3chart "helm.sh/helm/v4/internal/chart/v3"
+ common "helm.sh/helm/v4/pkg/chart/common"
+ v2chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+var NewAccessor func(chrt Charter) (Accessor, error) = NewDefaultAccessor //nolint:revive
+
+func NewDefaultAccessor(chrt Charter) (Accessor, error) {
+ switch v := chrt.(type) {
+ case v2chart.Chart:
+ return &v2Accessor{&v}, nil
+ case *v2chart.Chart:
+ return &v2Accessor{v}, nil
+ case v3chart.Chart:
+ return &v3Accessor{&v}, nil
+ case *v3chart.Chart:
+ return &v3Accessor{v}, nil
+ default:
+ return nil, errors.New("unsupported chart type")
+ }
+}
+
+type v2Accessor struct {
+ chrt *v2chart.Chart
+}
+
+func (r *v2Accessor) Name() string {
+ return r.chrt.Metadata.Name
+}
+
+func (r *v2Accessor) IsRoot() bool {
+ return r.chrt.IsRoot()
+}
+
+func (r *v2Accessor) MetadataAsMap() map[string]interface{} {
+ var ret map[string]interface{}
+ if r.chrt.Metadata == nil {
+ return ret
+ }
+
+ ret, err := structToMap(r.chrt.Metadata)
+ if err != nil {
+ slog.Error("error converting metadata to map", "error", err)
+ }
+ return ret
+}
+
+func (r *v2Accessor) Files() []*common.File {
+ return r.chrt.Files
+}
+
+func (r *v2Accessor) Templates() []*common.File {
+ return r.chrt.Templates
+}
+
+func (r *v2Accessor) ChartFullPath() string {
+ return r.chrt.ChartFullPath()
+}
+
+func (r *v2Accessor) IsLibraryChart() bool {
+ return strings.EqualFold(r.chrt.Metadata.Type, "library")
+}
+
+func (r *v2Accessor) Dependencies() []Charter {
+ var deps = make([]Charter, len(r.chrt.Dependencies()))
+ for i, c := range r.chrt.Dependencies() {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v2Accessor) Values() map[string]interface{} {
+ return r.chrt.Values
+}
+
+func (r *v2Accessor) Schema() []byte {
+ return r.chrt.Schema
+}
+
+type v3Accessor struct {
+ chrt *v3chart.Chart
+}
+
+func (r *v3Accessor) Name() string {
+ return r.chrt.Metadata.Name
+}
+
+func (r *v3Accessor) IsRoot() bool {
+ return r.chrt.IsRoot()
+}
+
+func (r *v3Accessor) MetadataAsMap() map[string]interface{} {
+ var ret map[string]interface{}
+ if r.chrt.Metadata == nil {
+ return ret
+ }
+
+ ret, err := structToMap(r.chrt.Metadata)
+ if err != nil {
+ slog.Error("error converting metadata to map", "error", err)
+ }
+ return ret
+}
+
+func (r *v3Accessor) Files() []*common.File {
+ return r.chrt.Files
+}
+
+func (r *v3Accessor) Templates() []*common.File {
+ return r.chrt.Templates
+}
+
+func (r *v3Accessor) ChartFullPath() string {
+ return r.chrt.ChartFullPath()
+}
+
+func (r *v3Accessor) IsLibraryChart() bool {
+ return strings.EqualFold(r.chrt.Metadata.Type, "library")
+}
+
+func (r *v3Accessor) Dependencies() []Charter {
+ var deps = make([]Charter, len(r.chrt.Dependencies()))
+ for i, c := range r.chrt.Dependencies() {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v3Accessor) Values() map[string]interface{} {
+ return r.chrt.Values
+}
+
+func (r *v3Accessor) Schema() []byte {
+ return r.chrt.Schema
+}
+
+func structToMap(obj interface{}) (map[string]interface{}, error) {
+ objValue := reflect.ValueOf(obj)
+
+ // If the value is a pointer, dereference it
+ if objValue.Kind() == reflect.Ptr {
+ objValue = objValue.Elem()
+ }
+
+ // Check if the input is a struct
+ if objValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("input must be a struct or a pointer to a struct")
+ }
+
+ result := make(map[string]interface{})
+ objType := objValue.Type()
+
+ for i := 0; i < objValue.NumField(); i++ {
+ field := objType.Field(i)
+ value := objValue.Field(i)
+
+ switch value.Kind() {
+ case reflect.Struct:
+ nestedMap, err := structToMap(value.Interface())
+ if err != nil {
+ return nil, err
+ }
+ result[field.Name] = nestedMap
+ case reflect.Ptr:
+ // Recurse for pointers by dereferencing
+ if value.IsNil() {
+ result[field.Name] = nil
+ } else {
+ nestedMap, err := structToMap(value.Interface())
+ if err != nil {
+ return nil, err
+ }
+ result[field.Name] = nestedMap
+ }
+ case reflect.Slice:
+ sliceOfMaps := make([]interface{}, value.Len())
+ for j := 0; j < value.Len(); j++ {
+ sliceElement := value.Index(j)
+ if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == reflect.Ptr {
+ nestedMap, err := structToMap(sliceElement.Interface())
+ if err != nil {
+ return nil, err
+ }
+ sliceOfMaps[j] = nestedMap
+ } else {
+ sliceOfMaps[j] = sliceElement.Interface()
+ }
+ }
+ result[field.Name] = sliceOfMaps
+ default:
+ result[field.Name] = value.Interface()
+ }
+ }
+ return result, nil
+}
diff --git a/pkg/chart/v2/util/capabilities.go b/pkg/chart/common/capabilities.go
similarity index 87%
rename from pkg/chart/v2/util/capabilities.go
rename to pkg/chart/common/capabilities.go
index d4b420b2f..355c3978a 100644
--- a/pkg/chart/v2/util/capabilities.go
+++ b/pkg/chart/common/capabilities.go
@@ -13,17 +13,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
"fmt"
+ "slices"
"strconv"
- "github.com/Masterminds/semver/v3"
"k8s.io/client-go/kubernetes/scheme"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ k8sversion "k8s.io/apimachinery/pkg/util/version"
helmversion "helm.sh/helm/v4/internal/version"
)
@@ -84,14 +85,16 @@ func (kv *KubeVersion) GitVersion() string { return kv.Version }
// ParseKubeVersion parses kubernetes version from string
func ParseKubeVersion(version string) (*KubeVersion, error) {
- sv, err := semver.NewVersion(version)
+ // Based on the original k8s version parser.
+ // https://github.com/kubernetes/kubernetes/blob/b266ac2c3e42c2c4843f81e20213d2b2f43e450a/staging/src/k8s.io/apimachinery/pkg/util/version/version.go#L137
+ sv, err := k8sversion.ParseGeneric(version)
if err != nil {
return nil, err
}
return &KubeVersion{
Version: "v" + sv.String(),
- Major: strconv.FormatUint(sv.Major(), 10),
- Minor: strconv.FormatUint(sv.Minor(), 10),
+ Major: strconv.FormatUint(uint64(sv.Major()), 10),
+ Minor: strconv.FormatUint(uint64(sv.Minor()), 10),
}, nil
}
@@ -102,12 +105,7 @@ type VersionSet []string
//
// vs.Has("apps/v1")
func (v VersionSet) Has(apiVersion string) bool {
- for _, x := range v {
- if x == apiVersion {
- return true
- }
- }
- return false
+ return slices.Contains(v, apiVersion)
}
func allKnownVersions() VersionSet {
diff --git a/pkg/chart/v2/util/capabilities_test.go b/pkg/chart/common/capabilities_test.go
similarity index 82%
rename from pkg/chart/v2/util/capabilities_test.go
rename to pkg/chart/common/capabilities_test.go
index aa9be9db8..bf32b1f3f 100644
--- a/pkg/chart/v2/util/capabilities_test.go
+++ b/pkg/chart/common/capabilities_test.go
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
"testing"
@@ -82,3 +82,19 @@ func TestParseKubeVersion(t *testing.T) {
t.Errorf("Expected parsed KubeVersion.Minor to be 16, got %q", kv.Minor)
}
}
+
+func TestParseKubeVersionSuffix(t *testing.T) {
+ kv, err := ParseKubeVersion("v1.28+")
+ if err != nil {
+ t.Errorf("Expected v1.28+ to parse successfully")
+ }
+ if kv.Version != "v1.28" {
+ t.Errorf("Expected parsed KubeVersion.Version to be v1.28, got %q", kv.String())
+ }
+ if kv.Major != "1" {
+ t.Errorf("Expected parsed KubeVersion.Major to be 1, got %q", kv.Major)
+ }
+ if kv.Minor != "28" {
+ t.Errorf("Expected parsed KubeVersion.Minor to be 28, got %q", kv.Minor)
+ }
+}
diff --git a/pkg/chart/v2/util/errors.go b/pkg/chart/common/errors.go
similarity index 98%
rename from pkg/chart/v2/util/errors.go
rename to pkg/chart/common/errors.go
index a175b9758..b0a2d650e 100644
--- a/pkg/chart/v2/util/errors.go
+++ b/pkg/chart/common/errors.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
"fmt"
diff --git a/pkg/chart/v2/util/errors_test.go b/pkg/chart/common/errors_test.go
similarity index 98%
rename from pkg/chart/v2/util/errors_test.go
rename to pkg/chart/common/errors_test.go
index b8ae86384..06b3b054c 100644
--- a/pkg/chart/v2/util/errors_test.go
+++ b/pkg/chart/common/errors_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
"testing"
diff --git a/pkg/chart/v2/file.go b/pkg/chart/common/file.go
similarity index 98%
rename from pkg/chart/v2/file.go
rename to pkg/chart/common/file.go
index a2eeb0fcd..304643f1a 100644
--- a/pkg/chart/v2/file.go
+++ b/pkg/chart/common/file.go
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v2
+package common
// File represents a file as a name/value pair.
//
diff --git a/pkg/chart/common/testdata/coleridge.yaml b/pkg/chart/common/testdata/coleridge.yaml
new file mode 100644
index 000000000..b6579628b
--- /dev/null
+++ b/pkg/chart/common/testdata/coleridge.yaml
@@ -0,0 +1,12 @@
+poet: "Coleridge"
+title: "Rime of the Ancient Mariner"
+stanza: ["at", "length", "did", "cross", "an", "Albatross"]
+
+mariner:
+ with: "crossbow"
+ shot: "ALBATROSS"
+
+water:
+ water:
+ where: "everywhere"
+ nor: "any drop to drink"
diff --git a/pkg/chart/v2/util/coalesce.go b/pkg/chart/common/util/coalesce.go
similarity index 80%
rename from pkg/chart/v2/util/coalesce.go
rename to pkg/chart/common/util/coalesce.go
index 33d2d2833..5bfa1c608 100644
--- a/pkg/chart/v2/util/coalesce.go
+++ b/pkg/chart/common/util/coalesce.go
@@ -19,11 +19,12 @@ package util
import (
"fmt"
"log"
+ "maps"
"github.com/mitchellh/copystructure"
- "github.com/pkg/errors"
- chart "helm.sh/helm/v4/pkg/chart/v2"
+ chart "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
)
func concatPrefix(a, b string) string {
@@ -42,7 +43,7 @@ func concatPrefix(a, b string) string {
// - Scalar values and arrays are replaced, maps are merged
// - A chart has access to all of the variables for it, as well as all of
// the values destined for its dependencies.
-func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) {
+func CoalesceValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) {
valsCopy, err := copyValues(vals)
if err != nil {
return vals, err
@@ -64,7 +65,7 @@ func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, err
// Retaining Nils is useful when processes early in a Helm action or business
// logic need to retain them for when Coalescing will happen again later in the
// business logic.
-func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) {
+func MergeValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) {
valsCopy, err := copyValues(vals)
if err != nil {
return vals, err
@@ -72,7 +73,7 @@ func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error)
return coalesce(log.Printf, chrt, valsCopy, "", true)
}
-func copyValues(vals map[string]interface{}) (Values, error) {
+func copyValues(vals map[string]interface{}) (common.Values, error) {
v, err := copystructure.Copy(vals)
if err != nil {
return vals, err
@@ -96,28 +97,36 @@ type printFn func(format string, v ...interface{})
// Note, the merge argument specifies whether this is being used by MergeValues
// or CoalesceValues. Coalescing removes null values and their keys in some
// situations while merging keeps the null values.
-func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+func coalesce(printf printFn, ch chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
coalesceValues(printf, ch, dest, prefix, merge)
return coalesceDeps(printf, ch, dest, prefix, merge)
}
// coalesceDeps coalesces the dependencies of the given chart.
-func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
- for _, subchart := range chrt.Dependencies() {
- if c, ok := dest[subchart.Name()]; !ok {
+func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+ ch, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return dest, err
+ }
+ for _, subchart := range ch.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return dest, err
+ }
+ if c, ok := dest[sub.Name()]; !ok {
// If dest doesn't already have the key, create it.
- dest[subchart.Name()] = make(map[string]interface{})
+ dest[sub.Name()] = make(map[string]interface{})
} else if !istable(c) {
- return dest, errors.Errorf("type mismatch on %s: %t", subchart.Name(), c)
+ return dest, fmt.Errorf("type mismatch on %s: %t", sub.Name(), c)
}
- if dv, ok := dest[subchart.Name()]; ok {
+ if dv, ok := dest[sub.Name()]; ok {
dvmap := dv.(map[string]interface{})
- subPrefix := concatPrefix(prefix, chrt.Metadata.Name)
+ subPrefix := concatPrefix(prefix, ch.Name())
// Get globals out of dest and merge them into dvmap.
coalesceGlobals(printf, dvmap, dest, subPrefix, merge)
// Now coalesce the rest of the values.
var err error
- dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge)
+ dest[sub.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge)
if err != nil {
return dest, err
}
@@ -132,17 +141,17 @@ func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}
func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) {
var dg, sg map[string]interface{}
- if destglob, ok := dest[GlobalKey]; !ok {
+ if destglob, ok := dest[common.GlobalKey]; !ok {
dg = make(map[string]interface{})
} else if dg, ok = destglob.(map[string]interface{}); !ok {
- printf("warning: skipping globals because destination %s is not a table.", GlobalKey)
+ printf("warning: skipping globals because destination %s is not a table.", common.GlobalKey)
return
}
- if srcglob, ok := src[GlobalKey]; !ok {
+ if srcglob, ok := src[common.GlobalKey]; !ok {
sg = make(map[string]interface{})
} else if sg, ok = srcglob.(map[string]interface{}); !ok {
- printf("warning: skipping globals because source %s is not a table.", GlobalKey)
+ printf("warning: skipping globals because source %s is not a table.", common.GlobalKey)
return
}
@@ -178,27 +187,30 @@ func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix st
dg[key] = val
}
}
- dest[GlobalKey] = dg
+ dest[common.GlobalKey] = dg
}
func copyMap(src map[string]interface{}) map[string]interface{} {
m := make(map[string]interface{}, len(src))
- for k, v := range src {
- m[k] = v
- }
+ maps.Copy(m, src)
return m
}
// coalesceValues builds up a values map for a particular chart.
//
// Values in v will override the values in the chart.
-func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string, merge bool) {
- subPrefix := concatPrefix(prefix, c.Metadata.Name)
+func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, prefix string, merge bool) {
+ ch, err := chart.NewAccessor(c)
+ if err != nil {
+ return
+ }
+
+ subPrefix := concatPrefix(prefix, ch.Name())
// Using c.Values directly when coalescing a table can cause problems where
// the original c.Values is altered. Creating a deep copy stops the problem.
// This section is fault-tolerant as there is no ability to return an error.
- valuesCopy, err := copystructure.Copy(c.Values)
+ valuesCopy, err := copystructure.Copy(ch.Values())
var vc map[string]interface{}
var ok bool
if err != nil {
@@ -207,7 +219,7 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr
// wrong with c.Values. In this case we will use c.Values and report
// an error.
printf("warning: unable to copy values, err: %s", err)
- vc = c.Values
+ vc = ch.Values()
} else {
vc, ok = valuesCopy.(map[string]interface{})
if !ok {
@@ -215,7 +227,7 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr
// it cannot be treated as map[string]interface{} there is something
// strangely wrong. Log it and use c.Values
printf("warning: unable to convert values copy to values type")
- vc = c.Values
+ vc = ch.Values()
}
}
@@ -252,9 +264,17 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr
}
}
-func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool {
- for _, subchart := range chrt.Dependencies() {
- if subchart.Name() == key {
+func childChartMergeTrue(chrt chart.Charter, key string, merge bool) bool {
+ ch, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return merge
+ }
+ for _, subchart := range ch.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return merge
+ }
+ if sub.Name() == key {
return true
}
}
@@ -283,6 +303,11 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref
if dst == nil {
return src
}
+ for key, val := range dst {
+ if val == nil {
+ src[key] = nil
+ }
+ }
// Because dest has higher precedence than src, dest values override src
// values.
for key, val := range src {
@@ -303,3 +328,9 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref
}
return dst
}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
diff --git a/pkg/chart/v2/util/coalesce_test.go b/pkg/chart/common/util/coalesce_test.go
similarity index 97%
rename from pkg/chart/v2/util/coalesce_test.go
rename to pkg/chart/common/util/coalesce_test.go
index 3d4ee4fa8..871bfa8da 100644
--- a/pkg/chart/v2/util/coalesce_test.go
+++ b/pkg/chart/common/util/coalesce_test.go
@@ -17,12 +17,16 @@ limitations under the License.
package util
import (
+ "bytes"
"encoding/json"
"fmt"
+ "maps"
"testing"
+ "text/template"
"github.com/stretchr/testify/assert"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@@ -135,7 +139,7 @@ func TestCoalesceValues(t *testing.T) {
},
)
- vals, err := ReadValues(testCoalesceValuesYaml)
+ vals, err := common.ReadValues(testCoalesceValuesYaml)
if err != nil {
t.Fatal(err)
}
@@ -143,10 +147,8 @@ func TestCoalesceValues(t *testing.T) {
// taking a copy of the values before passing it
// to CoalesceValues as argument, so that we can
// use it for asserting later
- valsCopy := make(Values, len(vals))
- for key, value := range vals {
- valsCopy[key] = value
- }
+ valsCopy := make(common.Values, len(vals))
+ maps.Copy(valsCopy, vals)
v, err := CoalesceValues(c, vals)
if err != nil {
@@ -239,6 +241,13 @@ func TestCoalesceValues(t *testing.T) {
is.Equal(valsCopy, vals)
}
+func ttpl(tpl string, v map[string]interface{}) (string, error) {
+ var b bytes.Buffer
+ tt := template.Must(template.New("t").Parse(tpl))
+ err := tt.Execute(&b, v)
+ return b.String(), err
+}
+
func TestMergeValues(t *testing.T) {
is := assert.New(t)
@@ -295,7 +304,7 @@ func TestMergeValues(t *testing.T) {
},
)
- vals, err := ReadValues(testCoalesceValuesYaml)
+ vals, err := common.ReadValues(testCoalesceValuesYaml)
if err != nil {
t.Fatal(err)
}
@@ -303,10 +312,8 @@ func TestMergeValues(t *testing.T) {
// taking a copy of the values before passing it
// to MergeValues as argument, so that we can
// use it for asserting later
- valsCopy := make(Values, len(vals))
- for key, value := range vals {
- valsCopy[key] = value
- }
+ valsCopy := make(common.Values, len(vals))
+ maps.Copy(valsCopy, vals)
v, err := MergeValues(c, vals)
if err != nil {
diff --git a/pkg/chart/common/util/jsonschema.go b/pkg/chart/common/util/jsonschema.go
new file mode 100644
index 000000000..acd2ca100
--- /dev/null
+++ b/pkg/chart/common/util/jsonschema.go
@@ -0,0 +1,172 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/santhosh-tekuri/jsonschema/v6"
+
+ "helm.sh/helm/v4/internal/version"
+
+ chart "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// HTTPURLLoader implements a loader for HTTP/HTTPS URLs
+type HTTPURLLoader http.Client
+
+func (l *HTTPURLLoader) Load(urlStr string) (any, error) {
+ client := (*http.Client)(l)
+
+ req, err := http.NewRequest(http.MethodGet, urlStr, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HTTP request for %s: %w", urlStr, err)
+ }
+ req.Header.Set("User-Agent", version.GetUserAgent())
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("HTTP request failed for %s: %w", urlStr, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("HTTP request to %s returned status %d (%s)", urlStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+ }
+
+ return jsonschema.UnmarshalJSON(resp.Body)
+}
+
+// newHTTPURLLoader creates a HTTP URL loader with proxy support.
+func newHTTPURLLoader() *HTTPURLLoader {
+ httpLoader := HTTPURLLoader(http.Client{
+ Timeout: 15 * time.Second,
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{},
+ },
+ })
+ return &httpLoader
+}
+
+// ValidateAgainstSchema checks that values does not violate the structure laid out in schema
+func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) error {
+ chrt, err := chart.NewAccessor(ch)
+ if err != nil {
+ return err
+ }
+ var sb strings.Builder
+ if chrt.Schema() != nil {
+ slog.Debug("chart name", "chart-name", chrt.Name())
+ err := ValidateAgainstSingleSchema(values, chrt.Schema())
+ if err != nil {
+ sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
+ sb.WriteString(err.Error())
+ }
+ }
+ slog.Debug("number of dependencies in the chart", "dependencies", len(chrt.Dependencies()))
+ // For each dependency, recursively call this function with the coalesced values
+ for _, subchart := range chrt.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return err
+ }
+ subchartValues := values[sub.Name()].(map[string]interface{})
+ if err := ValidateAgainstSchema(subchart, subchartValues); err != nil {
+ sb.WriteString(err.Error())
+ }
+ }
+
+ if sb.Len() > 0 {
+ return errors.New(sb.String())
+ }
+
+ return nil
+}
+
+// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema
+func ValidateAgainstSingleSchema(values common.Values, schemaJSON []byte) (reterr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ reterr = fmt.Errorf("unable to validate schema: %s", r)
+ }
+ }()
+
+ // This unmarshal function leverages UseNumber() for number precision. The parser
+ // used for values does this as well.
+ schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON))
+ if err != nil {
+ return err
+ }
+ slog.Debug("unmarshalled JSON schema", "schema", schemaJSON)
+
+ // Configure compiler with loaders for different URL schemes
+ loader := jsonschema.SchemeURLLoader{
+ "file": jsonschema.FileLoader{},
+ "http": newHTTPURLLoader(),
+ "https": newHTTPURLLoader(),
+ }
+
+ compiler := jsonschema.NewCompiler()
+ compiler.UseLoader(loader)
+ err = compiler.AddResource("file:///values.schema.json", schema)
+ if err != nil {
+ return err
+ }
+
+ validator, err := compiler.Compile("file:///values.schema.json")
+ if err != nil {
+ return err
+ }
+
+ err = validator.Validate(values.AsMap())
+ if err != nil {
+ return JSONSchemaValidationError{err}
+ }
+
+ return nil
+}
+
+// Note, JSONSchemaValidationError is used to wrap the error from the underlying
+// validation package so that Helm has a clean interface and the validation package
+// could be replaced without changing the Helm SDK API.
+
+// JSONSchemaValidationError is the error returned when there is a schema validation
+// error.
+type JSONSchemaValidationError struct {
+ embeddedErr error
+}
+
+// Error prints the error message
+func (e JSONSchemaValidationError) Error() string {
+ errStr := e.embeddedErr.Error()
+
+ // This string prefixes all of our error details. Further up the stack of helm error message
+ // building more detail is provided to users. This is removed.
+ errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n")
+
+ // The extra new line is needed for when there are sub-charts.
+ return errStr + "\n"
+}
diff --git a/pkg/chart/v2/util/jsonschema_test.go b/pkg/chart/common/util/jsonschema_test.go
similarity index 51%
rename from pkg/chart/v2/util/jsonschema_test.go
rename to pkg/chart/common/util/jsonschema_test.go
index 3e3315732..b34f9d514 100644
--- a/pkg/chart/v2/util/jsonschema_test.go
+++ b/pkg/chart/common/util/jsonschema_test.go
@@ -17,14 +17,18 @@ limitations under the License.
package util
import (
+ "net/http"
+ "net/http/httptest"
"os"
+ "strings"
"testing"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
func TestValidateAgainstSingleSchema(t *testing.T) {
- values, err := ReadValuesFile("./testdata/test-values.yaml")
+ values, err := common.ReadValuesFile("./testdata/test-values.yaml")
if err != nil {
t.Fatalf("Error reading YAML file: %s", err)
}
@@ -39,7 +43,7 @@ func TestValidateAgainstSingleSchema(t *testing.T) {
}
func TestValidateAgainstInvalidSingleSchema(t *testing.T) {
- values, err := ReadValuesFile("./testdata/test-values.yaml")
+ values, err := common.ReadValuesFile("./testdata/test-values.yaml")
if err != nil {
t.Fatalf("Error reading YAML file: %s", err)
}
@@ -55,21 +59,21 @@ func TestValidateAgainstInvalidSingleSchema(t *testing.T) {
errString = err.Error()
}
- expectedErrString := "unable to validate schema: runtime error: invalid " +
- "memory address or nil pointer dereference"
+ expectedErrString := `"file:///values.schema.json#" is not valid against metaschema: jsonschema validation failed with 'https://json-schema.org/draft/2020-12/schema#'
+- at '': got number, want boolean or object`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
}
}
func TestValidateAgainstSingleSchemaNegative(t *testing.T) {
- values, err := ReadValuesFile("./testdata/test-values-negative.yaml")
+ values, err := common.ReadValuesFile("./testdata/test-values-negative.yaml")
if err != nil {
t.Fatalf("Error reading YAML file: %s", err)
}
schema, err := os.ReadFile("./testdata/test-values.schema.json")
if err != nil {
- t.Fatalf("Error reading YAML file: %s", err)
+ t.Fatalf("Error reading JSON file: %s", err)
}
var errString string
@@ -79,8 +83,8 @@ func TestValidateAgainstSingleSchemaNegative(t *testing.T) {
errString = err.Error()
}
- expectedErrString := `- (root): employmentInfo is required
-- age: Must be greater than or equal to 0
+ expectedErrString := `- at '': missing property 'employmentInfo'
+- at '/age': minimum: got -5, want 0
`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
@@ -104,6 +108,21 @@ const subchartSchema = `{
}
`
+const subchartSchema2020 = `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "title": "Values",
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "contains": { "type": "string" },
+ "unevaluatedItems": { "type": "number" }
+ }
+ },
+ "required": ["data"]
+}
+`
+
func TestValidateAgainstSchema(t *testing.T) {
subchartJSON := []byte(subchartSchema)
subchart := &chart.Chart{
@@ -159,9 +178,111 @@ func TestValidateAgainstSchemaNegative(t *testing.T) {
}
expectedErrString := `subchart:
-- (root): age is required
+- at '': missing property 'age'
+`
+ if errString != expectedErrString {
+ t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
+ }
+}
+
+func TestValidateAgainstSchema2020(t *testing.T) {
+ subchartJSON := []byte(subchartSchema2020)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{
+ "data": []any{"hello", 12},
+ },
+ }
+
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ t.Errorf("Error validating Values against Schema: %s", err)
+ }
+}
+
+func TestValidateAgainstSchema2020Negative(t *testing.T) {
+ subchartJSON := []byte(subchartSchema2020)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{
+ "data": []any{12},
+ },
+ }
+
+ var errString string
+ if err := ValidateAgainstSchema(chrt, vals); err == nil {
+ t.Fatalf("Expected an error, but got nil")
+ } else {
+ errString = err.Error()
+ }
+
+ expectedErrString := `subchart:
+- at '/data': no items match contains schema
+ - at '/data/0': got number, want string
`
if errString != expectedErrString {
t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
}
}
+
+func TestHTTPURLLoader_Load(t *testing.T) {
+ // Test successful JSON schema loading
+ t.Run("successful load", func(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`{"type": "object", "properties": {"name": {"type": "string"}}}`))
+ }))
+ defer server.Close()
+
+ loader := newHTTPURLLoader()
+ result, err := loader.Load(server.URL)
+ if err != nil {
+ t.Fatalf("Expected no error, got: %v", err)
+ }
+ if result == nil {
+ t.Fatal("Expected result to be non-nil")
+ }
+ })
+
+ t.Run("HTTP error status", func(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer server.Close()
+
+ loader := newHTTPURLLoader()
+ _, err := loader.Load(server.URL)
+ if err == nil {
+ t.Fatal("Expected error for HTTP 404")
+ }
+ if !strings.Contains(err.Error(), "404") {
+ t.Errorf("Expected error message to contain '404', got: %v", err)
+ }
+ })
+}
diff --git a/pkg/chart/common/util/testdata/test-values-invalid.schema.json b/pkg/chart/common/util/testdata/test-values-invalid.schema.json
new file mode 100644
index 000000000..35a16a2c4
--- /dev/null
+++ b/pkg/chart/common/util/testdata/test-values-invalid.schema.json
@@ -0,0 +1 @@
+ 1E1111111
diff --git a/pkg/chart/common/util/testdata/test-values-negative.yaml b/pkg/chart/common/util/testdata/test-values-negative.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/pkg/chart/common/util/testdata/test-values-negative.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/pkg/chart/common/util/testdata/test-values.schema.json b/pkg/chart/common/util/testdata/test-values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/pkg/chart/common/util/testdata/test-values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/pkg/chart/common/util/testdata/test-values.yaml b/pkg/chart/common/util/testdata/test-values.yaml
new file mode 100644
index 000000000..042dea664
--- /dev/null
+++ b/pkg/chart/common/util/testdata/test-values.yaml
@@ -0,0 +1,17 @@
+firstname: John
+lastname: Doe
+age: 25
+likesCoffee: true
+employmentInfo:
+ title: Software Developer
+ salary: 100000
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/pkg/chart/common/util/values.go b/pkg/chart/common/util/values.go
new file mode 100644
index 000000000..85cb29012
--- /dev/null
+++ b/pkg/chart/common/util/values.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+
+ "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValues(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities) (common.Values, error) {
+ return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false)
+}
+
+// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities, skipSchemaValidation bool) (common.Values, error) {
+ if caps == nil {
+ caps = common.DefaultCapabilities
+ }
+ accessor, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return nil, err
+ }
+ top := map[string]interface{}{
+ "Chart": accessor.MetadataAsMap(),
+ "Capabilities": caps,
+ "Release": map[string]interface{}{
+ "Name": options.Name,
+ "Namespace": options.Namespace,
+ "IsUpgrade": options.IsUpgrade,
+ "IsInstall": options.IsInstall,
+ "Revision": options.Revision,
+ "Service": "Helm",
+ },
+ }
+
+ vals, err := CoalesceValues(chrt, chrtVals)
+ if err != nil {
+ return common.Values(top), err
+ }
+
+ if !skipSchemaValidation {
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ return top, fmt.Errorf("values don't meet the specifications of the schema(s) in the following chart(s):\n%w", err)
+ }
+ }
+
+ top["Values"] = vals
+ return top, nil
+}
diff --git a/pkg/chart/common/util/values_test.go b/pkg/chart/common/util/values_test.go
new file mode 100644
index 000000000..5fc030567
--- /dev/null
+++ b/pkg/chart/common/util/values_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestToRenderValues(t *testing.T) {
+
+ chartValues := map[string]interface{}{
+ "name": "al Rashid",
+ "where": map[string]interface{}{
+ "city": "Basrah",
+ "title": "caliph",
+ },
+ }
+
+ overrideValues := map[string]interface{}{
+ "name": "Haroun",
+ "where": map[string]interface{}{
+ "city": "Baghdad",
+ "date": "809 CE",
+ },
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "test"},
+ Templates: []*common.File{},
+ Values: chartValues,
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
+ },
+ }
+ c.AddDependency(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "where"},
+ })
+
+ o := common.ReleaseOptions{
+ Name: "Seven Voyages",
+ Namespace: "default",
+ Revision: 1,
+ IsInstall: true,
+ }
+
+ res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure that the top-level values are all set.
+ metamap := res["Chart"].(map[string]interface{})
+ if name := metamap["Name"]; name.(string) != "test" {
+ t.Errorf("Expected chart name 'test', got %q", name)
+ }
+ relmap := res["Release"].(map[string]interface{})
+ if name := relmap["Name"]; name.(string) != "Seven Voyages" {
+ t.Errorf("Expected release name 'Seven Voyages', got %q", name)
+ }
+ if namespace := relmap["Namespace"]; namespace.(string) != "default" {
+ t.Errorf("Expected namespace 'default', got %q", namespace)
+ }
+ if revision := relmap["Revision"]; revision.(int) != 1 {
+ t.Errorf("Expected revision '1', got %d", revision)
+ }
+ if relmap["IsUpgrade"].(bool) {
+ t.Error("Expected upgrade to be false.")
+ }
+ if !relmap["IsInstall"].(bool) {
+ t.Errorf("Expected install to be true.")
+ }
+ if !res["Capabilities"].(*common.Capabilities).APIVersions.Has("v1") {
+ t.Error("Expected Capabilities to have v1 as an API")
+ }
+ if res["Capabilities"].(*common.Capabilities).KubeVersion.Major != "1" {
+ t.Error("Expected Capabilities to have a Kube version")
+ }
+
+ vals := res["Values"].(common.Values)
+ if vals["name"] != "Haroun" {
+ t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals)
+ }
+ where := vals["where"].(map[string]interface{})
+ expects := map[string]string{
+ "city": "Baghdad",
+ "date": "809 CE",
+ "title": "caliph",
+ }
+ for field, expect := range expects {
+ if got := where[field]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, where)
+ }
+ }
+}
diff --git a/pkg/chart/v2/util/values.go b/pkg/chart/common/values.go
similarity index 72%
rename from pkg/chart/v2/util/values.go
rename to pkg/chart/common/values.go
index 404ba9842..94958a779 100644
--- a/pkg/chart/v2/util/values.go
+++ b/pkg/chart/common/values.go
@@ -14,19 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
- "encoding/json"
- "fmt"
+ "errors"
"io"
"os"
"strings"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
-
- chart "helm.sh/helm/v4/pkg/chart/v2"
)
// GlobalKey is the name of the Values key that is used for storing global vars.
@@ -106,10 +102,7 @@ func tableLookup(v Values, simple string) (Values, error) {
// ReadValues will parse YAML byte data into a Values.
func ReadValues(data []byte) (vals Values, err error) {
- err = yaml.Unmarshal(data, &vals, func(d *json.Decoder) *json.Decoder {
- d.UseNumber()
- return d
- })
+ err = yaml.Unmarshal(data, &vals)
if len(vals) == 0 {
vals = Values{}
}
@@ -135,49 +128,6 @@ type ReleaseOptions struct {
IsInstall bool
}
-// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files
-//
-// This takes both ReleaseOptions and Capabilities to merge into the render values.
-func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) {
- return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false)
-}
-
-// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files
-//
-// This takes both ReleaseOptions and Capabilities to merge into the render values.
-func ToRenderValuesWithSchemaValidation(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities, skipSchemaValidation bool) (Values, error) {
- if caps == nil {
- caps = DefaultCapabilities
- }
- top := map[string]interface{}{
- "Chart": chrt.Metadata,
- "Capabilities": caps,
- "Release": map[string]interface{}{
- "Name": options.Name,
- "Namespace": options.Namespace,
- "IsUpgrade": options.IsUpgrade,
- "IsInstall": options.IsInstall,
- "Revision": options.Revision,
- "Service": "Helm",
- },
- }
-
- vals, err := CoalesceValues(chrt, chrtVals)
- if err != nil {
- return top, err
- }
-
- if !skipSchemaValidation {
- if err := ValidateAgainstSchema(chrt, vals); err != nil {
- errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s"
- return top, fmt.Errorf(errFmt, err.Error())
- }
- }
-
- top["Values"] = vals
- return top, nil
-}
-
// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
func istable(v interface{}) bool {
_, ok := v.(map[string]interface{})
diff --git a/pkg/chart/v2/util/values_test.go b/pkg/chart/common/values_test.go
similarity index 66%
rename from pkg/chart/v2/util/values_test.go
rename to pkg/chart/common/values_test.go
index 6a5400f78..3cceeb2b5 100644
--- a/pkg/chart/v2/util/values_test.go
+++ b/pkg/chart/common/values_test.go
@@ -14,15 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package common
import (
"bytes"
"fmt"
"testing"
"text/template"
-
- chart "helm.sh/helm/v4/pkg/chart/v2"
)
func TestReadValues(t *testing.T) {
@@ -66,92 +64,6 @@ water:
}
}
-func TestToRenderValues(t *testing.T) {
-
- chartValues := map[string]interface{}{
- "name": "al Rashid",
- "where": map[string]interface{}{
- "city": "Basrah",
- "title": "caliph",
- },
- }
-
- overrideValues := map[string]interface{}{
- "name": "Haroun",
- "where": map[string]interface{}{
- "city": "Baghdad",
- "date": "809 CE",
- },
- }
-
- c := &chart.Chart{
- Metadata: &chart.Metadata{Name: "test"},
- Templates: []*chart.File{},
- Values: chartValues,
- Files: []*chart.File{
- {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
- },
- }
- c.AddDependency(&chart.Chart{
- Metadata: &chart.Metadata{Name: "where"},
- })
-
- o := ReleaseOptions{
- Name: "Seven Voyages",
- Namespace: "default",
- Revision: 1,
- IsInstall: true,
- }
-
- res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false)
- if err != nil {
- t.Fatal(err)
- }
-
- // Ensure that the top-level values are all set.
- if name := res["Chart"].(*chart.Metadata).Name; name != "test" {
- t.Errorf("Expected chart name 'test', got %q", name)
- }
- relmap := res["Release"].(map[string]interface{})
- if name := relmap["Name"]; name.(string) != "Seven Voyages" {
- t.Errorf("Expected release name 'Seven Voyages', got %q", name)
- }
- if namespace := relmap["Namespace"]; namespace.(string) != "default" {
- t.Errorf("Expected namespace 'default', got %q", namespace)
- }
- if revision := relmap["Revision"]; revision.(int) != 1 {
- t.Errorf("Expected revision '1', got %d", revision)
- }
- if relmap["IsUpgrade"].(bool) {
- t.Error("Expected upgrade to be false.")
- }
- if !relmap["IsInstall"].(bool) {
- t.Errorf("Expected install to be true.")
- }
- if !res["Capabilities"].(*Capabilities).APIVersions.Has("v1") {
- t.Error("Expected Capabilities to have v1 as an API")
- }
- if res["Capabilities"].(*Capabilities).KubeVersion.Major != "1" {
- t.Error("Expected Capabilities to have a Kube version")
- }
-
- vals := res["Values"].(Values)
- if vals["name"] != "Haroun" {
- t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals)
- }
- where := vals["where"].(map[string]interface{})
- expects := map[string]string{
- "city": "Baghdad",
- "date": "809 CE",
- "title": "caliph",
- }
- for field, expect := range expects {
- if got := where[field]; got != expect {
- t.Errorf("Expected %q, got %q (%v)", expect, got, where)
- }
- }
-}
-
func TestReadValuesFile(t *testing.T) {
data, err := ReadValuesFile("./testdata/coleridge.yaml")
if err != nil {
@@ -224,6 +136,7 @@ chapter:
}
func matchValues(t *testing.T, data map[string]interface{}) {
+ t.Helper()
if data["poet"] != "Coleridge" {
t.Errorf("Unexpected poet: %s", data["poet"])
}
diff --git a/pkg/chart/interfaces.go b/pkg/chart/interfaces.go
new file mode 100644
index 000000000..e87dd2c08
--- /dev/null
+++ b/pkg/chart/interfaces.go
@@ -0,0 +1,35 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ common "helm.sh/helm/v4/pkg/chart/common"
+)
+
+type Charter interface{}
+
+type Accessor interface {
+ Name() string
+ IsRoot() bool
+ MetadataAsMap() map[string]interface{}
+ Files() []*common.File
+ Templates() []*common.File
+ ChartFullPath() string
+ IsLibraryChart() bool
+ Dependencies() []Charter
+ Values() map[string]interface{}
+ Schema() []byte
+}
diff --git a/pkg/chart/v2/chart.go b/pkg/chart/v2/chart.go
index dcc2a43eb..f59bcd8b3 100644
--- a/pkg/chart/v2/chart.go
+++ b/pkg/chart/v2/chart.go
@@ -19,6 +19,8 @@ import (
"path/filepath"
"regexp"
"strings"
+
+ "helm.sh/helm/v4/pkg/chart/common"
)
// APIVersionV1 is the API version number for version 1.
@@ -37,20 +39,20 @@ type Chart struct {
//
// This should not be used except in special cases like `helm show values`,
// where we want to display the raw values, comments and all.
- Raw []*File `json:"-"`
+ Raw []*common.File `json:"-"`
// Metadata is the contents of the Chartfile.
Metadata *Metadata `json:"metadata"`
// Lock is the contents of Chart.lock.
Lock *Lock `json:"lock"`
// Templates for this chart.
- Templates []*File `json:"templates"`
+ Templates []*common.File `json:"templates"`
// Values are default config for this chart.
Values map[string]interface{} `json:"values"`
// Schema is an optional JSON schema for imposing structure on Values
Schema []byte `json:"schema"`
// Files are miscellaneous files in a chart archive,
// e.g. README, LICENSE, etc.
- Files []*File `json:"files"`
+ Files []*common.File `json:"files"`
parent *Chart
dependencies []*Chart
@@ -62,7 +64,7 @@ type CRD struct {
// Filename is the File obj Name including (sub-)chart.ChartFullPath
Filename string
// File is the File obj for the crd
- File *File
+ File *common.File
}
// SetDependencies replaces the chart dependencies.
@@ -113,6 +115,8 @@ func (ch *Chart) ChartPath() string {
}
// ChartFullPath returns the full path to this chart.
+// Note that the path may not correspond to the path where the file can be found on the file system if the path
+// points to an aliased subchart.
func (ch *Chart) ChartFullPath() string {
if !ch.IsRoot() {
return ch.Parent().ChartFullPath() + "/charts/" + ch.Name()
@@ -135,8 +139,8 @@ func (ch *Chart) AppVersion() string {
// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart.
// Deprecated: use CRDObjects()
-func (ch *Chart) CRDs() []*File {
- files := []*File{}
+func (ch *Chart) CRDs() []*common.File {
+ files := []*common.File{}
// Find all resources in the crds/ directory
for _, f := range ch.Files {
if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
diff --git a/pkg/chart/v2/chart_test.go b/pkg/chart/v2/chart_test.go
index d6311085b..a96d8c0c0 100644
--- a/pkg/chart/v2/chart_test.go
+++ b/pkg/chart/v2/chart_test.go
@@ -20,11 +20,13 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
)
func TestCRDs(t *testing.T) {
chrt := Chart{
- Files: []*File{
+ Files: []*common.File{
{
Name: "crds/foo.yaml",
Data: []byte("hello"),
@@ -57,7 +59,7 @@ func TestCRDs(t *testing.T) {
func TestSaveChartNoRawData(t *testing.T) {
chrt := Chart{
- Raw: []*File{
+ Raw: []*common.File{
{
Name: "fhqwhgads.yaml",
Data: []byte("Everybody to the Limit"),
@@ -76,7 +78,7 @@ func TestSaveChartNoRawData(t *testing.T) {
t.Fatal(err)
}
- is.Equal([]*File(nil), res.Raw)
+ is.Equal([]*common.File(nil), res.Raw)
}
func TestMetadata(t *testing.T) {
@@ -162,7 +164,7 @@ func TestChartFullPath(t *testing.T) {
func TestCRDObjects(t *testing.T) {
chrt := Chart{
- Files: []*File{
+ Files: []*common.File{
{
Name: "crds/foo.yaml",
Data: []byte("hello"),
@@ -190,7 +192,7 @@ func TestCRDObjects(t *testing.T) {
{
Name: "crds/foo.yaml",
Filename: "crds/foo.yaml",
- File: &File{
+ File: &common.File{
Name: "crds/foo.yaml",
Data: []byte("hello"),
},
@@ -198,7 +200,7 @@ func TestCRDObjects(t *testing.T) {
{
Name: "crds/foo/bar/baz.yaml",
Filename: "crds/foo/bar/baz.yaml",
- File: &File{
+ File: &common.File{
Name: "crds/foo/bar/baz.yaml",
Data: []byte("hello"),
},
diff --git a/pkg/lint/lint.go b/pkg/chart/v2/lint/lint.go
similarity index 78%
rename from pkg/lint/lint.go
rename to pkg/chart/v2/lint/lint.go
index a61d5e43f..b26d65a34 100644
--- a/pkg/lint/lint.go
+++ b/pkg/chart/v2/lint/lint.go
@@ -14,24 +14,24 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package lint // import "helm.sh/helm/v4/pkg/lint"
+package lint // import "helm.sh/helm/v4/pkg/chart/v2/lint"
import (
"path/filepath"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/rules"
- "helm.sh/helm/v4/pkg/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
)
type linterOptions struct {
- KubeVersion *chartutil.KubeVersion
+ KubeVersion *common.KubeVersion
SkipSchemaValidation bool
}
type LinterOption func(lo *linterOptions)
-func WithKubeVersion(kubeVersion *chartutil.KubeVersion) LinterOption {
+func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption {
return func(lo *linterOptions) {
lo.KubeVersion = kubeVersion
}
@@ -57,9 +57,10 @@ func RunAll(baseDir string, values map[string]interface{}, namespace string, opt
}
rules.Chartfile(&result)
- rules.ValuesWithOverrides(&result, values)
+ rules.ValuesWithOverrides(&result, values, lo.SkipSchemaValidation)
rules.TemplatesWithSkipSchemaValidation(&result, values, namespace, lo.KubeVersion, lo.SkipSchemaValidation)
rules.Dependencies(&result)
+ rules.Crds(&result)
return result
}
diff --git a/pkg/lint/lint_test.go b/pkg/chart/v2/lint/lint_test.go
similarity index 83%
rename from pkg/lint/lint_test.go
rename to pkg/chart/v2/lint/lint_test.go
index 067d140f6..bd3ec1f1f 100644
--- a/pkg/lint/lint_test.go
+++ b/pkg/chart/v2/lint/lint_test.go
@@ -21,8 +21,10 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
)
var values map[string]interface{}
@@ -32,24 +34,31 @@ const namespace = "testNamespace"
const badChartDir = "rules/testdata/badchartfile"
const badValuesFileDir = "rules/testdata/badvaluesfile"
const badYamlFileDir = "rules/testdata/albatross"
+const badCrdFileDir = "rules/testdata/badcrdfile"
const goodChartDir = "rules/testdata/goodone"
const subChartValuesDir = "rules/testdata/withsubchart"
const malformedTemplate = "rules/testdata/malformed-template"
+const invalidChartFileDir = "rules/testdata/invalidchartfile"
func TestBadChart(t *testing.T) {
m := RunAll(badChartDir, values, namespace).Messages
- if len(m) != 8 {
+ if len(m) != 9 {
t.Errorf("Number of errors %v", len(m))
t.Errorf("All didn't fail with expected errors, got %#v", m)
}
- // There should be one INFO, and 2 ERROR messages, check for them
- var i, e, e2, e3, e4, e5, e6 bool
+ // There should be one INFO, 2 WARNING and 2 ERROR messages, check for them
+ var i, w, w2, e, e2, e3, e4, e5, e6 bool
for _, msg := range m {
if msg.Severity == support.InfoSev {
if strings.Contains(msg.Err.Error(), "icon is recommended") {
i = true
}
}
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "does not exist") {
+ w = true
+ }
+ }
if msg.Severity == support.ErrorSev {
if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVer") {
e = true
@@ -74,8 +83,13 @@ func TestBadChart(t *testing.T) {
e6 = true
}
}
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ w2 = true
+ }
+ }
}
- if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 {
+ if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w || !w2 {
t.Errorf("Didn't find all the expected errors, got %#v", m)
}
}
@@ -90,6 +104,16 @@ func TestInvalidYaml(t *testing.T) {
}
}
+func TestInvalidChartYaml(t *testing.T) {
+ m := RunAll(invalidChartFileDir, values, namespace).Messages
+ if len(m) != 2 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") {
+ t.Errorf("All didn't have the error for duplicate YAML keys")
+ }
+}
+
func TestBadValues(t *testing.T) {
m := RunAll(badValuesFileDir, values, namespace).Messages
if len(m) < 1 {
@@ -100,6 +124,13 @@ func TestBadValues(t *testing.T) {
}
}
+func TestBadCrdFile(t *testing.T) {
+ m := RunAll(badCrdFileDir, values, namespace).Messages
+ assert.Lenf(t, m, 2, "All didn't fail with expected errors, got %#v", m)
+ assert.ErrorContains(t, m[0].Err, "apiVersion is not in 'apiextensions.k8s.io'")
+ assert.ErrorContains(t, m[1].Err, "object kind is not 'CustomResourceDefinition'")
+}
+
func TestGoodChart(t *testing.T) {
m := RunAll(goodChartDir, values, namespace).Messages
if len(m) != 0 {
diff --git a/pkg/lint/rules/chartfile.go b/pkg/chart/v2/lint/rules/chartfile.go
similarity index 80%
rename from pkg/lint/rules/chartfile.go
rename to pkg/chart/v2/lint/rules/chartfile.go
index 598557a97..806363477 100644
--- a/pkg/lint/rules/chartfile.go
+++ b/pkg/chart/v2/lint/rules/chartfile.go
@@ -14,21 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package rules // import "helm.sh/helm/v4/pkg/lint/rules"
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
import (
+ "errors"
"fmt"
"os"
"path/filepath"
"github.com/Masterminds/semver/v3"
"github.com/asaskevich/govalidator"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
)
// Chartfile runs a set of linter rules related to Chart.yaml file
@@ -46,6 +46,9 @@ func Chartfile(linter *support.Linter) {
return
}
+ _, err = chartutil.StrictLoadChartfile(chartPath)
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartYamlStrictFormat(err))
+
// type check for Chart.yaml . ignoring error as any parse
// errors would already be caught in the above load function
chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath)
@@ -64,6 +67,7 @@ func Chartfile(linter *support.Linter) {
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile))
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile))
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile))
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartVersionStrictSemVerV2(chartFile))
}
func validateChartVersionType(data map[string]interface{}) error {
@@ -81,7 +85,7 @@ func isStringValue(data map[string]interface{}, key string) error {
}
valueType := fmt.Sprintf("%T", value)
if valueType != "string" {
- return errors.Errorf("%s should be of type string but it's of type %s", key, valueType)
+ return fmt.Errorf("%s should be of type string but it's of type %s", key, valueType)
}
return nil
}
@@ -97,7 +101,14 @@ func validateChartYamlNotDirectory(chartPath string) error {
func validateChartYamlFormat(chartFileError error) error {
if chartFileError != nil {
- return errors.Errorf("unable to parse YAML\n\t%s", chartFileError.Error())
+ return fmt.Errorf("unable to parse YAML\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartYamlStrictFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("failed to strictly parse chart metadata file\n\t%w", chartFileError)
}
return nil
}
@@ -131,9 +142,8 @@ func validateChartVersion(cf *chart.Metadata) error {
}
version, err := semver.NewVersion(cf.Version)
-
if err != nil {
- return errors.Errorf("version '%s' is not a valid SemVer", cf.Version)
+ return fmt.Errorf("version '%s' is not a valid SemVer", cf.Version)
}
c, err := semver.NewConstraint(">0.0.0-0")
@@ -143,7 +153,17 @@ func validateChartVersion(cf *chart.Metadata) error {
valid, msg := c.Validate(version)
if !valid && len(msg) > 0 {
- return errors.Errorf("version %v", msg[0])
+ return fmt.Errorf("version %v", msg[0])
+ }
+
+ return nil
+}
+
+func validateChartVersionStrictSemVerV2(cf *chart.Metadata) error {
+ _, err := semver.StrictNewVersion(cf.Version)
+
+ if err != nil {
+ return fmt.Errorf("version '%s' is not a valid SemVerV2", cf.Version)
}
return nil
@@ -151,12 +171,15 @@ func validateChartVersion(cf *chart.Metadata) error {
func validateChartMaintainer(cf *chart.Metadata) error {
for _, maintainer := range cf.Maintainers {
+ if maintainer == nil {
+ return errors.New("a maintainer entry is empty")
+ }
if maintainer.Name == "" {
return errors.New("each maintainer requires a name")
} else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) {
- return errors.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
+ return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
} else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) {
- return errors.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
+ return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
}
}
return nil
@@ -165,7 +188,7 @@ func validateChartMaintainer(cf *chart.Metadata) error {
func validateChartSources(cf *chart.Metadata) error {
for _, source := range cf.Sources {
if source == "" || !govalidator.IsRequestURL(source) {
- return errors.Errorf("invalid source URL '%s'", source)
+ return fmt.Errorf("invalid source URL '%s'", source)
}
}
return nil
@@ -180,7 +203,7 @@ func validateChartIconPresence(cf *chart.Metadata) error {
func validateChartIconURL(cf *chart.Metadata) error {
if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) {
- return errors.Errorf("invalid icon URL '%s'", cf.Icon)
+ return fmt.Errorf("invalid icon URL '%s'", cf.Icon)
}
return nil
}
diff --git a/pkg/chart/v2/lint/rules/chartfile_test.go b/pkg/chart/v2/lint/rules/chartfile_test.go
new file mode 100644
index 000000000..692358426
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/chartfile_test.go
@@ -0,0 +1,319 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+const (
+ badChartNameDir = "testdata/badchartname"
+ badChartDir = "testdata/badchartfile"
+ anotherBadChartDir = "testdata/anotherbadchartfile"
+)
+
+var (
+ badChartNamePath = filepath.Join(badChartNameDir, "Chart.yaml")
+ badChartFilePath = filepath.Join(badChartDir, "Chart.yaml")
+ nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml")
+)
+
+var badChart, _ = chartutil.LoadChartfile(badChartFilePath)
+var badChartName, _ = chartutil.LoadChartfile(badChartNamePath)
+
+// Validation functions Test
+func TestValidateChartYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingChartFilePath, os.ModePerm)
+ defer os.Remove(nonExistingChartFilePath)
+
+ err := validateChartYamlNotDirectory(nonExistingChartFilePath)
+ if err == nil {
+ t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error")
+ }
+}
+
+func TestValidateChartYamlFormat(t *testing.T) {
+ err := validateChartYamlFormat(errors.New("Read error"))
+ if err == nil {
+ t.Errorf("validateChartYamlFormat to return a linter error, got no error")
+ }
+
+ err = validateChartYamlFormat(nil)
+ if err != nil {
+ t.Errorf("validateChartYamlFormat to return no error, got a linter error")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ err := validateChartName(badChart)
+ if err == nil {
+ t.Errorf("validateChartName to return a linter error, got no error")
+ }
+
+ err = validateChartName(badChartName)
+ if err == nil {
+ t.Error("expected validateChartName to return a linter error for an invalid name, got no error")
+ }
+}
+
+func TestValidateChartVersion(t *testing.T) {
+ var failTest = []struct {
+ Version string
+ ErrorMsg string
+ }{
+ {"", "version is required"},
+ {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"},
+ {"waps", "'waps' is not a valid SemVer"},
+ {"-3", "'-3' is not a valid SemVer"},
+ }
+
+ var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"}
+
+ for _, test := range failTest {
+ badChart.Version = test.Version
+ err := validateChartVersion(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+ }
+ }
+
+ for _, version := range successTest {
+ badChart.Version = version
+ err := validateChartVersion(badChart)
+ if err != nil {
+ t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version)
+ }
+ }
+}
+
+func TestValidateChartVersionStrictSemVerV2(t *testing.T) {
+ var failTest = []struct {
+ Version string
+ ErrorMsg string
+ }{
+ {"", "version '' is not a valid SemVerV2"},
+ {"1", "version '1' is not a valid SemVerV2"},
+ {"1.1", "version '1.1' is not a valid SemVerV2"},
+ }
+
+ var successTest = []string{"1.1.1", "0.0.1+build", "0.0.1-beta"}
+
+ for _, test := range failTest {
+ badChart.Version = test.Version
+ err := validateChartVersionStrictSemVerV2(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartVersionStrictSemVerV2(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+ }
+ }
+
+ for _, version := range successTest {
+ badChart.Version = version
+ err := validateChartVersionStrictSemVerV2(badChart)
+ if err != nil {
+ t.Errorf("validateChartVersionStrictSemVerV2(%s) to return no error, got a linter error", version)
+ }
+ }
+}
+
+func TestValidateChartMaintainer(t *testing.T) {
+ var failTest = []struct {
+ Name string
+ Email string
+ ErrorMsg string
+ }{
+ {"", "", "each maintainer requires a name"},
+ {"", "test@test.com", "each maintainer requires a name"},
+ {"John Snow", "wrongFormatEmail.com", "invalid email"},
+ }
+
+ var successTest = []struct {
+ Name string
+ Email string
+ }{
+ {"John Snow", ""},
+ {"John Snow", "john@winterfell.com"},
+ }
+
+ for _, test := range failTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartMaintainer(%s, %s) to return \"%s\", got no error", test.Name, test.Email, test.ErrorMsg)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err != nil {
+ t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error())
+ }
+ }
+
+ // Testing for an empty maintainer
+ badChart.Maintainers = []*chart.Maintainer{nil}
+ err := validateChartMaintainer(badChart)
+ if err == nil {
+ t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected")
+ }
+ if err.Error() != "a maintainer entry is empty" {
+ t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error())
+ }
+}
+
+func TestValidateChartSources(t *testing.T) {
+ var failTest = []string{"", "RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish"}
+ for _, test := range failTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid source URL") {
+ t.Errorf("validateChartSources(%s) to return \"invalid source URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartSources(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestValidateChartIconPresence(t *testing.T) {
+ t.Run("Icon absent", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err == nil {
+ t.Errorf("validateChartIconPresence to return a linter error, got no error")
+ } else if !strings.Contains(err.Error(), "icon is recommended") {
+ t.Errorf("expected %q, got %q", "icon is recommended", err.Error())
+ }
+ })
+ t.Run("Icon present", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "http://example.org/icon.png",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err != nil {
+ t.Errorf("Unexpected error: %q", err.Error())
+ }
+ })
+}
+
+func TestValidateChartIconURL(t *testing.T) {
+ var failTest = []string{"RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish.png"}
+ for _, test := range failTest {
+ badChart.Icon = test
+ err := validateChartIconURL(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid icon URL") {
+ t.Errorf("validateChartIconURL(%s) to return \"invalid icon URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Icon = test
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartIconURL(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestChartfile(t *testing.T) {
+ t.Run("Chart.yaml basic validity issues", func(t *testing.T) {
+ linter := support.Linter{ChartDir: badChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 7
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "name is required") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. The value must be either \"v1\" or \"v2\"") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "version '0.0.0.0' is not a valid SemVer") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+
+ if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") {
+ t.Errorf("Unexpected message 3: %s", msgs[3].Err)
+ }
+
+ if !strings.Contains(msgs[4].Err.Error(), "chart type is not valid in apiVersion") {
+ t.Errorf("Unexpected message 4: %s", msgs[4].Err)
+ }
+
+ if !strings.Contains(msgs[5].Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
+ t.Errorf("Unexpected message 5: %s", msgs[5].Err)
+ }
+ if !strings.Contains(msgs[6].Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ t.Errorf("Unexpected message 6: %s", msgs[6].Err)
+ }
+ })
+
+ t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) {
+ linter := support.Linter{ChartDir: anotherBadChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 4
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "version should be of type string") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "version '7.2445e+06' is not a valid SemVer") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "appVersion should be of type string") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+ if !strings.Contains(msgs[3].Err.Error(), "version '7.2445e+06' is not a valid SemVerV2") {
+ t.Errorf("Unexpected message 3: %s", msgs[3].Err)
+ }
+ })
+}
diff --git a/pkg/chart/v2/lint/rules/crds.go b/pkg/chart/v2/lint/rules/crds.go
new file mode 100644
index 000000000..49e30192a
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/crds.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// Crds lints the CRDs in the Linter.
+func Crds(linter *support.Linter) {
+ fpath := "crds/"
+ crdsPath := filepath.Join(linter.ChartDir, fpath)
+
+ // crds directory is optional
+ if _, err := os.Stat(crdsPath); errors.Is(err, fs.ErrNotExist) {
+ return
+ }
+
+ crdsDirValid := linter.RunLinterRule(support.ErrorSev, fpath, validateCrdsDir(crdsPath))
+ if !crdsDirValid {
+ return
+ }
+
+ // Load chart and parse CRDs
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ /* Iterate over all the CRDs to check:
+ 1. It is a YAML file and not a template
+ 2. The API version is apiextensions.k8s.io
+ 3. The kind is CustomResourceDefinition
+ */
+ for _, crd := range chart.CRDObjects() {
+ fileName := crd.Name
+ fpath = fileName
+
+ decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(crd.File.Data), 4096)
+ for {
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if err == io.EOF {
+ break
+ }
+
+ // If YAML parsing fails here, it will always fail in the next block as well, so we should return here.
+ // This also confirms the YAML is not a template, since templates can't be decoded into a K8sYamlStruct.
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdAPIVersion(yamlStruct))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdKind(yamlStruct))
+ }
+ }
+}
+
+// Validation functions
+func validateCrdsDir(crdsPath string) error {
+ fi, err := os.Stat(crdsPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateCrdAPIVersion(obj *k8sYamlStruct) error {
+ if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") {
+ return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'")
+ }
+ return nil
+}
+
+func validateCrdKind(obj *k8sYamlStruct) error {
+ if obj.Kind != "CustomResourceDefinition" {
+ return fmt.Errorf("object kind is not 'CustomResourceDefinition'")
+ }
+ return nil
+}
diff --git a/pkg/chart/v2/lint/rules/crds_test.go b/pkg/chart/v2/lint/rules/crds_test.go
new file mode 100644
index 000000000..e644f182f
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/crds_test.go
@@ -0,0 +1,36 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+)
+
+const invalidCrdsDir = "./testdata/invalidcrdsdir"
+
+func TestInvalidCrdsDir(t *testing.T) {
+ linter := support.Linter{ChartDir: invalidCrdsDir}
+ Crds(&linter)
+ res := linter.Messages
+
+ assert.Len(t, res, 1)
+ assert.ErrorContains(t, res[0].Err, "not a directory")
+}
diff --git a/pkg/lint/rules/dependencies.go b/pkg/chart/v2/lint/rules/dependencies.go
similarity index 93%
rename from pkg/lint/rules/dependencies.go
rename to pkg/chart/v2/lint/rules/dependencies.go
index 2ab56eca5..d944a016d 100644
--- a/pkg/lint/rules/dependencies.go
+++ b/pkg/chart/v2/lint/rules/dependencies.go
@@ -14,17 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package rules // import "helm.sh/helm/v4/pkg/lint/rules"
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
import (
"fmt"
"strings"
- "github.com/pkg/errors"
-
chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
"helm.sh/helm/v4/pkg/chart/v2/loader"
- "helm.sh/helm/v4/pkg/lint/support"
)
// Dependencies runs lints against a chart's dependencies
@@ -43,7 +41,7 @@ func Dependencies(linter *support.Linter) {
func validateChartFormat(chartError error) error {
if chartError != nil {
- return errors.Errorf("unable to load chart\n\t%s", chartError)
+ return fmt.Errorf("unable to load chart\n\t%w", chartError)
}
return nil
}
diff --git a/pkg/lint/rules/dependencies_test.go b/pkg/chart/v2/lint/rules/dependencies_test.go
similarity index 98%
rename from pkg/lint/rules/dependencies_test.go
rename to pkg/chart/v2/lint/rules/dependencies_test.go
index 1369b2372..08a6646cd 100644
--- a/pkg/lint/rules/dependencies_test.go
+++ b/pkg/chart/v2/lint/rules/dependencies_test.go
@@ -20,8 +20,8 @@ import (
"testing"
chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
)
func chartWithBadDependencies() chart.Chart {
diff --git a/pkg/chart/v2/lint/rules/deprecations.go b/pkg/chart/v2/lint/rules/deprecations.go
new file mode 100644
index 000000000..6eba316bc
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/deprecations.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+
+import (
+ "fmt"
+ "strconv"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/endpoints/deprecation"
+ kscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+var (
+ // This should be set in the Makefile based on the version of client-go being imported.
+ // These constants will be overwritten with LDFLAGS. The version components must be
+ // strings in order for LDFLAGS to set them.
+ k8sVersionMajor = "1"
+ k8sVersionMinor = "20"
+)
+
+// deprecatedAPIError indicates than an API is deprecated in Kubernetes
+type deprecatedAPIError struct {
+ Deprecated string
+ Message string
+}
+
+func (e deprecatedAPIError) Error() string {
+ msg := e.Message
+ return msg
+}
+
+func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error {
+ // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
+ if resource.APIVersion == "" {
+ return nil
+ }
+ if resource.Kind == "" {
+ return nil
+ }
+
+ majorVersion := k8sVersionMajor
+ minorVersion := k8sVersionMinor
+
+ if kubeVersion != nil {
+ majorVersion = kubeVersion.Major
+ minorVersion = kubeVersion.Minor
+ }
+
+ runtimeObject, err := resourceToRuntimeObject(resource)
+ if err != nil {
+ // do not error for non-kubernetes resources
+ if runtime.IsNotRegisteredError(err) {
+ return nil
+ }
+ return err
+ }
+
+ major, err := strconv.Atoi(majorVersion)
+ if err != nil {
+ return err
+ }
+ minor, err := strconv.Atoi(minorVersion)
+ if err != nil {
+ return err
+ }
+
+ if !deprecation.IsDeprecated(runtimeObject, major, minor) {
+ return nil
+ }
+ gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind)
+ return deprecatedAPIError{
+ Deprecated: gvk,
+ Message: deprecation.WarningMessage(runtimeObject),
+ }
+}
+
+func resourceToRuntimeObject(resource *k8sYamlStruct) (runtime.Object, error) {
+ scheme := runtime.NewScheme()
+ kscheme.AddToScheme(scheme)
+
+ gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
+ out, err := scheme.New(gvk)
+ if err != nil {
+ return nil, err
+ }
+ out.GetObjectKind().SetGroupVersionKind(gvk)
+ return out, nil
+}
diff --git a/pkg/lint/rules/deprecations_test.go b/pkg/chart/v2/lint/rules/deprecations_test.go
similarity index 87%
rename from pkg/lint/rules/deprecations_test.go
rename to pkg/chart/v2/lint/rules/deprecations_test.go
index c0e64d04f..e153f67e6 100644
--- a/pkg/lint/rules/deprecations_test.go
+++ b/pkg/chart/v2/lint/rules/deprecations_test.go
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package rules // import "helm.sh/helm/v4/pkg/lint/rules"
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
import "testing"
func TestValidateNoDeprecations(t *testing.T) {
- deprecated := &K8sYamlStruct{
+ deprecated := &k8sYamlStruct{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
}
@@ -32,7 +32,7 @@ func TestValidateNoDeprecations(t *testing.T) {
t.Fatalf("Expected error message to be non-blank: %v", err)
}
- if err := validateNoDeprecations(&K8sYamlStruct{
+ if err := validateNoDeprecations(&k8sYamlStruct{
APIVersion: "v1",
Kind: "Pod",
}, nil); err != nil {
diff --git a/pkg/lint/rules/template.go b/pkg/chart/v2/lint/rules/template.go
similarity index 78%
rename from pkg/lint/rules/template.go
rename to pkg/chart/v2/lint/rules/template.go
index 287968340..5c84d0f68 100644
--- a/pkg/lint/rules/template.go
+++ b/pkg/chart/v2/lint/rules/template.go
@@ -19,29 +19,26 @@ package rules
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
- "regexp"
+ "slices"
"strings"
- "github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/validation"
apipath "k8s.io/apimachinery/pkg/api/validation/path"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/engine"
- "helm.sh/helm/v4/pkg/lint/support"
-)
-
-var (
- crdHookSearch = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`)
- releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`)
)
// Templates lints the templates in the Linter.
@@ -50,19 +47,23 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace
}
// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version.
-func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) {
+func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) {
TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false)
}
// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not.
-func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) {
+func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) {
fpath := "templates/"
templatesPath := filepath.Join(linter.ChartDir, fpath)
- templatesDirExist := linter.RunLinterRule(support.WarningSev, fpath, validateTemplatesDir(templatesPath))
-
// Templates directory is optional for now
- if !templatesDirExist {
+ templatesDirExists := linter.RunLinterRule(support.WarningSev, fpath, templatesDirExists(templatesPath))
+ if !templatesDirExists {
+ return
+ }
+
+ validTemplatesDir := linter.RunLinterRule(support.ErrorSev, fpath, validateTemplatesDir(templatesPath))
+ if !validTemplatesDir {
return
}
@@ -75,12 +76,12 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string
return
}
- options := chartutil.ReleaseOptions{
+ options := common.ReleaseOptions{
Name: "test-release",
Namespace: namespace,
}
- caps := chartutil.DefaultCapabilities.Copy()
+ caps := common.DefaultCapabilities.Copy()
if kubeVersion != nil {
caps.KubeVersion = *kubeVersion
}
@@ -91,12 +92,12 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string
return
}
- cvals, err := chartutil.CoalesceValues(chart, values)
+ cvals, err := util.CoalesceValues(chart, values)
if err != nil {
return
}
- valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
if err != nil {
linter.RunLinterRule(support.ErrorSev, fpath, err)
return
@@ -119,14 +120,10 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string
- Metadata.Namespace is not set
*/
for _, template := range chart.Templates {
- fileName, data := template.Name, template.Data
+ fileName := template.Name
fpath = fileName
linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
- // These are v3 specific checks to make sure and warn people if their
- // chart is not compatible with v3
- linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data))
- linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data))
// We only apply the following lint rules to yaml files
if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" {
@@ -148,9 +145,9 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string
// Lint all resources if the file contains multiple documents separated by ---
for {
- // Even though K8sYamlStruct only defines a few fields, an error in any other
+ // Even though k8sYamlStruct only defines a few fields, an error in any other
// key will be raised as well
- var yamlStruct *K8sYamlStruct
+ var yamlStruct *k8sYamlStruct
err := decoder.Decode(&yamlStruct)
if err == io.EOF {
@@ -203,11 +200,21 @@ func validateTopIndentLevel(content string) error {
}
// Validation functions
+func templatesDirExists(templatesPath string) error {
+ _, err := os.Stat(templatesPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("directory does not exist")
+ }
+ return nil
+}
+
func validateTemplatesDir(templatesPath string) error {
- if fi, err := os.Stat(templatesPath); err == nil {
- if !fi.IsDir() {
- return errors.New("not a directory")
- }
+ fi, err := os.Stat(templatesPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
}
return nil
}
@@ -216,30 +223,31 @@ func validateAllowedExtension(fileName string) error {
ext := filepath.Ext(fileName)
validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"}
- for _, b := range validExtensions {
- if b == ext {
- return nil
- }
+ if slices.Contains(validExtensions, ext) {
+ return nil
}
- return errors.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
+ return fmt.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
}
func validateYamlContent(err error) error {
- return errors.Wrap(err, "unable to parse YAML")
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+ return nil
}
// validateMetadataName uses the correct validation function for the object
// Kind, or if not set, defaults to the standard definition of a subdomain in
// DNS (RFC 1123), used by most resources.
-func validateMetadataName(obj *K8sYamlStruct) error {
+func validateMetadataName(obj *k8sYamlStruct) error {
fn := validateMetadataNameFunc(obj)
allErrs := field.ErrorList{}
for _, msg := range fn(obj.Metadata.Name, false) {
allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
}
if len(allErrs) > 0 {
- return errors.Wrapf(allErrs.ToAggregate(), "object name does not conform to Kubernetes naming requirements: %q", obj.Metadata.Name)
+ return fmt.Errorf("object name does not conform to Kubernetes naming requirements: %q: %w", obj.Metadata.Name, allErrs.ToAggregate())
}
return nil
}
@@ -257,7 +265,7 @@ func validateMetadataName(obj *K8sYamlStruct) error {
// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
// kinds that don't have special requirements, so is the most likely to work if
// new kinds are added.
-func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
+func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc {
switch strings.ToLower(obj.Kind) {
case "pod", "node", "secret", "endpoints", "resourcequota", // core
"controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
@@ -291,33 +299,20 @@ func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
}
}
-func validateNoCRDHooks(manifest []byte) error {
- if crdHookSearch.Match(manifest) {
- return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart")
- }
- return nil
-}
-
-func validateNoReleaseTime(manifest []byte) error {
- if releaseTimeSearch.Match(manifest) {
- return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
- }
- return nil
-}
-
// validateMatchSelector ensures that template specs have a selector declared.
// See https://github.com/helm/helm/issues/1990
-func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error {
+func validateMatchSelector(yamlStruct *k8sYamlStruct, manifest string) error {
switch yamlStruct.Kind {
case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
// verify that matchLabels or matchExpressions is present
- if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) {
+ if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") {
return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
}
}
return nil
}
-func validateListAnnotations(yamlStruct *K8sYamlStruct, manifest string) error {
+
+func validateListAnnotations(yamlStruct *k8sYamlStruct, manifest string) error {
if yamlStruct.Kind == "List" {
m := struct {
Items []struct {
@@ -333,18 +328,15 @@ func validateListAnnotations(yamlStruct *K8sYamlStruct, manifest string) error {
for _, i := range m.Items {
if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
- return errors.New("Annotation 'helm.sh/resource-policy' within List objects are ignored")
+ return errors.New("annotation 'helm.sh/resource-policy' within List objects are ignored")
}
}
}
return nil
}
-// K8sYamlStruct stubs a Kubernetes YAML file.
-//
-// DEPRECATED: In Helm 4, this will be made a private type, as it is for use only within
-// the rules package.
-type K8sYamlStruct struct {
+// k8sYamlStruct stubs a Kubernetes YAML file.
+type k8sYamlStruct struct {
APIVersion string `json:"apiVersion"`
Kind string
Metadata k8sYamlMetadata
diff --git a/pkg/lint/rules/template_test.go b/pkg/chart/v2/lint/rules/template_test.go
similarity index 71%
rename from pkg/lint/rules/template_test.go
rename to pkg/chart/v2/lint/rules/template_test.go
index 7205ace6d..3e8e0b371 100644
--- a/pkg/lint/rules/template_test.go
+++ b/pkg/chart/v2/lint/rules/template_test.go
@@ -23,9 +23,10 @@ import (
"strings"
"testing"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
)
const templateTestBasedir = "./testdata/albatross"
@@ -85,26 +86,6 @@ func TestTemplateIntegrationHappyPath(t *testing.T) {
}
}
-func TestV3Fail(t *testing.T) {
- linter := support.Linter{ChartDir: "./testdata/v3-fail"}
- Templates(&linter, values, namespace, strict)
- res := linter.Messages
-
- if len(res) != 3 {
- t.Fatalf("Expected 3 errors, got %d, %v", len(res), res)
- }
-
- if !strings.Contains(res[0].Err.Error(), ".Release.Time has been removed in v3") {
- t.Errorf("Unexpected error: %s", res[0].Err)
- }
- if !strings.Contains(res[1].Err.Error(), "manifest is a crd-install hook") {
- t.Errorf("Unexpected error: %s", res[1].Err)
- }
- if !strings.Contains(res[2].Err.Error(), "manifest is a crd-install hook") {
- t.Errorf("Unexpected error: %s", res[2].Err)
- }
-}
-
func TestMultiTemplateFail(t *testing.T) {
linter := support.Linter{ChartDir: "./testdata/multi-template-fail"}
Templates(&linter, values, namespace, strict)
@@ -121,76 +102,76 @@ func TestMultiTemplateFail(t *testing.T) {
func TestValidateMetadataName(t *testing.T) {
tests := []struct {
- obj *K8sYamlStruct
+ obj *k8sYamlStruct
wantErr bool
}{
// Most kinds use IsDNS1123Subdomain.
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
- {&K8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
- {&K8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
- {&K8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
- {&K8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true},
// Service uses IsDNS1035Label.
- {&K8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true},
- {&K8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
// Namespace uses IsDNS1123Label.
- {&K8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
- {&K8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false},
// CertificateSigningRequest has no validation.
- {&K8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false},
- {&K8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, false},
// RBAC uses path validation.
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
- {&K8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
- {&K8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
- {&K8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
- {&K8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
// Unknown Kind
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
- {&K8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
// No kind
- {&K8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false},
- {&K8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%s/%s", tt.obj.Kind, tt.obj.Metadata.Name), func(t *testing.T) {
@@ -209,7 +190,7 @@ func TestDeprecatedAPIFails(t *testing.T) {
Version: "0.1.0",
Icon: "satisfy-the-linting-gods.gif",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{
Name: "templates/baddeployment.yaml",
Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"),
@@ -269,7 +250,7 @@ func TestStrictTemplateParsingMapError(t *testing.T) {
"key1": "val1",
},
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{
Name: "templates/configmap.yaml",
Data: []byte(manifest),
@@ -293,7 +274,7 @@ func TestStrictTemplateParsingMapError(t *testing.T) {
}
func TestValidateMatchSelector(t *testing.T) {
- md := &K8sYamlStruct{
+ md := &k8sYamlStruct{
APIVersion: "apps/v1",
Kind: "Deployment",
Metadata: k8sYamlMetadata{
@@ -398,7 +379,7 @@ func TestEmptyWithCommentsManifests(t *testing.T) {
Version: "0.1.0",
Icon: "satisfy-the-linting-gods.gif",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{
Name: "templates/empty-with-comments.yaml",
Data: []byte("#@formatter:off\n"),
@@ -421,7 +402,7 @@ func TestEmptyWithCommentsManifests(t *testing.T) {
}
}
func TestValidateListAnnotations(t *testing.T) {
- md := &K8sYamlStruct{
+ md := &k8sYamlStruct{
APIVersion: "v1",
Kind: "List",
Metadata: k8sYamlMetadata{
diff --git a/pkg/lint/rules/testdata/albatross/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/albatross/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl b/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl
new file mode 100644
index 000000000..24f76db73
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{define "name"}}{{default "nginx" .Values.nameOverride | trunc 63 | trimSuffix "-" }}{{end}}
+
+{{/*
+Create a default fully qualified app name.
+
+We truncate at 63 chars because some Kubernetes name fields are limited to this
+(by the DNS naming spec).
+*/}}
+{{define "fullname"}}
+{{- $name := default "nginx" .Values.nameOverride -}}
+{{printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{end}}
diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml
new file mode 100644
index 000000000..a11e0e90e
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml
@@ -0,0 +1 @@
+{{ deliberateSyntaxError }}
diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml
new file mode 100644
index 000000000..16bb27d55
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml
@@ -0,0 +1,19 @@
+# This is a service gateway to the replica set created by the deployment.
+# Take a look at the deployment.yaml for general notes about this chart.
+apiVersion: v1
+kind: Service
+metadata:
+ name: "{{ .Values.name }}"
+ labels:
+ app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ kubeVersion: {{ .Capabilities.KubeVersion.Major }}
+spec:
+ ports:
+ - port: {{default 80 .Values.httpPort | quote}}
+ targetPort: 80
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{template "fullname" .}}
diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml
new file mode 100644
index 000000000..74cc6a0dc
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml
@@ -0,0 +1 @@
+name: "mariner"
diff --git a/pkg/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/anotherbadchartfile/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml
new file mode 100644
index 000000000..3564ede3e
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml
@@ -0,0 +1,11 @@
+description: A Helm chart for Kubernetes
+version: 0.0.0.0
+home: ""
+type: application
+dependencies:
+- name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/pkg/lint/rules/testdata/badchartname/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badchartname/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml b/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml
new file mode 100644
index 000000000..08c4b61ac
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: badcrdfile
+type: application
+icon: http://riverrun.io
diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
new file mode 100644
index 000000000..468916053
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
@@ -0,0 +1,2 @@
+apiVersion: bad.k8s.io/v1beta1
+kind: CustomResourceDefinition
diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
new file mode 100644
index 000000000..523b97f85
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
@@ -0,0 +1,2 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: NotACustomResourceDefinition
diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep b/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml
new file mode 100644
index 000000000..2fffc7715
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badcrdfile.
diff --git a/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/badvaluesfile/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
new file mode 100644
index 000000000..6c2ceb8db
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{.name | default "foo" | title}}
diff --git a/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml
new file mode 100644
index 000000000..b5a10271c
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml
@@ -0,0 +1,2 @@
+# Invalid value for badvaluesfile for testing lint fails with invalid yaml format
+name= "value"
diff --git a/pkg/lint/rules/testdata/goodone/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/goodone/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml
new file mode 100644
index 000000000..1d7350f1d
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: tests.test.io
+spec:
+ group: test.io
+ names:
+ kind: Test
+ listKind: TestList
+ plural: tests
+ singular: test
+ scope: Namespaced
+ versions:
+ - name : v1alpha2
+ served: true
+ storage: true
+ - name : v1alpha1
+ served: true
+ storage: false
diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml
new file mode 100644
index 000000000..cd46f62c7
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.name | default "foo" | lower }}
diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml
new file mode 100644
index 000000000..92c3d9bb9
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml
@@ -0,0 +1 @@
+name: "goodone-here"
diff --git a/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml
new file mode 100644
index 000000000..0fd58d1d4
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml
@@ -0,0 +1,6 @@
+name: some-chart
+apiVersion: v2
+apiVersion: v1
+description: A Helm chart for Kubernetes
+version: 1.3.0
+icon: http://example.com
diff --git a/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml
new file mode 100644
index 000000000..18e30f70f
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: invalidcrdsdir
+type: application
+icon: http://riverrun.io
diff --git a/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml
new file mode 100644
index 000000000..6b1611a64
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml
@@ -0,0 +1 @@
+# Default values for invalidcrdsdir.
diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore b/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/pkg/lint/rules/testdata/malformed-template/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/malformed-template/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml
new file mode 100644
index 000000000..213198fda
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml
@@ -0,0 +1 @@
+{ {- $relname := .Release.Name -}}
diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml
new file mode 100644
index 000000000..1cc3182ea
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml
@@ -0,0 +1,82 @@
+# Default values for test.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/pkg/lint/rules/testdata/multi-template-fail/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/multi-template-fail/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
new file mode 100644
index 000000000..835be07be
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: game-config
+data:
+ game.properties: cheat
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: -this:name-is-not_valid$
+data:
+ game.properties: empty
diff --git a/pkg/lint/rules/testdata/v3-fail/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/v3-fail/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl
new file mode 100644
index 000000000..0b89e723b
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "v3-fail.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "v3-fail.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "v3-fail.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "v3-fail.labels" -}}
+helm.sh/chart: {{ include "v3-fail.chart" . }}
+{{ include "v3-fail.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "v3-fail.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "v3-fail.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "v3-fail.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "v3-fail.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml
new file mode 100644
index 000000000..6d651ab8e
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ labels:
+ nope: {{ .Release.Time }}
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "v3-fail.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "v3-fail.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "v3-fail.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml
new file mode 100644
index 000000000..4790650d0
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml
@@ -0,0 +1,62 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "v3-fail.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+ {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+ {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+ {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ "helm.sh/hook": crd-install
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $svcPort }}
+ {{- else }}
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml
new file mode 100644
index 000000000..79a0f40b0
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ annotations:
+ helm.sh/hook: crd-install
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "v3-fail.selectorLabels" . | nindent 4 }}
diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml
new file mode 100644
index 000000000..01d99b4e6
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml
@@ -0,0 +1,66 @@
+# Default values for v3-fail.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/pkg/lint/rules/testdata/withsubchart/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml
diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
similarity index 100%
rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
rename to pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
new file mode 100644
index 000000000..422a359d5
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
@@ -0,0 +1,2 @@
+subchart:
+ name: subchart
\ No newline at end of file
diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/lint/rules/values.go b/pkg/chart/v2/lint/rules/values.go
similarity index 74%
rename from pkg/lint/rules/values.go
rename to pkg/chart/v2/lint/rules/values.go
index 8aae250c6..994a6a463 100644
--- a/pkg/lint/rules/values.go
+++ b/pkg/chart/v2/lint/rules/values.go
@@ -17,13 +17,13 @@ limitations under the License.
package rules
import (
+ "fmt"
"os"
"path/filepath"
- "github.com/pkg/errors"
-
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
- "helm.sh/helm/v4/pkg/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
)
// ValuesWithOverrides tests the values.yaml file.
@@ -32,7 +32,7 @@ import (
// they are only tested for well-formedness.
//
// If additional values are supplied, they are coalesced into the values in values.yaml.
-func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}) {
+func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) {
file := "values.yaml"
vf := filepath.Join(linter.ChartDir, file)
fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
@@ -41,21 +41,21 @@ func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]inter
return
}
- linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides))
+ linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides, skipSchemaValidation))
}
func validateValuesFileExistence(valuesPath string) error {
_, err := os.Stat(valuesPath)
if err != nil {
- return errors.Errorf("file does not exist")
+ return fmt.Errorf("file does not exist")
}
return nil
}
-func validateValuesFile(valuesPath string, overrides map[string]interface{}) error {
- values, err := chartutil.ReadValuesFile(valuesPath)
+func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error {
+ values, err := common.ReadValuesFile(valuesPath)
if err != nil {
- return errors.Wrap(err, "unable to parse YAML")
+ return fmt.Errorf("unable to parse YAML: %w", err)
}
// Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
@@ -63,8 +63,8 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}) err
// We could change that. For now, though, we retain that strategy, and thus can
// coalesce tables (like reuse-values does) instead of doing the full chart
// CoalesceValues
- coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
- coalescedValues = chartutil.CoalesceTables(coalescedValues, values)
+ coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+ coalescedValues = util.CoalesceTables(coalescedValues, values)
ext := filepath.Ext(valuesPath)
schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
@@ -75,5 +75,10 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}) err
if err != nil {
return err
}
- return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema)
+
+ if !skipSchemaValidation {
+ return util.ValidateAgainstSingleSchema(coalescedValues, schema)
+ }
+
+ return nil
}
diff --git a/pkg/chart/v2/lint/rules/values_test.go b/pkg/chart/v2/lint/rules/values_test.go
new file mode 100644
index 000000000..288b77436
--- /dev/null
+++ b/pkg/chart/v2/lint/rules/values_test.go
@@ -0,0 +1,183 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+var nonExistingValuesFilePath = filepath.Join("/fake/dir", "values.yaml")
+
+const testSchema = `
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "helm values test schema",
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "username",
+ "password"
+ ],
+ "properties": {
+ "username": {
+ "description": "Your username",
+ "type": "string"
+ },
+ "password": {
+ "description": "Your password",
+ "type": "string"
+ }
+ }
+}
+`
+
+func TestValidateValuesYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingValuesFilePath, os.ModePerm)
+ defer os.Remove(nonExistingValuesFilePath)
+
+ err := validateValuesFileExistence(nonExistingValuesFilePath)
+ if err == nil {
+ t.Errorf("validateValuesFileExistence to return a linter error, got no error")
+ }
+}
+
+func TestValidateValuesFileWellFormed(t *testing.T) {
+ badYaml := `
+ not:well[]{}formed
+ `
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml))
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+}
+
+func TestValidateValuesFileSchema(t *testing.T) {
+ yaml := "username: admin\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFileSchemaFailure(t *testing.T) {
+ // 1234 is an int, not a string. This should fail.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, false)
+ if err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+
+ assert.Contains(t, err.Error(), "- at '/username': got number, want string")
+}
+
+func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T) {
+ // 1234 is an int, not a string. This should fail normally but pass with skipSchemaValidation.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, true)
+ if err != nil {
+ t.Fatal("expected values file to pass parsing because of skipSchemaValidation")
+ }
+}
+
+func TestValidateValuesFileSchemaOverrides(t *testing.T) {
+ yaml := "username: admin"
+ overrides := map[string]interface{}{
+ "password": "swordfish",
+ }
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, overrides, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFile(t *testing.T) {
+ tests := []struct {
+ name string
+ yaml string
+ overrides map[string]interface{}
+ errorMessage string
+ }{
+ {
+ name: "value added",
+ yaml: "username: admin",
+ overrides: map[string]interface{}{"password": "swordfish"},
+ },
+ {
+ name: "value not overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser"},
+ errorMessage: "- at '/password': got null, want string",
+ },
+ {
+ name: "value overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(tt.yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, tt.overrides, false)
+
+ switch {
+ case err != nil && tt.errorMessage == "":
+ t.Errorf("Failed validation with %s", err)
+ case err == nil && tt.errorMessage != "":
+ t.Error("expected values file to fail parsing")
+ case err != nil && tt.errorMessage != "":
+ assert.Contains(t, err.Error(), tt.errorMessage, "Failed with unexpected error")
+ }
+ })
+ }
+}
+
+func createTestingSchema(t *testing.T, dir string) string {
+ t.Helper()
+ schemafile := filepath.Join(dir, "values.schema.json")
+ if err := os.WriteFile(schemafile, []byte(testSchema), 0700); err != nil {
+ t.Fatalf("Failed to write schema to tmpdir: %s", err)
+ }
+ return schemafile
+}
diff --git a/pkg/lint/support/doc.go b/pkg/chart/v2/lint/support/doc.go
similarity index 91%
rename from pkg/lint/support/doc.go
rename to pkg/chart/v2/lint/support/doc.go
index b007804dc..7e050b8c2 100644
--- a/pkg/lint/support/doc.go
+++ b/pkg/chart/v2/lint/support/doc.go
@@ -20,4 +20,4 @@ Package support contains tools for linting charts.
Linting is the process of testing charts for errors or warnings regarding
formatting, compilation, or standards compliance.
*/
-package support // import "helm.sh/helm/v4/pkg/lint/support"
+package support // import "helm.sh/helm/v4/pkg/chart/v2/lint/support"
diff --git a/pkg/chart/v2/lint/support/message.go b/pkg/chart/v2/lint/support/message.go
new file mode 100644
index 000000000..5efbc7a61
--- /dev/null
+++ b/pkg/chart/v2/lint/support/message.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import "fmt"
+
+// Severity indicates the severity of a Message.
+const (
+ // UnknownSev indicates that the severity of the error is unknown, and should not stop processing.
+ UnknownSev = iota
+ // InfoSev indicates information, for example missing values.yaml file
+ InfoSev
+ // WarningSev indicates that something does not meet code standards, but will likely function.
+ WarningSev
+ // ErrorSev indicates that something will not likely function.
+ ErrorSev
+)
+
+// sev matches the *Sev states.
+var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"}
+
+// Linter encapsulates a linting run of a particular chart.
+type Linter struct {
+ Messages []Message
+ // The highest severity of all the failing lint rules
+ HighestSeverity int
+ ChartDir string
+}
+
+// Message describes an error encountered while linting.
+type Message struct {
+ // Severity is one of the *Sev constants
+ Severity int
+ Path string
+ Err error
+}
+
+func (m Message) Error() string {
+ return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+ return Message{Severity: severity, Path: path, Err: err}
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+ // severity is out of bound
+ if severity < 0 || severity >= len(sev) {
+ return false
+ }
+
+ if err != nil {
+ l.Messages = append(l.Messages, NewMessage(severity, path, err))
+
+ if severity > l.HighestSeverity {
+ l.HighestSeverity = severity
+ }
+ }
+ return err == nil
+}
diff --git a/pkg/chart/v2/lint/support/message_test.go b/pkg/chart/v2/lint/support/message_test.go
new file mode 100644
index 000000000..ce5b5e42e
--- /dev/null
+++ b/pkg/chart/v2/lint/support/message_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import (
+ "errors"
+ "testing"
+)
+
+var errLint = errors.New("lint failed")
+
+func TestRunLinterRule(t *testing.T) {
+ var tests = []struct {
+ Severity int
+ LintError error
+ ExpectedMessages int
+ ExpectedReturn bool
+ ExpectedHighestSeverity int
+ }{
+ {InfoSev, errLint, 1, false, InfoSev},
+ {WarningSev, errLint, 2, false, WarningSev},
+ {ErrorSev, errLint, 3, false, ErrorSev},
+ // No error so it returns true
+ {ErrorSev, nil, 3, true, ErrorSev},
+ // Retains highest severity
+ {InfoSev, errLint, 4, false, ErrorSev},
+ // Invalid severity values
+ {4, errLint, 4, false, ErrorSev},
+ {22, errLint, 4, false, ErrorSev},
+ {-1, errLint, 4, false, ErrorSev},
+ }
+
+ linter := Linter{}
+ for _, test := range tests {
+ isValid := linter.RunLinterRule(test.Severity, "chart", test.LintError)
+ if len(linter.Messages) != test.ExpectedMessages {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.Messages should now have %d message, we got %d", test.Severity, test.LintError, test.ExpectedMessages, len(linter.Messages))
+ }
+
+ if linter.HighestSeverity != test.ExpectedHighestSeverity {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.HighestSeverity should be %d, we got %d", test.Severity, test.LintError, test.ExpectedHighestSeverity, linter.HighestSeverity)
+ }
+
+ if isValid != test.ExpectedReturn {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), should have returned %t but returned %t", test.Severity, test.LintError, test.ExpectedReturn, isValid)
+ }
+ }
+}
+
+func TestMessage(t *testing.T) {
+ m := Message{ErrorSev, "Chart.yaml", errors.New("Foo")}
+ if m.Error() != "[ERROR] Chart.yaml: Foo" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{WarningSev, "templates/", errors.New("Bar")}
+ if m.Error() != "[WARNING] templates/: Bar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{InfoSev, "templates/rc.yaml", errors.New("FooBar")}
+ if m.Error() != "[INFO] templates/rc.yaml: FooBar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+}
diff --git a/pkg/chart/v2/loader/archive.go b/pkg/chart/v2/loader/archive.go
index cb6d3bfe8..b9f370f56 100644
--- a/pkg/chart/v2/loader/archive.go
+++ b/pkg/chart/v2/loader/archive.go
@@ -20,6 +20,7 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
+ "errors"
"fmt"
"io"
"net/http"
@@ -28,11 +29,18 @@ import (
"regexp"
"strings"
- "github.com/pkg/errors"
-
chart "helm.sh/helm/v4/pkg/chart/v2"
)
+// MaxDecompressedChartSize is the maximum size of a chart archive that will be
+// decompressed. This is the decompressed size of all the files.
+// The default value is 100 MiB.
+var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB
+
+// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load.
+// The size of the file is the decompressed version of it when it is stored in an archive.
+var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB
+
var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)
// FileLoader loads a chart from a file
@@ -119,6 +127,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
files := []*BufferedFile{}
tr := tar.NewReader(unzipped)
+ remainingSize := MaxDecompressedChartSize
for {
b := bytes.NewBuffer(nil)
hd, err := tr.Next()
@@ -160,7 +169,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
n = path.Clean(n)
if n == "." {
// In this case, the original path was relative when it should have been absolute.
- return nil, errors.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
+ return nil, fmt.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
}
if strings.HasPrefix(n, "..") {
return nil, errors.New("chart illegally references parent directory")
@@ -178,10 +187,30 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
return nil, errors.New("chart yaml not in base directory")
}
- if _, err := io.Copy(b, tr); err != nil {
+ if hd.Size > remainingSize {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
+ if hd.Size > MaxDecompressedFileSize {
+ return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize)
+ }
+
+ limitedReader := io.LimitReader(tr, remainingSize)
+
+ bytesWritten, err := io.Copy(b, limitedReader)
+ if err != nil {
return nil, err
}
+ remainingSize -= bytesWritten
+ // When the bytesWritten are less than the file size it means the limit reader ended
+ // copying early. Here we report that error. This is important if the last file extracted
+ // is the one that goes over the limit. It assumes the Size stored in the tar header
+ // is correct, something many applications do.
+ if bytesWritten < hd.Size || remainingSize <= 0 {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
data := bytes.TrimPrefix(b.Bytes(), utf8bom)
files = append(files, &BufferedFile{Name: n, Data: data})
diff --git a/pkg/chart/v2/loader/archive_test.go b/pkg/chart/v2/loader/archive_test.go
index 4d6db9ed4..d16c47563 100644
--- a/pkg/chart/v2/loader/archive_test.go
+++ b/pkg/chart/v2/loader/archive_test.go
@@ -33,6 +33,7 @@ func TestLoadArchiveFiles(t *testing.T) {
name: "empty input should return no files",
generate: func(_ *tar.Writer) {},
check: func(t *testing.T, _ []*BufferedFile, err error) {
+ t.Helper()
if err.Error() != "no files in chart archive" {
t.Fatalf(`expected "no files in chart archive", got [%#v]`, err)
}
@@ -61,6 +62,7 @@ func TestLoadArchiveFiles(t *testing.T) {
}
},
check: func(t *testing.T, files []*BufferedFile, err error) {
+ t.Helper()
if err != nil {
t.Fatalf(`got unwanted error [%#v] for tar file with pax_global_header content`, err)
}
diff --git a/pkg/chart/v2/loader/directory.go b/pkg/chart/v2/loader/directory.go
index 37b24d3f9..4f72925dc 100644
--- a/pkg/chart/v2/loader/directory.go
+++ b/pkg/chart/v2/loader/directory.go
@@ -23,8 +23,6 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/sympath"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/ignore"
@@ -101,9 +99,13 @@ func LoadDir(dir string) (*chart.Chart, error) {
return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
}
+ if fi.Size() > MaxDecompressedFileSize {
+ return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize)
+ }
+
data, err := os.ReadFile(name)
if err != nil {
- return errors.Wrapf(err, "error reading %s", n)
+ return fmt.Errorf("error reading %s: %w", n, err)
}
data = bytes.TrimPrefix(data, utf8bom)
diff --git a/pkg/chart/v2/loader/load.go b/pkg/chart/v2/loader/load.go
index e87436218..9f8f2dc7e 100644
--- a/pkg/chart/v2/loader/load.go
+++ b/pkg/chart/v2/loader/load.go
@@ -19,17 +19,19 @@ package loader
import (
"bufio"
"bytes"
- "encoding/json"
+ "errors"
+ "fmt"
"io"
"log"
+ "maps"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@@ -50,7 +52,6 @@ func Loader(name string) (ChartLoader, error) {
return DirLoader(name), nil
}
return FileLoader(name), nil
-
}
// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
@@ -82,13 +83,13 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
// do not rely on assumed ordering of files in the chart and crash
// if Chart.yaml was not coming early enough to initialize metadata
for _, f := range files {
- c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data})
+ c.Raw = append(c.Raw, &common.File{Name: f.Name, Data: f.Data})
if f.Name == "Chart.yaml" {
if c.Metadata == nil {
c.Metadata = new(chart.Metadata)
}
if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
- return c, errors.Wrap(err, "cannot load Chart.yaml")
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
}
// NOTE(bacongobbler): while the chart specification says that APIVersion must be set,
// Helm 2 accepted charts that did not provide an APIVersion in their chart metadata.
@@ -106,12 +107,12 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
case f.Name == "Chart.lock":
c.Lock = new(chart.Lock)
if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
- return c, errors.Wrap(err, "cannot load Chart.lock")
+ return c, fmt.Errorf("cannot load Chart.lock: %w", err)
}
case f.Name == "values.yaml":
values, err := LoadValues(bytes.NewReader(f.Data))
if err != nil {
- return c, errors.Wrap(err, "cannot load values.yaml")
+ return c, fmt.Errorf("cannot load values.yaml: %w", err)
}
c.Values = values
case f.Name == "values.schema.json":
@@ -127,16 +128,16 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.")
}
if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
- return c, errors.Wrap(err, "cannot load requirements.yaml")
+ return c, fmt.Errorf("cannot load requirements.yaml: %w", err)
}
if c.Metadata.APIVersion == chart.APIVersionV1 {
- c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
}
// Deprecated: requirements.lock is deprecated use Chart.lock.
case f.Name == "requirements.lock":
c.Lock = new(chart.Lock)
if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
- return c, errors.Wrap(err, "cannot load requirements.lock")
+ return c, fmt.Errorf("cannot load requirements.lock: %w", err)
}
if c.Metadata == nil {
c.Metadata = new(chart.Metadata)
@@ -145,14 +146,14 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
log.Printf("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". We recommend migrating to Chart.lock.")
}
if c.Metadata.APIVersion == chart.APIVersionV1 {
- c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
}
case strings.HasPrefix(f.Name, "templates/"):
- c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data})
+ c.Templates = append(c.Templates, &common.File{Name: f.Name, Data: f.Data})
case strings.HasPrefix(f.Name, "charts/"):
if filepath.Ext(f.Name) == ".prov" {
- c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
continue
}
@@ -160,12 +161,12 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
cname := strings.SplitN(fname, "/", 2)[0]
subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data})
default:
- c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data})
}
}
if c.Metadata == nil {
- return c, errors.New("Chart.yaml file is missing")
+ return c, errors.New("Chart.yaml file is missing") //nolint:staticcheck
}
if err := c.Validate(); err != nil {
@@ -181,7 +182,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
case filepath.Ext(n) == ".tgz":
file := files[0]
if file.Name != n {
- return c, errors.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name)
+ return c, fmt.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name)
}
// Untar the chart and add to c.Dependencies
sc, err = LoadArchive(bytes.NewBuffer(file.Data))
@@ -201,7 +202,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
}
if err != nil {
- return c, errors.Wrapf(err, "error unpacking subchart %s in %s", n, c.Name())
+ return c, fmt.Errorf("error unpacking subchart %s in %s: %w", n, c.Name(), err)
}
c.AddDependency(sc)
}
@@ -223,13 +224,10 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) {
if err == io.EOF {
break
}
- return nil, errors.Wrap(err, "error reading yaml document")
+ return nil, fmt.Errorf("error reading yaml document: %w", err)
}
- if err := yaml.Unmarshal(raw, ¤tMap, func(d *json.Decoder) *json.Decoder {
- d.UseNumber()
- return d
- }); err != nil {
- return nil, errors.Wrap(err, "cannot unmarshal yaml document")
+ if err := yaml.Unmarshal(raw, ¤tMap); err != nil {
+ return nil, fmt.Errorf("cannot unmarshal yaml document: %w", err)
}
values = MergeMaps(values, currentMap)
@@ -242,9 +240,7 @@ func LoadValues(data io.Reader) (map[string]interface{}, error) {
// If the value is a list, the lists will be merged
func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{}, len(a))
- for k, v := range a {
- out[k] = v
- }
+ maps.Copy(out, a)
for k, v := range b {
if val, ok := v.(map[string]interface{}); ok {
if bv, ok := out[k]; ok {
diff --git a/pkg/chart/v2/loader/load_test.go b/pkg/chart/v2/loader/load_test.go
index 2e16b8560..c4ae646f6 100644
--- a/pkg/chart/v2/loader/load_test.go
+++ b/pkg/chart/v2/loader/load_test.go
@@ -30,6 +30,7 @@ import (
"testing"
"time"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@@ -543,7 +544,7 @@ foo:
}
}
-func TestMergeValues(t *testing.T) {
+func TestMergeValuesV2(t *testing.T) {
nestedMap := map[string]interface{}{
"foo": "bar",
"baz": map[string]string{
@@ -648,6 +649,7 @@ func verifyChart(t *testing.T, c *chart.Chart) {
}
func verifyDependencies(t *testing.T, c *chart.Chart) {
+ t.Helper()
if len(c.Metadata.Dependencies) != 2 {
t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
}
@@ -670,6 +672,7 @@ func verifyDependencies(t *testing.T, c *chart.Chart) {
}
func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
+ t.Helper()
if len(c.Metadata.Dependencies) != 2 {
t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
}
@@ -692,10 +695,12 @@ func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
}
func verifyFrobnitz(t *testing.T, c *chart.Chart) {
+ t.Helper()
verifyChartFileAndTemplate(t, c, "frobnitz")
}
func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
+ t.Helper()
if c.Metadata == nil {
t.Fatal("Metadata is nil")
}
@@ -749,7 +754,8 @@ func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
}
}
-func verifyBomStripped(t *testing.T, files []*chart.File) {
+func verifyBomStripped(t *testing.T, files []*common.File) {
+ t.Helper()
for _, file := range files {
if bytes.HasPrefix(file.Data, utf8bom) {
t.Errorf("Byte Order Mark still present in processed file %s", file.Name)
diff --git a/pkg/chart/v2/metadata.go b/pkg/chart/v2/metadata.go
index d213a3491..c46007863 100644
--- a/pkg/chart/v2/metadata.go
+++ b/pkg/chart/v2/metadata.go
@@ -52,7 +52,7 @@ type Metadata struct {
Home string `json:"home,omitempty"`
// Source is the URL to the source code of this chart
Sources []string `json:"sources,omitempty"`
- // A SemVer 2 conformant version string of the chart. Required.
+ // A version string of the chart. Required.
Version string `json:"version,omitempty"`
// A one-sentence description of the chart
Description string `json:"description,omitempty"`
diff --git a/pkg/chart/v2/util/chartfile.go b/pkg/chart/v2/util/chartfile.go
index 87323c201..1f9c712b2 100644
--- a/pkg/chart/v2/util/chartfile.go
+++ b/pkg/chart/v2/util/chartfile.go
@@ -17,10 +17,12 @@ limitations under the License.
package util
import (
+ "errors"
+ "fmt"
+ "io/fs"
"os"
"path/filepath"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
chart "helm.sh/helm/v4/pkg/chart/v2"
@@ -37,6 +39,17 @@ func LoadChartfile(filename string) (*chart.Metadata, error) {
return y, err
}
+// StrictLoadChartfile loads a Chart.yaml into a *chart.Metadata using a strict unmarshaling
+func StrictLoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.UnmarshalStrict(b, y)
+ return y, err
+}
+
// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
//
// 'filename' should be the complete path and filename ('foo/Chart.yaml')
@@ -64,17 +77,17 @@ func IsChartDir(dirName string) (bool, error) {
if fi, err := os.Stat(dirName); err != nil {
return false, err
} else if !fi.IsDir() {
- return false, errors.Errorf("%q is not a directory", dirName)
+ return false, fmt.Errorf("%q is not a directory", dirName)
}
chartYaml := filepath.Join(dirName, ChartfileName)
- if _, err := os.Stat(chartYaml); os.IsNotExist(err) {
- return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+ if _, err := os.Stat(chartYaml); errors.Is(err, fs.ErrNotExist) {
+ return false, fmt.Errorf("no %s exists in directory %q", ChartfileName, dirName)
}
chartYamlContent, err := os.ReadFile(chartYaml)
if err != nil {
- return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+ return false, fmt.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
}
chartContent := new(chart.Metadata)
@@ -82,10 +95,10 @@ func IsChartDir(dirName string) (bool, error) {
return false, err
}
if chartContent == nil {
- return false, errors.Errorf("chart metadata (%s) missing", ChartfileName)
+ return false, fmt.Errorf("chart metadata (%s) missing", ChartfileName)
}
if chartContent.Name == "" {
- return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+ return false, fmt.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
}
return true, nil
diff --git a/pkg/chart/v2/util/chartfile_test.go b/pkg/chart/v2/util/chartfile_test.go
index a2896b235..00c530b8a 100644
--- a/pkg/chart/v2/util/chartfile_test.go
+++ b/pkg/chart/v2/util/chartfile_test.go
@@ -34,7 +34,7 @@ func TestLoadChartfile(t *testing.T) {
}
func verifyChartfile(t *testing.T, f *chart.Metadata, name string) {
-
+ t.Helper()
if f == nil { //nolint:staticcheck
t.Fatal("Failed verifyChartfile because f is nil")
}
diff --git a/pkg/chart/v2/util/create.go b/pkg/chart/v2/util/create.go
index 7eb3398f5..d7c1fe31c 100644
--- a/pkg/chart/v2/util/create.go
+++ b/pkg/chart/v2/util/create.go
@@ -24,9 +24,9 @@ import (
"regexp"
"strings"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
)
@@ -54,6 +54,8 @@ const (
IgnorefileName = ".helmignore"
// IngressFileName is the name of the example ingress file.
IngressFileName = TemplatesDir + sep + "ingress.yaml"
+ // HTTPRouteFileName is the name of the example HTTPRoute file.
+ HTTPRouteFileName = TemplatesDir + sep + "httproute.yaml"
// DeploymentName is the name of the example deployment file.
DeploymentName = TemplatesDir + sep + "deployment.yaml"
// ServiceName is the name of the example service file.
@@ -125,14 +127,14 @@ fullnameOverride: ""
# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
- # Specifies whether a service account should be created
+ # Specifies whether a service account should be created.
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
- # Annotations to add to the service account
+ # Annotations to add to the service account.
annotations: {}
# The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname template
+ # If not set and create is true, a name is generated using the fullname template.
name: ""
# This is for setting Kubernetes Annotations to a Pod.
@@ -173,9 +175,47 @@ ingress:
- path: /
pathType: ImplementationSpecific
tls: []
- # - secretName: chart-example-tls
- # hosts:
- # - chart-example.local
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+# -- Expose the service via gateway-api HTTPRoute
+# Requires Gateway API resources and suitable controller installed within the cluster
+# (see: https://gateway-api.sigs.k8s.io/guides/)
+httpRoute:
+ # HTTPRoute enabled.
+ enabled: false
+ # HTTPRoute annotations.
+ annotations: {}
+ # Which Gateways this Route is attached to.
+ parentRefs:
+ - name: gateway
+ sectionName: http
+ # namespace: default
+ # Hostnames matching HTTP header.
+ hostnames:
+ - chart-example.local
+ # List of rules and filters applied.
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /headers
+ # filters:
+ # - type: RequestHeaderModifier
+ # requestHeaderModifier:
+ # set:
+ # - name: My-Overwrite-Header
+ # value: this-is-the-only-value
+ # remove:
+ # - User-Agent
+ # - matches:
+ # - path:
+ # type: PathPrefix
+ # value: /echo
+ # headers:
+ # - name: version
+ # value: v2
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
@@ -209,16 +249,16 @@ autoscaling:
# Additional volumes on the output Deployment definition.
volumes: []
-# - name: foo
-# secret:
-# secretName: mysecret
-# optional: false
+ # - name: foo
+ # secret:
+ # secretName: mysecret
+ # optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
-# - name: foo
-# mountPath: "/etc/foo"
-# readOnly: true
+ # - name: foo
+ # mountPath: "/etc/foo"
+ # readOnly: true
nodeSelector: {}
@@ -297,6 +337,46 @@ spec:
{{- end }}
`
+const defaultHTTPRoute = `{{- if .Values.httpRoute.enabled -}}
+{{- $fullName := include ".fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.httpRoute.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ parentRefs:
+ {{- with .Values.httpRoute.parentRefs }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.httpRoute.hostnames }}
+ hostnames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ rules:
+ {{- range .Values.httpRoute.rules }}
+ {{- with .matches }}
+ - matches:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .filters }}
+ filters:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ backendRefs:
+ - name: {{ $fullName }}
+ port: {{ $svcPort }}
+ weight: 1
+ {{- end }}
+{{- end }}
+`
+
const defaultDeployment = `apiVersion: apps/v1
kind: Deployment
metadata:
@@ -444,7 +524,20 @@ spec:
`
const defaultNotes = `1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
+{{- if .Values.httpRoute.enabled }}
+{{- if .Values.httpRoute.hostnames }}
+ export APP_HOSTNAME={{ .Values.httpRoute.hostnames | first }}
+{{- else }}
+ export APP_HOSTNAME=$(kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o jsonpath="{.spec.listeners[0].hostname}")
+ {{- end }}
+{{- if and .Values.httpRoute.rules (first .Values.httpRoute.rules).matches (first (first .Values.httpRoute.rules).matches).path.value }}
+ echo "Visit http://$APP_HOSTNAME{{ (first (first .Values.httpRoute.rules).matches).path.value }} to use your application"
+
+ NOTE: Your HTTPRoute depends on the listener configuration of your gateway and your HTTPRoute rules.
+ The rules can be set for path, method, header and query parameters.
+ You can check the gateway configuration with 'kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o yaml'
+{{- end }}
+{{- else if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
@@ -558,27 +651,27 @@ var Stderr io.Writer = os.Stderr
func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
schart, err := loader.Load(src)
if err != nil {
- return errors.Wrapf(err, "could not load %s", src)
+ return fmt.Errorf("could not load %s: %w", src, err)
}
schart.Metadata = chartfile
- var updatedTemplates []*chart.File
+ var updatedTemplates []*common.File
for _, template := range schart.Templates {
newData := transform(string(template.Data), schart.Name())
- updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData})
+ updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, Data: newData})
}
schart.Templates = updatedTemplates
b, err := yaml.Marshal(schart.Values)
if err != nil {
- return errors.Wrap(err, "reading values file")
+ return fmt.Errorf("reading values file: %w", err)
}
var m map[string]interface{}
if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
- return errors.Wrap(err, "transforming values file")
+ return fmt.Errorf("transforming values file: %w", err)
}
schart.Values = m
@@ -622,12 +715,12 @@ func Create(name, dir string) (string, error) {
if fi, err := os.Stat(path); err != nil {
return path, err
} else if !fi.IsDir() {
- return path, errors.Errorf("no such directory %s", path)
+ return path, fmt.Errorf("no such directory %s", path)
}
cdir := filepath.Join(path, name)
if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
- return cdir, errors.Errorf("file %s already exists and is not a directory", cdir)
+ return cdir, fmt.Errorf("file %s already exists and is not a directory", cdir)
}
// Note: If adding a new template below (i.e., to `helm create`) which is disabled by default (similar to hpa and
@@ -641,12 +734,12 @@ func Create(name, dir string) (string, error) {
{
// Chart.yaml
path: filepath.Join(cdir, ChartfileName),
- content: []byte(fmt.Sprintf(defaultChartfile, name)),
+ content: fmt.Appendf(nil, defaultChartfile, name),
},
{
// values.yaml
path: filepath.Join(cdir, ValuesfileName),
- content: []byte(fmt.Sprintf(defaultValues, name)),
+ content: fmt.Appendf(nil, defaultValues, name),
},
{
// .helmignore
@@ -658,6 +751,11 @@ func Create(name, dir string) (string, error) {
path: filepath.Join(cdir, IngressFileName),
content: transform(defaultIngress, name),
},
+ {
+ // httproute.yaml
+ path: filepath.Join(cdir, HTTPRouteFileName),
+ content: transform(defaultHTTPRoute, name),
+ },
{
// deployment.yaml
path: filepath.Join(cdir, DeploymentName),
diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go
index 78ed46517..a52f09f82 100644
--- a/pkg/chart/v2/util/dependencies.go
+++ b/pkg/chart/v2/util/dependencies.go
@@ -16,16 +16,19 @@ limitations under the License.
package util
import (
- "log"
+ "fmt"
+ "log/slog"
"strings"
"github.com/mitchellh/copystructure"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
// ProcessDependencies checks through this chart's dependencies, processing accordingly.
-func ProcessDependencies(c *chart.Chart, v Values) error {
+func ProcessDependencies(c *chart.Chart, v common.Values) error {
if err := processDependencyEnabled(c, v, ""); err != nil {
return err
}
@@ -33,12 +36,12 @@ func ProcessDependencies(c *chart.Chart, v Values) error {
}
// processDependencyConditions disables charts based on condition path value in values
-func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) {
+func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) {
if reqs == nil {
return
}
for _, r := range reqs {
- for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") {
+ for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
if len(c) > 0 {
// retrieve value
vv, err := cvals.PathValue(cpath + c)
@@ -48,10 +51,10 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
r.Enabled = bv
break
}
- log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name)
- } else if _, ok := err.(ErrNoValue); !ok {
+ slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
+ } else if _, ok := err.(common.ErrNoValue); !ok {
// this is a real error
- log.Printf("Warning: PathValue returned error %v", err)
+ slog.Warn("the method PathValue returned error", slog.Any("error", err))
}
}
}
@@ -59,7 +62,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
}
// processDependencyTags disables charts based on tags in values
-func processDependencyTags(reqs []*chart.Dependency, cvals Values) {
+func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) {
if reqs == nil {
return
}
@@ -79,7 +82,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) {
hasFalse = true
}
} else {
- log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name)
+ slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)
}
}
}
@@ -91,6 +94,7 @@ func processDependencyTags(reqs []*chart.Dependency, cvals Values) {
}
}
+// getAliasDependency finds the chart for an alias dependency and copies parts that will be modified
func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart {
for _, c := range charts {
if c == nil {
@@ -104,17 +108,38 @@ func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Cha
}
out := *c
- md := *c.Metadata
- out.Metadata = &md
+ out.Metadata = copyMetadata(c.Metadata)
+
+ // empty dependencies and shallow copy all dependencies, otherwise parent info may be corrupted if
+ // there is more than one dependency aliasing this chart
+ out.SetDependencies()
+ for _, dependency := range c.Dependencies() {
+ cpy := *dependency
+ out.AddDependency(&cpy)
+ }
if dep.Alias != "" {
- md.Name = dep.Alias
+ out.Metadata.Name = dep.Alias
}
return &out
}
return nil
}
+func copyMetadata(metadata *chart.Metadata) *chart.Metadata {
+ md := *metadata
+
+ if md.Dependencies != nil {
+ dependencies := make([]*chart.Dependency, len(md.Dependencies))
+ for i := range md.Dependencies {
+ dependency := *md.Dependencies[i]
+ dependencies[i] = &dependency
+ }
+ md.Dependencies = dependencies
+ }
+ return &md
+}
+
// processDependencyEnabled removes disabled charts from dependencies
func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error {
if c.Metadata.Dependencies == nil {
@@ -154,7 +179,7 @@ Loop:
for _, lr := range c.Metadata.Dependencies {
lr.Enabled = true
}
- cvals, err := CoalesceValues(c, v)
+ cvals, err := util.CoalesceValues(c, v)
if err != nil {
return err
}
@@ -209,6 +234,8 @@ func pathToMap(path string, data map[string]interface{}) map[string]interface{}
return set(parsePath(path), data)
}
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
func set(path []string, data map[string]interface{}) map[string]interface{} {
if len(path) == 0 {
return nil
@@ -226,12 +253,12 @@ func processImportValues(c *chart.Chart, merge bool) error {
return nil
}
// combine chart values and empty config to get Values
- var cvals Values
+ var cvals common.Values
var err error
if merge {
- cvals, err = MergeValues(c, nil)
+ cvals, err = util.MergeValues(c, nil)
} else {
- cvals, err = CoalesceValues(c, nil)
+ cvals, err = util.CoalesceValues(c, nil)
}
if err != nil {
return err
@@ -243,8 +270,8 @@ func processImportValues(c *chart.Chart, merge bool) error {
for _, riv := range r.ImportValues {
switch iv := riv.(type) {
case map[string]interface{}:
- child := iv["child"].(string)
- parent := iv["parent"].(string)
+ child := fmt.Sprintf("%v", iv["child"])
+ parent := fmt.Sprintf("%v", iv["parent"])
outiv = append(outiv, map[string]string{
"child": child,
@@ -254,14 +281,14 @@ func processImportValues(c *chart.Chart, merge bool) error {
// get child table
vv, err := cvals.Table(r.Name + "." + child)
if err != nil {
- log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err)
+ slog.Warn("ImportValues missing table from chart", "chart", r.Name, slog.Any("error", err))
continue
}
// create value map from child to be merged into parent
if merge {
- b = MergeTables(b, pathToMap(parent, vv.AsMap()))
+ b = util.MergeTables(b, pathToMap(parent, vv.AsMap()))
} else {
- b = CoalesceTables(b, pathToMap(parent, vv.AsMap()))
+ b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap()))
}
case string:
child := "exports." + iv
@@ -271,13 +298,13 @@ func processImportValues(c *chart.Chart, merge bool) error {
})
vm, err := cvals.Table(r.Name + "." + child)
if err != nil {
- log.Printf("Warning: ImportValues missing table: %v", err)
+ slog.Warn("ImportValues missing table", slog.Any("error", err))
continue
}
if merge {
- b = MergeTables(b, vm.AsMap())
+ b = util.MergeTables(b, vm.AsMap())
} else {
- b = CoalesceTables(b, vm.AsMap())
+ b = util.CoalesceTables(b, vm.AsMap())
}
}
}
@@ -292,14 +319,14 @@ func processImportValues(c *chart.Chart, merge bool) error {
// deep copying the cvals as there are cases where pointers can end
// up in the cvals when they are copied onto b in ways that break things.
cvals = deepCopyMap(cvals)
- c.Values = MergeTables(cvals, b)
+ c.Values = util.MergeTables(cvals, b)
} else {
// Trimming the nil values from cvals is needed for backwards compatibility.
// Previously, the b value had been populated with cvals along with some
// overrides. This caused the coalescing functionality to remove the
// nil/null values. This trimming is for backwards compat.
cvals = trimNilValues(cvals)
- c.Values = CoalesceTables(cvals, b)
+ c.Values = util.CoalesceTables(cvals, b)
}
return nil
@@ -332,6 +359,12 @@ func trimNilValues(vals map[string]interface{}) map[string]interface{} {
return valsCopyMap
}
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
+
// processDependencyImportValues imports specified chart values from child to parent.
func processDependencyImportValues(c *chart.Chart, merge bool) error {
for _, d := range c.Dependencies() {
diff --git a/pkg/chart/v2/util/dependencies_test.go b/pkg/chart/v2/util/dependencies_test.go
index 5bd332990..c817b0b89 100644
--- a/pkg/chart/v2/util/dependencies_test.go
+++ b/pkg/chart/v2/util/dependencies_test.go
@@ -15,13 +15,13 @@ limitations under the License.
package util
import (
- "encoding/json"
"os"
"path/filepath"
"sort"
"strconv"
"testing"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
)
@@ -134,7 +134,7 @@ func TestDependencyEnabled(t *testing.T) {
}
}
-// extractCharts recursively searches chart dependencies returning all charts found
+// extractChartNames recursively searches chart dependencies returning all charts found
func extractChartNames(c *chart.Chart) []string {
var out []string
var fn func(c *chart.Chart)
@@ -222,7 +222,7 @@ func TestProcessDependencyImportValues(t *testing.T) {
if err := processDependencyImportValues(c, false); err != nil {
t.Fatalf("processing import values dependencies %v", err)
}
- cc := Values(c.Values)
+ cc := common.Values(c.Values)
for kk, vv := range e {
pv, err := cc.PathValue(kk)
if err != nil {
@@ -238,20 +238,6 @@ func TestProcessDependencyImportValues(t *testing.T) {
if b := strconv.FormatBool(pv); b != vv {
t.Errorf("failed to match imported bool value %v with expected %v for key %q", b, vv, kk)
}
- case json.Number:
- if fv, err := pv.Float64(); err == nil {
- if sfv := strconv.FormatFloat(fv, 'f', -1, 64); sfv != vv {
- t.Errorf("failed to match imported float value %v with expected %v for key %q", sfv, vv, kk)
- }
- }
- if iv, err := pv.Int64(); err == nil {
- if siv := strconv.FormatInt(iv, 10); siv != vv {
- t.Errorf("failed to match imported int value %v with expected %v for key %q", siv, vv, kk)
- }
- }
- if pv.String() != vv {
- t.Errorf("failed to match imported string value %q with expected %q for key %q", pv, vv, kk)
- }
default:
if pv != vv {
t.Errorf("failed to match imported string value %q with expected %q for key %q", pv, vv, kk)
@@ -266,7 +252,7 @@ func TestProcessDependencyImportValues(t *testing.T) {
t.Error("expect nil value not found but found it")
}
switch xerr := err.(type) {
- case ErrNoValue:
+ case common.ErrNoValue:
// We found what we expected
default:
t.Errorf("expected an ErrNoValue but got %q instead", xerr)
@@ -276,7 +262,7 @@ func TestProcessDependencyImportValues(t *testing.T) {
if err := processDependencyImportValues(c, true); err != nil {
t.Fatalf("processing import values dependencies %v", err)
}
- cc = Values(c.Values)
+ cc = common.Values(c.Values)
val, err := cc.PathValue("ensurenull")
if err != nil {
t.Error("expect value but ensurenull was not found")
@@ -286,6 +272,38 @@ func TestProcessDependencyImportValues(t *testing.T) {
}
}
+func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-import-from-aliased-dependencies")
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ e := make(map[string]string)
+
+ e["foo-defaults.defaultValue"] = "42"
+ e["bar-defaults.defaultValue"] = "42"
+
+ e["foo.defaults.defaultValue"] = "42"
+ e["bar.defaults.defaultValue"] = "42"
+
+ e["foo.grandchild.defaults.defaultValue"] = "42"
+ e["bar.grandchild.defaults.defaultValue"] = "42"
+
+ cValues := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cValues.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+ if pv != vv {
+ t.Errorf("failed to match imported value %v with expected %v", pv, vv)
+ }
+ }
+}
+
func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
c := loadChart(t, "testdata/three-level-dependent-chart/umbrella")
@@ -312,7 +330,7 @@ func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
if err := processDependencyImportValues(c, true); err != nil {
t.Fatalf("processing import values dependencies %v", err)
}
- cc := Values(c.Values)
+ cc := common.Values(c.Values)
for kk, vv := range e {
pv, err := cc.PathValue(kk)
if err != nil {
@@ -324,10 +342,6 @@ func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
t.Errorf("failed to match imported float value %v with expected %v", s, vv)
}
- case json.Number:
- if pv.String() != vv {
- t.Errorf("failed to match imported string value %q with expected %q", pv, vv)
- }
default:
if pv != vv {
t.Errorf("failed to match imported string value %q with expected %q", pv, vv)
@@ -430,6 +444,9 @@ func TestDependentChartAliases(t *testing.T) {
if aliasChart == nil {
t.Fatalf("failed to get dependency chart for alias %s", req[2].Name)
}
+ if aliasChart.Parent() != c {
+ t.Fatalf("dependency chart has wrong parent, expected %s but got %s", c.Name(), aliasChart.Parent().Name())
+ }
if req[2].Alias != "" {
if aliasChart.Name() != req[2].Alias {
t.Fatalf("dependency chart name should be %s but got %s", req[2].Alias, aliasChart.Name())
@@ -521,3 +538,33 @@ func TestDependentChartsWithSomeSubchartsSpecifiedInDependency(t *testing.T) {
t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
}
}
+
+func validateDependencyTree(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ for _, dependency := range c.Dependencies() {
+ if dependency.Parent() != c {
+ if dependency.Parent() != c {
+ t.Fatalf("dependency chart %s has wrong parent, expected %s but got %s", dependency.Name(), c.Name(), dependency.Parent().Name())
+ }
+ }
+ // recurse entire tree
+ validateDependencyTree(t, dependency)
+ }
+}
+
+func TestChartWithDependencyAliasedTwiceAndDoublyReferencedSubDependency(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-dependency-aliased-twice")
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatalf("expected one dependency for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected two dependencies after processing aliases")
+ }
+ validateDependencyTree(t, c)
+}
diff --git a/pkg/chart/v2/util/expand.go b/pkg/chart/v2/util/expand.go
index e05a1a984..9d08571ed 100644
--- a/pkg/chart/v2/util/expand.go
+++ b/pkg/chart/v2/util/expand.go
@@ -17,12 +17,13 @@ limitations under the License.
package util
import (
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
securejoin "github.com/cyphar/filepath-securejoin"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
chart "helm.sh/helm/v4/pkg/chart/v2"
@@ -42,7 +43,7 @@ func Expand(dir string, r io.Reader) error {
if file.Name == "Chart.yaml" {
ch := &chart.Metadata{}
if err := yaml.Unmarshal(file.Data, ch); err != nil {
- return errors.Wrap(err, "cannot load Chart.yaml")
+ return fmt.Errorf("cannot load Chart.yaml: %w", err)
}
chartName = ch.Name
}
diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go
deleted file mode 100644
index 615dc5320..000000000
--- a/pkg/chart/v2/util/jsonschema.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
- "bytes"
- "fmt"
- "strings"
-
- "github.com/pkg/errors"
- "github.com/xeipuuv/gojsonschema"
- "sigs.k8s.io/yaml"
-
- chart "helm.sh/helm/v4/pkg/chart/v2"
-)
-
-// ValidateAgainstSchema checks that values does not violate the structure laid out in schema
-func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error {
- var sb strings.Builder
- if chrt.Schema != nil {
- err := ValidateAgainstSingleSchema(values, chrt.Schema)
- if err != nil {
- sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
- sb.WriteString(err.Error())
- }
- }
-
- // For each dependency, recursively call this function with the coalesced values
- for _, subchart := range chrt.Dependencies() {
- subchartValues := values[subchart.Name()].(map[string]interface{})
- if err := ValidateAgainstSchema(subchart, subchartValues); err != nil {
- sb.WriteString(err.Error())
- }
- }
-
- if sb.Len() > 0 {
- return errors.New(sb.String())
- }
-
- return nil
-}
-
-// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema
-func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) {
- defer func() {
- if r := recover(); r != nil {
- reterr = fmt.Errorf("unable to validate schema: %s", r)
- }
- }()
-
- valuesData, err := yaml.Marshal(values)
- if err != nil {
- return err
- }
- valuesJSON, err := yaml.YAMLToJSON(valuesData)
- if err != nil {
- return err
- }
- if bytes.Equal(valuesJSON, []byte("null")) {
- valuesJSON = []byte("{}")
- }
- schemaLoader := gojsonschema.NewBytesLoader(schemaJSON)
- valuesLoader := gojsonschema.NewBytesLoader(valuesJSON)
-
- result, err := gojsonschema.Validate(schemaLoader, valuesLoader)
- if err != nil {
- return err
- }
-
- if !result.Valid() {
- var sb strings.Builder
- for _, desc := range result.Errors() {
- sb.WriteString(fmt.Sprintf("- %s\n", desc))
- }
- return errors.New(sb.String())
- }
-
- return nil
-}
diff --git a/pkg/chart/v2/util/save.go b/pkg/chart/v2/util/save.go
index e1285ac88..69a98924c 100644
--- a/pkg/chart/v2/util/save.go
+++ b/pkg/chart/v2/util/save.go
@@ -20,14 +20,16 @@ import (
"archive/tar"
"compress/gzip"
"encoding/json"
+ "errors"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"time"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
)
@@ -45,7 +47,7 @@ func SaveDir(c *chart.Chart, dest string) error {
}
outdir := filepath.Join(dest, c.Name())
if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
- return errors.Errorf("file %s already exists and is not a directory", outdir)
+ return fmt.Errorf("file %s already exists and is not a directory", outdir)
}
if err := os.MkdirAll(outdir, 0755); err != nil {
return err
@@ -75,7 +77,7 @@ func SaveDir(c *chart.Chart, dest string) error {
}
// Save templates and files
- for _, o := range [][]*chart.File{c.Templates, c.Files} {
+ for _, o := range [][]*common.File{c.Templates, c.Files} {
for _, f := range o {
n := filepath.Join(outdir, f.Name)
if err := writeFile(n, f.Data); err != nil {
@@ -89,7 +91,7 @@ func SaveDir(c *chart.Chart, dest string) error {
for _, dep := range c.Dependencies() {
// Here, we write each dependency as a tar file.
if _, err := Save(dep, base); err != nil {
- return errors.Wrapf(err, "saving %s", dep.ChartFullPath())
+ return fmt.Errorf("saving %s: %w", dep.ChartFullPath(), err)
}
}
return nil
@@ -105,22 +107,22 @@ func SaveDir(c *chart.Chart, dest string) error {
// This returns the absolute path to the chart archive file.
func Save(c *chart.Chart, outDir string) (string, error) {
if err := c.Validate(); err != nil {
- return "", errors.Wrap(err, "chart validation")
+ return "", fmt.Errorf("chart validation: %w", err)
}
filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version)
filename = filepath.Join(outDir, filename)
dir := filepath.Dir(filename)
if stat, err := os.Stat(dir); err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, fs.ErrNotExist) {
if err2 := os.MkdirAll(dir, 0755); err2 != nil {
return "", err2
}
} else {
- return "", errors.Wrapf(err, "stat %s", dir)
+ return "", fmt.Errorf("stat %s: %w", dir, err)
}
} else if !stat.IsDir() {
- return "", errors.Errorf("is not a directory: %s", dir)
+ return "", fmt.Errorf("is not a directory: %s", dir)
}
f, err := os.Create(filename)
@@ -130,8 +132,8 @@ func Save(c *chart.Chart, outDir string) (string, error) {
// Wrap in gzip writer
zipper := gzip.NewWriter(f)
- zipper.Header.Extra = headerBytes
- zipper.Header.Comment = "Helm"
+ zipper.Extra = headerBytes
+ zipper.Comment = "Helm"
// Wrap in tar writer
twriter := tar.NewWriter(zipper)
@@ -203,7 +205,7 @@ func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error {
// Save values.schema.json if it exists
if c.Schema != nil {
if !json.Valid(c.Schema) {
- return errors.New("Invalid JSON in " + SchemafileName)
+ return errors.New("invalid JSON in " + SchemafileName)
}
if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil {
return err
@@ -257,7 +259,7 @@ func validateName(name string) error {
nname := filepath.Base(name)
if nname != name {
- return ErrInvalidChartName{name}
+ return common.ErrInvalidChartName{Name: name}
}
return nil
diff --git a/pkg/chart/v2/util/save_test.go b/pkg/chart/v2/util/save_test.go
index ff96331b5..ef822a82a 100644
--- a/pkg/chart/v2/util/save_test.go
+++ b/pkg/chart/v2/util/save_test.go
@@ -29,6 +29,7 @@ import (
"testing"
"time"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
)
@@ -47,7 +48,7 @@ func TestSave(t *testing.T) {
Lock: &chart.Lock{
Digest: "testdigest",
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
},
Schema: []byte("{\n \"title\": \"Values\"\n}"),
@@ -116,7 +117,7 @@ func TestSave(t *testing.T) {
Lock: &chart.Lock{
Digest: "testdigest",
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
},
}
@@ -156,7 +157,7 @@ func TestSavePreservesTimestamps(t *testing.T) {
"imageName": "testimage",
"imageId": 42,
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
},
Schema: []byte("{\n \"title\": \"Values\"\n}"),
@@ -222,10 +223,10 @@ func TestSaveDir(t *testing.T) {
Name: "ahab",
Version: "1.2.3",
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")},
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), Data: []byte("abc: {{ .Values.abc }}")},
},
}
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
new file mode 100644
index 000000000..d778f8fe9
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ - name: child
+ alias: bar
+ version: 1.0.0
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
new file mode 100644
index 000000000..220fda663
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..50e620a8d
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
new file mode 100644
index 000000000..1830492ef
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}-{{ .Values.from }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..b5d55af7c
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml
new file mode 100644
index 000000000..695521a4a
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml
@@ -0,0 +1,7 @@
+foo:
+ grandchild:
+ from: foo
+bar:
+ grandchild:
+ from: bar
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
new file mode 100644
index 000000000..c408f0ca8
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ import-values:
+ - parent: foo-defaults
+ child: defaults
+ - name: child
+ alias: bar
+ version: 1.0.0
+ import-values:
+ - parent: bar-defaults
+ child: defaults
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
new file mode 100644
index 000000000..ecdaf04dc
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: grandchild
+ version: 1.0.0
+ import-values:
+ - parent: defaults
+ child: defaults
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..50e620a8d
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
new file mode 100644
index 000000000..f51c594f4
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
@@ -0,0 +1,2 @@
+defaults:
+ defaultValue: "42"
\ No newline at end of file
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..3140f53dd
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ .Values.defaults | toYaml }}
+
diff --git a/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
new file mode 100644
index 000000000..a2b62c95a
--- /dev/null
+++ b/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ toYaml .Values.defaults | indent 2 }}
+
diff --git a/pkg/chart/v2/util/validate_name.go b/pkg/chart/v2/util/validate_name.go
index 73be43303..6595e085d 100644
--- a/pkg/chart/v2/util/validate_name.go
+++ b/pkg/chart/v2/util/validate_name.go
@@ -17,10 +17,9 @@ limitations under the License.
package util
import (
+ "errors"
"fmt"
"regexp"
-
- "github.com/pkg/errors"
)
// validName is a regular expression for resource names.
diff --git a/pkg/cli/environment.go b/pkg/cli/environment.go
index 3f2dc00b2..106d24336 100644
--- a/pkg/cli/environment.go
+++ b/pkg/cli/environment.go
@@ -89,6 +89,10 @@ type EnvSettings struct {
BurstLimit int
// QPS is queries per second which may be used to avoid throttling.
QPS float32
+ // ColorMode controls colorized output (never, auto, always)
+ ColorMode string
+ // ContentCache is the location where cached charts are stored
+ ContentCache string
}
func New() *EnvSettings {
@@ -107,8 +111,10 @@ func New() *EnvSettings {
RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")),
RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")),
RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")),
+ ContentCache: envOr("HELM_CONTENT_CACHE", helmpath.CachePath("content")),
BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit),
QPS: envFloat32Or("HELM_QPS", defaultQPS),
+ ColorMode: envColorMode(),
}
env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG"))
@@ -158,8 +164,11 @@ func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file")
fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs")
fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the directory containing cached repository indexes")
+ fs.StringVar(&s.ContentCache, "content-cache", s.ContentCache, "path to the directory containing cached content (e.g. charts)")
fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit")
fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting")
+ fs.StringVar(&s.ColorMode, "color", s.ColorMode, "use colored output (never, auto, always)")
+ fs.StringVar(&s.ColorMode, "colour", s.ColorMode, "use colored output (never, auto, always)")
}
func envOr(name, def string) string {
@@ -213,6 +222,23 @@ func envCSV(name string) (ls []string) {
return
}
+func envColorMode() string {
+ // Check NO_COLOR environment variable first (standard)
+ if v, ok := os.LookupEnv("NO_COLOR"); ok && v != "" {
+ return "never"
+ }
+ // Check HELM_COLOR environment variable
+ if v, ok := os.LookupEnv("HELM_COLOR"); ok {
+ v = strings.ToLower(v)
+ switch v {
+ case "never", "auto", "always":
+ return v
+ }
+ }
+ // Default to auto
+ return "auto"
+}
+
func (s *EnvSettings) EnvVars() map[string]string {
envvars := map[string]string{
"HELM_BIN": os.Args[0],
@@ -223,6 +249,7 @@ func (s *EnvSettings) EnvVars() map[string]string {
"HELM_PLUGINS": s.PluginsDirectory,
"HELM_REGISTRY_CONFIG": s.RegistryConfig,
"HELM_REPOSITORY_CACHE": s.RepositoryCache,
+ "HELM_CONTENT_CACHE": s.ContentCache,
"HELM_REPOSITORY_CONFIG": s.RepositoryConfig,
"HELM_NAMESPACE": s.Namespace(),
"HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory),
@@ -265,3 +292,8 @@ func (s *EnvSettings) SetNamespace(namespace string) {
func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter {
return s.config
}
+
+// ShouldDisableColor returns true if color output should be disabled
+func (s *EnvSettings) ShouldDisableColor() bool {
+ return s.ColorMode == "never"
+}
diff --git a/pkg/cli/environment_test.go b/pkg/cli/environment_test.go
index 8a3b87936..52326eeff 100644
--- a/pkg/cli/environment_test.go
+++ b/pkg/cli/environment_test.go
@@ -38,7 +38,6 @@ func TestSetNamespace(t *testing.T) {
if settings.namespace != "testns" {
t.Errorf("Expected namespace testns, got %s", settings.namespace)
}
-
}
func TestEnvSettings(t *testing.T) {
@@ -126,7 +125,7 @@ func TestEnvSettings(t *testing.T) {
defer resetEnv()()
for k, v := range tt.envvars {
- os.Setenv(k, v)
+ t.Setenv(k, v)
}
flags := pflag.NewFlagSet("testing", pflag.ContinueOnError)
@@ -233,10 +232,7 @@ func TestEnvOrBool(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.env != "" {
- t.Cleanup(func() {
- os.Unsetenv(tt.env)
- })
- os.Setenv(tt.env, tt.val)
+ t.Setenv(tt.env, tt.val)
}
actual := envBoolOr(tt.env, tt.def)
if actual != tt.expected {
diff --git a/pkg/cli/output/output.go b/pkg/cli/output/output.go
index 01649c812..28d503741 100644
--- a/pkg/cli/output/output.go
+++ b/pkg/cli/output/output.go
@@ -22,7 +22,6 @@ import (
"io"
"github.com/gosuri/uitable"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
)
@@ -107,7 +106,7 @@ func EncodeJSON(out io.Writer, obj interface{}) error {
enc := json.NewEncoder(out)
err := enc.Encode(obj)
if err != nil {
- return errors.Wrap(err, "unable to write JSON output")
+ return fmt.Errorf("unable to write JSON output: %w", err)
}
return nil
}
@@ -117,12 +116,12 @@ func EncodeJSON(out io.Writer, obj interface{}) error {
func EncodeYAML(out io.Writer, obj interface{}) error {
raw, err := yaml.Marshal(obj)
if err != nil {
- return errors.Wrap(err, "unable to write YAML output")
+ return fmt.Errorf("unable to write YAML output: %w", err)
}
_, err = out.Write(raw)
if err != nil {
- return errors.Wrap(err, "unable to write YAML output")
+ return fmt.Errorf("unable to write YAML output: %w", err)
}
return nil
}
@@ -134,7 +133,7 @@ func EncodeTable(out io.Writer, table *uitable.Table) error {
raw = append(raw, []byte("\n")...)
_, err := out.Write(raw)
if err != nil {
- return errors.Wrap(err, "unable to write table output")
+ return fmt.Errorf("unable to write table output: %w", err)
}
return nil
}
diff --git a/pkg/cli/values/options.go b/pkg/cli/values/options.go
index 461db3cc2..cd65fa885 100644
--- a/pkg/cli/values/options.go
+++ b/pkg/cli/values/options.go
@@ -19,13 +19,12 @@ package values
import (
"bytes"
"encoding/json"
+ "fmt"
"io"
"net/url"
"os"
"strings"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/pkg/chart/v2/loader"
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/strvals"
@@ -54,7 +53,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er
}
currentMap, err := loader.LoadValues(bytes.NewReader(raw))
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse %s", filePath)
+ return nil, fmt.Errorf("failed to parse %s: %w", filePath, err)
}
// Merge with the previous map
base = loader.MergeMaps(base, currentMap)
@@ -67,13 +66,13 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er
// If value is JSON object format, parse it as map
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(trimmedValue), &jsonMap); err != nil {
- return nil, errors.Errorf("failed parsing --set-json data JSON: %s", value)
+ return nil, fmt.Errorf("failed parsing --set-json data JSON: %s", value)
}
base = loader.MergeMaps(base, jsonMap)
} else {
// Otherwise, parse it as key=value format
if err := strvals.ParseJSON(value, base); err != nil {
- return nil, errors.Errorf("failed parsing --set-json data %s", value)
+ return nil, fmt.Errorf("failed parsing --set-json data %s", value)
}
}
}
@@ -81,14 +80,14 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er
// User specified a value via --set
for _, value := range opts.Values {
if err := strvals.ParseInto(value, base); err != nil {
- return nil, errors.Wrap(err, "failed parsing --set data")
+ return nil, fmt.Errorf("failed parsing --set data: %w", err)
}
}
// User specified a value via --set-string
for _, value := range opts.StringValues {
if err := strvals.ParseIntoString(value, base); err != nil {
- return nil, errors.Wrap(err, "failed parsing --set-string data")
+ return nil, fmt.Errorf("failed parsing --set-string data: %w", err)
}
}
@@ -102,14 +101,14 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er
return string(bytes), err
}
if err := strvals.ParseIntoFile(value, base, reader); err != nil {
- return nil, errors.Wrap(err, "failed parsing --set-file data")
+ return nil, fmt.Errorf("failed parsing --set-file data: %w", err)
}
}
// User specified a value via --set-literal
for _, value := range opts.LiteralValues {
if err := strvals.ParseLiteralInto(value, base); err != nil {
- return nil, errors.Wrap(err, "failed parsing --set-literal data")
+ return nil, fmt.Errorf("failed parsing --set-literal data: %w", err)
}
}
diff --git a/pkg/cli/values/options_test.go b/pkg/cli/values/options_test.go
index c3bb0af33..fe1afc5d2 100644
--- a/pkg/cli/values/options_test.go
+++ b/pkg/cli/values/options_test.go
@@ -17,13 +17,275 @@ limitations under the License.
package values
import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
"reflect"
+ "strings"
"testing"
"helm.sh/helm/v4/pkg/getter"
)
+// mockGetter implements getter.Getter for testing
+type mockGetter struct {
+ content []byte
+ err error
+}
+
+func (m *mockGetter) Get(_ string, _ ...getter.Option) (*bytes.Buffer, error) {
+ if m.err != nil {
+ return nil, m.err
+ }
+ return bytes.NewBuffer(m.content), nil
+}
+
+// mockProvider creates a test provider
+func mockProvider(schemes []string, content []byte, err error) getter.Provider {
+ return getter.Provider{
+ Schemes: schemes,
+ New: func(_ ...getter.Option) (getter.Getter, error) {
+ return &mockGetter{content: content, err: err}, nil
+ },
+ }
+}
+
func TestReadFile(t *testing.T) {
+ tests := []struct {
+ name string
+ filePath string
+ providers getter.Providers
+ setupFunc func(*testing.T) (string, func()) // setup temp files, return cleanup
+ expectError bool
+ expectStdin bool
+ expectedData []byte
+ }{
+ {
+ name: "stdin input with dash",
+ filePath: "-",
+ providers: getter.Providers{},
+ expectStdin: true,
+ expectError: false,
+ },
+ {
+ name: "stdin input with whitespace",
+ filePath: " - ",
+ providers: getter.Providers{},
+ expectStdin: true,
+ expectError: false,
+ },
+ {
+ name: "invalid URL parsing",
+ filePath: "://invalid-url",
+ providers: getter.Providers{},
+ expectError: true,
+ },
+ {
+ name: "local file - existing",
+ filePath: "test.txt",
+ providers: getter.Providers{},
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ tmpDir := t.TempDir()
+ filePath := filepath.Join(tmpDir, "test.txt")
+ content := []byte("local file content")
+ err := os.WriteFile(filePath, content, 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return filePath, func() {} // cleanup handled by t.TempDir()
+ },
+ expectError: false,
+ expectedData: []byte("local file content"),
+ },
+ {
+ name: "local file - non-existent",
+ filePath: "/non/existent/file.txt",
+ providers: getter.Providers{},
+ expectError: true,
+ },
+ {
+ name: "remote file with http scheme - success",
+ filePath: "http://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("remote content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("remote content"),
+ },
+ {
+ name: "remote file with https scheme - success",
+ filePath: "https://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("https content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("https content"),
+ },
+ {
+ name: "remote file with custom scheme - success",
+ filePath: "oci://registry.example.com/chart",
+ providers: getter.Providers{
+ mockProvider([]string{"oci"}, []byte("oci content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("oci content"),
+ },
+ {
+ name: "remote file - getter error",
+ filePath: "http://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http"}, nil, errors.New("network error")),
+ },
+ expectError: true,
+ },
+ {
+ name: "unsupported scheme fallback to local file",
+ filePath: "ftp://example.com/file.txt",
+ providers: getter.Providers{
+ mockProvider([]string{"http"}, []byte("should not be used"), nil),
+ },
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Create a local file named "ftp://example.com/file.txt"
+ // This tests the fallback behavior when scheme is not supported
+ tmpDir := t.TempDir()
+ fileName := "ftp_file.txt" // Valid filename for filesystem
+ filePath := filepath.Join(tmpDir, fileName)
+ content := []byte("local fallback content")
+ err := os.WriteFile(filePath, content, 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return filePath, func() {}
+ },
+ expectError: false,
+ expectedData: []byte("local fallback content"),
+ },
+ {
+ name: "empty file path",
+ filePath: "",
+ providers: getter.Providers{},
+ expectError: true, // Empty path should cause error
+ },
+ {
+ name: "multiple providers - correct selection",
+ filePath: "custom://example.com/resource",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("wrong content"), nil),
+ mockProvider([]string{"custom"}, []byte("correct content"), nil),
+ mockProvider([]string{"oci"}, []byte("also wrong"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("correct content"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var actualFilePath string
+ var cleanup func()
+
+ if tt.setupFunc != nil {
+ actualFilePath, cleanup = tt.setupFunc(t)
+ defer cleanup()
+ } else {
+ actualFilePath = tt.filePath
+ }
+
+ // Handle stdin test case
+ if tt.expectStdin {
+ // Save original stdin
+ originalStdin := os.Stdin
+ defer func() { os.Stdin = originalStdin }()
+
+ // Create a pipe for stdin
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ defer w.Close()
+
+ // Replace stdin with our pipe
+ os.Stdin = r
+
+ // Write test data to stdin
+ testData := []byte("stdin test data")
+ go func() {
+ defer w.Close()
+ w.Write(testData)
+ }()
+
+ // Test the function
+ got, err := readFile(actualFilePath, tt.providers)
+ if err != nil {
+ t.Errorf("readFile() error = %v, expected no error for stdin", err)
+ return
+ }
+
+ if !bytes.Equal(got, testData) {
+ t.Errorf("readFile() = %v, want %v", got, testData)
+ }
+ return
+ }
+
+ // Regular test cases
+ got, err := readFile(actualFilePath, tt.providers)
+ if (err != nil) != tt.expectError {
+ t.Errorf("readFile() error = %v, expectError %v", err, tt.expectError)
+ return
+ }
+
+ if !tt.expectError && tt.expectedData != nil {
+ if !bytes.Equal(got, tt.expectedData) {
+ t.Errorf("readFile() = %v, want %v", got, tt.expectedData)
+ }
+ }
+ })
+ }
+}
+
+// TestReadFileErrorMessages tests specific error scenarios and their messages
+func TestReadFileErrorMessages(t *testing.T) {
+ tests := []struct {
+ name string
+ filePath string
+ providers getter.Providers
+ wantErr string
+ }{
+ {
+ name: "URL parse error",
+ filePath: "://invalid",
+ providers: getter.Providers{},
+ wantErr: "missing protocol scheme",
+ },
+ {
+ name: "getter error with message",
+ filePath: "http://example.com/file",
+ providers: getter.Providers{mockProvider([]string{"http"}, nil, fmt.Errorf("connection refused"))},
+ wantErr: "connection refused",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := readFile(tt.filePath, tt.providers)
+ if err == nil {
+ t.Errorf("readFile() expected error containing %q, got nil", tt.wantErr)
+ return
+ }
+ if !strings.Contains(err.Error(), tt.wantErr) {
+ t.Errorf("readFile() error = %v, want error containing %q", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+// Original test case - keeping for backward compatibility
+func TestReadFileOriginal(t *testing.T) {
var p getter.Providers
filePath := "%a.txt"
_, err := readFile(filePath, p)
@@ -32,7 +294,7 @@ func TestReadFile(t *testing.T) {
}
}
-func TestMergeValues(t *testing.T) {
+func TestMergeValuesCLI(t *testing.T) {
tests := []struct {
name string
opts Options
diff --git a/pkg/cmd/completion_test.go b/pkg/cmd/completion_test.go
index 872da25f3..375a9a97d 100644
--- a/pkg/cmd/completion_test.go
+++ b/pkg/cmd/completion_test.go
@@ -27,6 +27,7 @@ import (
// Check if file completion should be performed according to parameter 'shouldBePerformed'
func checkFileCompletion(t *testing.T, cmdName string, shouldBePerformed bool) {
+ t.Helper()
storage := storageFixture()
storage.Create(&release.Release{
Name: "myrelease",
@@ -64,6 +65,7 @@ func TestCompletionFileCompletion(t *testing.T) {
}
func checkReleaseCompletion(t *testing.T, cmdName string, multiReleasesAllowed bool) {
+ t.Helper()
multiReleaseTestGolden := "output/empty_nofile_comp.txt"
if multiReleasesAllowed {
multiReleaseTestGolden = "output/release_list_repeat_comp.txt"
diff --git a/pkg/cmd/create_test.go b/pkg/cmd/create_test.go
index bfdf3db5a..90ed90eff 100644
--- a/pkg/cmd/create_test.go
+++ b/pkg/cmd/create_test.go
@@ -30,10 +30,9 @@ import (
)
func TestCreateCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
ensure.HelmHome(t)
cname := "testchart"
- dir := t.TempDir()
- defer testChdir(t, dir)()
// Run a create
if _, _, err := executeActionCommand("create " + cname); err != nil {
@@ -61,22 +60,20 @@ func TestCreateCmd(t *testing.T) {
}
func TestCreateStarterCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
ensure.HelmHome(t)
cname := "testchart"
defer resetEnv()()
- os.MkdirAll(helmpath.CachePath(), 0755)
- defer testChdir(t, helmpath.CachePath())()
-
// Create a starter.
starterchart := helmpath.DataPath("starters")
- os.MkdirAll(starterchart, 0755)
+ os.MkdirAll(starterchart, 0o755)
if dest, err := chartutil.Create("starterchart", starterchart); err != nil {
t.Fatalf("Could not create chart: %s", err)
} else {
t.Logf("Created %s", dest)
}
tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl")
- if err := os.WriteFile(tplpath, []byte("test"), 0644); err != nil {
+ if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil {
t.Fatalf("Could not write template: %s", err)
}
@@ -105,7 +102,7 @@ func TestCreateStarterCmd(t *testing.T) {
t.Errorf("Wrong API version: %q", c.Metadata.APIVersion)
}
- expectedNumberOfTemplates := 9
+ expectedNumberOfTemplates := 10
if l := len(c.Templates); l != expectedNumberOfTemplates {
t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l)
}
@@ -122,30 +119,27 @@ func TestCreateStarterCmd(t *testing.T) {
if !found {
t.Error("Did not find foo.tpl")
}
-
}
func TestCreateStarterAbsoluteCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
defer resetEnv()()
ensure.HelmHome(t)
cname := "testchart"
// Create a starter.
starterchart := helmpath.DataPath("starters")
- os.MkdirAll(starterchart, 0755)
+ os.MkdirAll(starterchart, 0o755)
if dest, err := chartutil.Create("starterchart", starterchart); err != nil {
t.Fatalf("Could not create chart: %s", err)
} else {
t.Logf("Created %s", dest)
}
tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl")
- if err := os.WriteFile(tplpath, []byte("test"), 0644); err != nil {
+ if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil {
t.Fatalf("Could not write template: %s", err)
}
- os.MkdirAll(helmpath.CachePath(), 0755)
- defer testChdir(t, helmpath.CachePath())()
-
starterChartPath := filepath.Join(starterchart, "starterchart")
// Run a create
@@ -173,7 +167,7 @@ func TestCreateStarterAbsoluteCmd(t *testing.T) {
t.Errorf("Wrong API version: %q", c.Metadata.APIVersion)
}
- expectedNumberOfTemplates := 9
+ expectedNumberOfTemplates := 10
if l := len(c.Templates); l != expectedNumberOfTemplates {
t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l)
}
diff --git a/pkg/cmd/dependency_build.go b/pkg/cmd/dependency_build.go
index 16907facf..320fe12ae 100644
--- a/pkg/cmd/dependency_build.go
+++ b/pkg/cmd/dependency_build.go
@@ -69,6 +69,7 @@ func newDependencyBuildCmd(out io.Writer) *cobra.Command {
RegistryClient: registryClient,
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
Debug: settings.Debug,
}
if client.Verify {
diff --git a/pkg/cmd/dependency_build_test.go b/pkg/cmd/dependency_build_test.go
index a4a89b7a9..a3473301d 100644
--- a/pkg/cmd/dependency_build_test.go
+++ b/pkg/cmd/dependency_build_test.go
@@ -24,8 +24,8 @@ import (
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/provenance"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestDependencyBuildCmd(t *testing.T) {
diff --git a/pkg/cmd/dependency_update.go b/pkg/cmd/dependency_update.go
index 921e5ef49..b534fb48a 100644
--- a/pkg/cmd/dependency_update.go
+++ b/pkg/cmd/dependency_update.go
@@ -73,6 +73,7 @@ func newDependencyUpdateCmd(_ *action.Configuration, out io.Writer) *cobra.Comma
RegistryClient: registryClient,
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
Debug: settings.Debug,
}
if client.Verify {
diff --git a/pkg/cmd/dependency_update_test.go b/pkg/cmd/dependency_update_test.go
index 890767720..3eaa51df1 100644
--- a/pkg/cmd/dependency_update_test.go
+++ b/pkg/cmd/dependency_update_test.go
@@ -16,7 +16,9 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"strings"
@@ -27,8 +29,8 @@ import (
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/provenance"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestDependencyUpdateCmd(t *testing.T) {
@@ -43,6 +45,7 @@ func TestDependencyUpdateCmd(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ contentCache := t.TempDir()
ociChartName := "oci-depending-chart"
c := createTestingMetadataForOCI(ociChartName, ociSrv.RegistryURL)
@@ -67,7 +70,7 @@ func TestDependencyUpdateCmd(t *testing.T) {
}
_, out, err := executeActionCommand(
- fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir()),
+ fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache),
)
if err != nil {
t.Logf("Output: %s", out)
@@ -110,7 +113,7 @@ func TestDependencyUpdateCmd(t *testing.T) {
t.Fatal(err)
}
- _, out, err = executeActionCommand(fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir()))
+ _, out, err = executeActionCommand(fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache))
if err != nil {
t.Logf("Output: %s", out)
t.Fatal(err)
@@ -131,11 +134,12 @@ func TestDependencyUpdateCmd(t *testing.T) {
if err := chartutil.SaveDir(c, dir()); err != nil {
t.Fatal(err)
}
- cmd := fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --plain-http",
+ cmd := fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --content-cache %s --plain-http",
dir(ociChartName),
dir("repositories.yaml"),
dir(),
- dir())
+ dir(),
+ contentCache)
_, out, err = executeActionCommand(cmd)
if err != nil {
t.Logf("Output: %s", out)
@@ -177,8 +181,9 @@ func TestDependencyUpdateCmd_DoNotDeleteOldChartsOnError(t *testing.T) {
// Chart repo is down
srv.Stop()
+ contentCache := t.TempDir()
- _, output, err = executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir()))
+ _, output, err = executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache))
if err == nil {
t.Logf("Output: %s", output)
t.Fatal("Expected error, got nil")
@@ -202,7 +207,7 @@ func TestDependencyUpdateCmd_DoNotDeleteOldChartsOnError(t *testing.T) {
// Make sure tmpcharts-x is deleted
tmpPath := filepath.Join(dir(chartname), fmt.Sprintf("tmpcharts-%d", os.Getpid()))
- if _, err := os.Stat(tmpPath); !os.IsNotExist(err) {
+ if _, err := os.Stat(tmpPath); !errors.Is(err, fs.ErrNotExist) {
t.Fatalf("tmpcharts dir still exists")
}
}
@@ -230,9 +235,11 @@ func TestDependencyUpdateCmd_WithRepoThatWasNotAdded(t *testing.T) {
t.Fatal(err)
}
+ contentCache := t.TempDir()
+
_, out, err := executeActionCommand(
- fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s", dir(chartname),
- dir("repositories.yaml"), dir()),
+ fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s", dir(chartname),
+ dir("repositories.yaml"), dir(), contentCache),
)
if err != nil {
@@ -248,6 +255,7 @@ func TestDependencyUpdateCmd_WithRepoThatWasNotAdded(t *testing.T) {
}
func setupMockRepoServer(t *testing.T) *repotest.Server {
+ t.Helper()
srv := repotest.NewTempServer(
t,
repotest.WithChartSourceGlob("testdata/testcharts/*.tgz"),
diff --git a/pkg/cmd/docs.go b/pkg/cmd/docs.go
index b3fd773f9..7fae60743 100644
--- a/pkg/cmd/docs.go
+++ b/pkg/cmd/docs.go
@@ -22,7 +22,6 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"golang.org/x/text/cases"
@@ -86,7 +85,7 @@ func (o *docsOptions) run(_ io.Writer) error {
hdrFunc := func(filename string) string {
base := filepath.Base(filename)
name := strings.TrimSuffix(base, path.Ext(base))
- title := cases.Title(language.Und, cases.NoLower).String(strings.Replace(name, "_", " ", -1))
+ title := cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(name, "_", " "))
return fmt.Sprintf("---\ntitle: \"%s\"\n---\n\n", title)
}
@@ -99,6 +98,6 @@ func (o *docsOptions) run(_ io.Writer) error {
case "bash":
return o.topCmd.GenBashCompletionFile(filepath.Join(o.dest, "completions.bash"))
default:
- return errors.Errorf("unknown doc type %q. Try 'markdown' or 'man'", o.docTypeString)
+ return fmt.Errorf("unknown doc type %q. Try 'markdown' or 'man'", o.docTypeString)
}
}
diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go
index 333b97006..b20772ef9 100644
--- a/pkg/cmd/flags.go
+++ b/pkg/cmd/flags.go
@@ -20,20 +20,24 @@ import (
"flag"
"fmt"
"log"
+ "log/slog"
"path/filepath"
"sort"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+
"k8s.io/klog/v2"
"helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cli/values"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/postrender"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/kube"
+ "helm.sh/helm/v4/pkg/postrenderer"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
const (
@@ -51,6 +55,52 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) {
f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line")
}
+func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) {
+ cmd.Flags().Var(
+ newWaitValue(kube.HookOnlyStrategy, wait),
+ "wait",
+ "if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'",
+ )
+ // Sets the strategy to use the watcher strategy if `--wait` is used without an argument
+ cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy)
+}
+
+type waitValue kube.WaitStrategy
+
+func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue {
+ *ws = defaultValue
+ return (*waitValue)(ws)
+}
+
+func (ws *waitValue) String() string {
+ if ws == nil {
+ return ""
+ }
+ return string(*ws)
+}
+
+func (ws *waitValue) Set(s string) error {
+ switch s {
+ case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy):
+ *ws = waitValue(s)
+ return nil
+ case "true":
+ slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
+ *ws = waitValue(kube.StatusWatcherStrategy)
+ return nil
+ case "false":
+ slog.Warn("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
+ *ws = waitValue(kube.HookOnlyStrategy)
+ return nil
+ default:
+ return fmt.Errorf("invalid wait input %q. Valid inputs are %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy)
+ }
+}
+
+func (ws *waitValue) Type() string {
+ return "WaitStrategy"
+}
+
func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) {
f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")
@@ -115,16 +165,18 @@ func (o *outputValue) Set(s string) error {
return nil
}
-func bindPostRenderFlag(cmd *cobra.Command, varRef *postrender.PostRenderer) {
- p := &postRendererOptions{varRef, "", []string{}}
- cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the path to an executable to be used for post rendering. If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path")
+// TODO there is probably a better way to pass cobra settings than as a param
+func bindPostRenderFlag(cmd *cobra.Command, varRef *postrenderer.PostRenderer, settings *cli.EnvSettings) {
+ p := &postRendererOptions{varRef, "", []string{}, settings}
+ cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the name of a postrenderer type plugin to be used for post rendering. If it exists, the plugin will be used")
cmd.Flags().Var(&postRendererArgsSlice{p}, postRenderArgsFlag, "an argument to the post-renderer (can specify multiple)")
}
type postRendererOptions struct {
- renderer *postrender.PostRenderer
- binaryPath string
+ renderer *postrenderer.PostRenderer
+ pluginName string
args []string
+ settings *cli.EnvSettings
}
type postRendererString struct {
@@ -132,7 +184,7 @@ type postRendererString struct {
}
func (p *postRendererString) String() string {
- return p.options.binaryPath
+ return p.options.pluginName
}
func (p *postRendererString) Type() string {
@@ -143,11 +195,11 @@ func (p *postRendererString) Set(val string) error {
if val == "" {
return nil
}
- if p.options.binaryPath != "" {
+ if p.options.pluginName != "" {
return fmt.Errorf("cannot specify --post-renderer flag more than once")
}
- p.options.binaryPath = val
- pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...)
+ p.options.pluginName = val
+ pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...)
if err != nil {
return err
}
@@ -172,11 +224,11 @@ func (p *postRendererArgsSlice) Set(val string) error {
// a post-renderer defined by a user may accept empty arguments
p.options.args = append(p.options.args, val)
- if p.options.binaryPath == "" {
+ if p.options.pluginName == "" {
return nil
}
// overwrite if already create PostRenderer by `post-renderer` flags
- pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...)
+ pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...)
if err != nil {
return err
}
@@ -212,7 +264,7 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect
var versions []string
if indexFile, err := repo.LoadIndexFile(path); err == nil {
for _, details := range indexFile.Entries[chartName] {
- appVersion := details.Metadata.AppVersion
+ appVersion := details.AppVersion
appVersionDesc := ""
if appVersion != "" {
appVersionDesc = fmt.Sprintf("App: %s, ", appVersion)
@@ -223,10 +275,10 @@ func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirect
createdDesc = fmt.Sprintf("Created: %s ", created)
}
deprecated := ""
- if details.Metadata.Deprecated {
+ if details.Deprecated {
deprecated = "(deprecated)"
}
- versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Metadata.Version, appVersionDesc, createdDesc, deprecated))
+ versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Version, appVersionDesc, createdDesc, deprecated))
}
}
diff --git a/pkg/cmd/flags_test.go b/pkg/cmd/flags_test.go
index 9d416f216..dce748a6b 100644
--- a/pkg/cmd/flags_test.go
+++ b/pkg/cmd/flags_test.go
@@ -29,6 +29,7 @@ import (
)
func outputFlagCompletionTest(t *testing.T, cmdName string) {
+ t.Helper()
releasesMockWithStatus := func(info *release.Info, hooks ...*release.Hook) []*release.Release {
info.LastDeployed = helmtime.Unix(1452902400, 0).UTC()
return []*release.Release{{
@@ -100,20 +101,22 @@ func outputFlagCompletionTest(t *testing.T, cmdName string) {
func TestPostRendererFlagSetOnce(t *testing.T) {
cfg := action.Configuration{}
client := action.NewInstall(&cfg)
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
str := postRendererString{
options: &postRendererOptions{
renderer: &client.PostRenderer,
+ settings: settings,
},
}
- // Set the binary once
- err := str.Set("echo")
+ // Set the plugin name once
+ err := str.Set("postrenderer-v1")
require.NoError(t, err)
- // Set the binary again to the same value is not ok
- err = str.Set("echo")
+ // Set the plugin name again to the same value is not ok
+ err = str.Set("postrenderer-v1")
require.Error(t, err)
- // Set the binary again to a different value is not ok
+ // Set the plugin name again to a different value is not ok
err = str.Set("cat")
require.Error(t, err)
}
diff --git a/pkg/cmd/get_all.go b/pkg/cmd/get_all.go
index aee92df51..32744796c 100644
--- a/pkg/cmd/get_all.go
+++ b/pkg/cmd/get_all.go
@@ -63,6 +63,7 @@ func newGetAllCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
debug: true,
showMetadata: true,
hideNotes: false,
+ noColor: settings.ShouldDisableColor(),
})
},
}
diff --git a/pkg/cmd/get_metadata.go b/pkg/cmd/get_metadata.go
index 9f58e0f4e..eb90b6e44 100644
--- a/pkg/cmd/get_metadata.go
+++ b/pkg/cmd/get_metadata.go
@@ -27,6 +27,8 @@ import (
"helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/require"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
)
type metadataWriter struct {
@@ -75,16 +77,32 @@ func newGetMetadataCmd(cfg *action.Configuration, out io.Writer) *cobra.Command
}
func (w metadataWriter) WriteTable(out io.Writer) error {
+
+ formatApplyMethod := func(applyMethod string) string {
+ switch applyMethod {
+ case "":
+ return "client-side apply (defaulted)"
+ case string(release.ApplyMethodClientSideApply):
+ return "client-side apply"
+ case string(release.ApplyMethodServerSideApply):
+ return "server-side apply"
+ default:
+ return fmt.Sprintf("unknown (%q)", applyMethod)
+ }
+ }
+
_, _ = fmt.Fprintf(out, "NAME: %v\n", w.metadata.Name)
_, _ = fmt.Fprintf(out, "CHART: %v\n", w.metadata.Chart)
_, _ = fmt.Fprintf(out, "VERSION: %v\n", w.metadata.Version)
_, _ = fmt.Fprintf(out, "APP_VERSION: %v\n", w.metadata.AppVersion)
_, _ = fmt.Fprintf(out, "ANNOTATIONS: %v\n", k8sLabels.Set(w.metadata.Annotations).String())
+ _, _ = fmt.Fprintf(out, "LABELS: %v\n", k8sLabels.Set(w.metadata.Labels).String())
_, _ = fmt.Fprintf(out, "DEPENDENCIES: %v\n", w.metadata.FormattedDepNames())
_, _ = fmt.Fprintf(out, "NAMESPACE: %v\n", w.metadata.Namespace)
_, _ = fmt.Fprintf(out, "REVISION: %v\n", w.metadata.Revision)
_, _ = fmt.Fprintf(out, "STATUS: %v\n", w.metadata.Status)
_, _ = fmt.Fprintf(out, "DEPLOYED_AT: %v\n", w.metadata.DeployedAt)
+ _, _ = fmt.Fprintf(out, "APPLY_METHOD: %v\n", formatApplyMethod(w.metadata.ApplyMethod))
return nil
}
diff --git a/pkg/cmd/get_metadata_test.go b/pkg/cmd/get_metadata_test.go
index a2ab2cba1..59fc3b82c 100644
--- a/pkg/cmd/get_metadata_test.go
+++ b/pkg/cmd/get_metadata_test.go
@@ -27,23 +27,23 @@ func TestGetMetadataCmd(t *testing.T) {
name: "get metadata with a release",
cmd: "get metadata thomas-guide",
golden: "output/get-metadata.txt",
- rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
}, {
name: "get metadata requires release name arg",
cmd: "get metadata",
golden: "output/get-metadata-args.txt",
- rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
wantError: true,
}, {
name: "get metadata to json",
cmd: "get metadata thomas-guide --output json",
golden: "output/get-metadata.json",
- rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
}, {
name: "get metadata to yaml",
cmd: "get metadata thomas-guide --output yaml",
golden: "output/get-metadata.yaml",
- rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
}}
runTestCmd(t, tests)
}
diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go
index effbc1673..55e3a842f 100644
--- a/pkg/cmd/helpers_test.go
+++ b/pkg/cmd/helpers_test.go
@@ -28,7 +28,7 @@ import (
"helm.sh/helm/v4/internal/test"
"helm.sh/helm/v4/pkg/action"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common"
"helm.sh/helm/v4/pkg/cli"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1"
@@ -91,11 +91,10 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
actionConfig := &action.Configuration{
Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
- Capabilities: chartutil.DefaultCapabilities,
- Log: func(_ string, _ ...interface{}) {},
+ Capabilities: common.DefaultCapabilities,
}
- root, err := newRootCmdWithConfig(actionConfig, buf, args)
+ root, err := newRootCmdWithConfig(actionConfig, buf, args, SetupLogging)
if err != nil {
return nil, "", err
}
@@ -105,6 +104,10 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
root.SetArgs(args)
oldStdin := os.Stdin
+ defer func() {
+ os.Stdin = oldStdin
+ }()
+
if in != nil {
root.SetIn(in)
os.Stdin = in
@@ -117,8 +120,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string)
result := buf.String()
- os.Stdin = oldStdin
-
return c, result, err
}
@@ -150,15 +151,3 @@ func resetEnv() func() {
settings = cli.New()
}
}
-
-func testChdir(t *testing.T, dir string) func() {
- t.Helper()
- old, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
- if err := os.Chdir(dir); err != nil {
- t.Fatal(err)
- }
- return func() { os.Chdir(old) }
-}
diff --git a/pkg/cmd/history.go b/pkg/cmd/history.go
index ec2a1bc12..9f029268c 100644
--- a/pkg/cmd/history.go
+++ b/pkg/cmd/history.go
@@ -29,8 +29,8 @@ import (
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/require"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
helmtime "helm.sh/helm/v4/pkg/time"
)
diff --git a/pkg/cmd/history_test.go b/pkg/cmd/history_test.go
index 594d93d21..d26ed9ecf 100644
--- a/pkg/cmd/history_test.go
+++ b/pkg/cmd/history_test.go
@@ -75,6 +75,7 @@ func TestHistoryOutputCompletion(t *testing.T) {
}
func revisionFlagCompletionTest(t *testing.T, cmdName string) {
+ t.Helper()
mk := func(name string, vers int, status release.Status) *release.Release {
return release.Mock(&release.MockReleaseOptions{
Name: name,
diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go
index 907f3ee19..c4e121c1f 100644
--- a/pkg/cmd/install.go
+++ b/pkg/cmd/install.go
@@ -18,15 +18,17 @@ package cmd
import (
"context"
+ "errors"
"fmt"
"io"
"log"
+ "log/slog"
"os"
"os/signal"
+ "slices"
"syscall"
"time"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -158,7 +160,7 @@ func newInstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
rel, err := runInstall(args, client, valueOpts, out)
if err != nil {
- return errors.Wrap(err, "INSTALLATION FAILED")
+ return fmt.Errorf("INSTALLATION FAILED: %w", err)
}
return outfmt.Write(out, &statusPrinter{
@@ -166,6 +168,7 @@ func newInstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
debug: settings.Debug,
showMetadata: false,
hideNotes: client.HideNotes,
+ noColor: settings.ShouldDisableColor(),
})
},
}
@@ -176,7 +179,7 @@ func newInstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f := cmd.Flags()
f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag")
bindOutputFlag(cmd, &outfmt)
- bindPostRenderFlag(cmd, &client.PostRenderer)
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
return cmd
}
@@ -190,11 +193,14 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
// The true/false part is meant to reflect some legacy behavior while none is equal to "".
f.StringVar(&client.DryRunOption, "dry-run", "", "simulate an install. If --dry-run is set with no option being specified or as '--dry-run=client', it will not attempt cluster connections. Setting '--dry-run=server' allows attempting cluster connections.")
f.Lookup("dry-run").NoOptDefVal = "client"
- f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy")
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.BoolVar(&client.ServerSideApply, "server-side", true, "object updates run in the server instead of the client")
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install")
f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
- f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)")
f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release")
@@ -202,7 +208,8 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
- f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used")
+ f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback (uninstall) the installation upon failure. The --wait flag will be default to \"watcher\" if --rollback-on-failure is set")
+ f.MarkDeprecated("atomic", "use --rollback-on-failure instead")
f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
@@ -212,6 +219,9 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources")
addValueOptionsFlags(f, valueOpts)
addChartPathOptionsFlags(f, &client.ChartPathOptions)
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
requiredArgs := 2
@@ -229,24 +239,24 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
}
func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
- Debug("Original chart version: %q", client.Version)
+ slog.Debug("Original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
- Debug("setting version to >0.0.0-0")
+ slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
- name, chart, err := client.NameAndChart(args)
+ name, chartRef, err := client.NameAndChart(args)
if err != nil {
return nil, err
}
client.ReleaseName = name
- cp, err := client.ChartPathOptions.LocateChart(chart, settings)
+ cp, err := client.LocateChart(chartRef, settings)
if err != nil {
return nil, err
}
- Debug("CHART PATH: %s\n", cp)
+ slog.Debug("Chart path", "path", cp)
p := getter.All(settings)
vals, err := valueOpts.MergeValues(p)
@@ -265,7 +275,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
}
if chartRequested.Metadata.Deprecated {
- Warning("This chart is deprecated")
+ slog.Warn("this chart is deprecated")
}
if req := chartRequested.Metadata.Dependencies; req != nil {
@@ -273,16 +283,16 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
// As of Helm 2.4.0, this is treated as a stopping condition:
// https://github.com/helm/helm/issues/2209
if err := action.CheckDependencies(chartRequested, req); err != nil {
- err = errors.Wrap(err, "An error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies")
if client.DependencyUpdate {
man := &downloader.Manager{
Out: out,
ChartPath: cp,
- Keyring: client.ChartPathOptions.Keyring,
+ Keyring: client.Keyring,
SkipUpdate: false,
Getters: p,
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
Debug: settings.Debug,
RegistryClient: client.GetRegistryClient(),
}
@@ -291,10 +301,10 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options
}
// Reload the chart with the updated Chart.lock file.
if chartRequested, err = loader.Load(cp); err != nil {
- return nil, errors.Wrap(err, "failed reloading chart after repo update")
+ return nil, fmt.Errorf("failed reloading chart after repo update: %w", err)
}
} else {
- return nil, err
+ return nil, fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err)
}
}
}
@@ -332,7 +342,7 @@ func checkIfInstallable(ch *chart.Chart) error {
case "", "application":
return nil
}
- return errors.Errorf("%s charts are not installable", ch.Metadata.Type)
+ return fmt.Errorf("%s charts are not installable", ch.Metadata.Type)
}
// Provide dynamic auto-completion for the install and template commands
@@ -350,15 +360,9 @@ func compInstall(args []string, toComplete string, client *action.Install) ([]st
func validateDryRunOptionFlag(dryRunOptionFlagValue string) error {
// Validate dry-run flag value with a set of allowed value
allowedDryRunValues := []string{"false", "true", "none", "client", "server"}
- isAllowed := false
- for _, v := range allowedDryRunValues {
- if dryRunOptionFlagValue == v {
- isAllowed = true
- break
- }
- }
+ isAllowed := slices.Contains(allowedDryRunValues, dryRunOptionFlagValue)
if !isAllowed {
- return errors.New("Invalid dry-run flag. Flag must one of the following: false, true, none, client, server")
+ return errors.New("invalid dry-run flag. Flag must one of the following: false, true, none, client, server")
}
return nil
}
diff --git a/pkg/cmd/install_test.go b/pkg/cmd/install_test.go
index 9cd244e84..f0f12e4f7 100644
--- a/pkg/cmd/install_test.go
+++ b/pkg/cmd/install_test.go
@@ -23,7 +23,7 @@ import (
"path/filepath"
"testing"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestInstall(t *testing.T) {
diff --git a/pkg/cmd/lint.go b/pkg/cmd/lint.go
index 18a43a1ef..71540f1be 100644
--- a/pkg/cmd/lint.go
+++ b/pkg/cmd/lint.go
@@ -17,20 +17,20 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/action"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
"helm.sh/helm/v4/pkg/cli/values"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/lint/support"
)
var longLintHelp = `
@@ -58,7 +58,7 @@ func newLintCmd(out io.Writer) *cobra.Command {
}
if kubeVersion != "" {
- parsedKubeVersion, err := chartutil.ParseKubeVersion(kubeVersion)
+ parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion)
if err != nil {
return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err)
}
diff --git a/pkg/cmd/list.go b/pkg/cmd/list.go
index 85acbc97f..55d828036 100644
--- a/pkg/cmd/list.go
+++ b/pkg/cmd/list.go
@@ -20,11 +20,13 @@ import (
"fmt"
"io"
"os"
+ "slices"
"strconv"
"github.com/gosuri/uitable"
"github.com/spf13/cobra"
+ coloroutput "helm.sh/helm/v4/internal/cli/output"
"helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/require"
@@ -71,7 +73,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(cmd *cobra.Command, _ []string) error {
if client.AllNamespaces {
- if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), Debug); err != nil {
+ if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
return err
}
}
@@ -105,7 +107,7 @@ func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
}
- return outfmt.Write(out, newReleaseListWriter(results, client.TimeFormat, client.NoHeaders))
+ return outfmt.Write(out, newReleaseListWriter(results, client.TimeFormat, client.NoHeaders, settings.ShouldDisableColor()))
},
}
@@ -145,9 +147,10 @@ type releaseElement struct {
type releaseListWriter struct {
releases []releaseElement
noHeaders bool
+ noColor bool
}
-func newReleaseListWriter(releases []*release.Release, timeFormat string, noHeaders bool) *releaseListWriter {
+func newReleaseListWriter(releases []*release.Release, timeFormat string, noHeaders bool, noColor bool) *releaseListWriter {
// Initialize the array so no results returns an empty array instead of null
elements := make([]releaseElement, 0, len(releases))
for _, r := range releases {
@@ -172,26 +175,58 @@ func newReleaseListWriter(releases []*release.Release, timeFormat string, noHead
elements = append(elements, element)
}
- return &releaseListWriter{elements, noHeaders}
+ return &releaseListWriter{elements, noHeaders, noColor}
}
-func (r *releaseListWriter) WriteTable(out io.Writer) error {
+func (w *releaseListWriter) WriteTable(out io.Writer) error {
table := uitable.New()
- if !r.noHeaders {
- table.AddRow("NAME", "NAMESPACE", "REVISION", "UPDATED", "STATUS", "CHART", "APP VERSION")
+ if !w.noHeaders {
+ table.AddRow(
+ coloroutput.ColorizeHeader("NAME", w.noColor),
+ coloroutput.ColorizeHeader("NAMESPACE", w.noColor),
+ coloroutput.ColorizeHeader("REVISION", w.noColor),
+ coloroutput.ColorizeHeader("UPDATED", w.noColor),
+ coloroutput.ColorizeHeader("STATUS", w.noColor),
+ coloroutput.ColorizeHeader("CHART", w.noColor),
+ coloroutput.ColorizeHeader("APP VERSION", w.noColor),
+ )
}
- for _, r := range r.releases {
- table.AddRow(r.Name, r.Namespace, r.Revision, r.Updated, r.Status, r.Chart, r.AppVersion)
+ for _, r := range w.releases {
+ // Parse the status string back to a release.Status to use color
+ var status release.Status
+ switch r.Status {
+ case "deployed":
+ status = release.StatusDeployed
+ case "failed":
+ status = release.StatusFailed
+ case "pending-install":
+ status = release.StatusPendingInstall
+ case "pending-upgrade":
+ status = release.StatusPendingUpgrade
+ case "pending-rollback":
+ status = release.StatusPendingRollback
+ case "uninstalling":
+ status = release.StatusUninstalling
+ case "uninstalled":
+ status = release.StatusUninstalled
+ case "superseded":
+ status = release.StatusSuperseded
+ case "unknown":
+ status = release.StatusUnknown
+ default:
+ status = release.Status(r.Status)
+ }
+ table.AddRow(r.Name, coloroutput.ColorizeNamespace(r.Namespace, w.noColor), r.Revision, r.Updated, coloroutput.ColorizeStatus(status, w.noColor), r.Chart, r.AppVersion)
}
return output.EncodeTable(out, table)
}
-func (r *releaseListWriter) WriteJSON(out io.Writer) error {
- return output.EncodeJSON(out, r.releases)
+func (w *releaseListWriter) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, w.releases)
}
-func (r *releaseListWriter) WriteYAML(out io.Writer) error {
- return output.EncodeYAML(out, r.releases)
+func (w *releaseListWriter) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, w.releases)
}
// Returns all releases from 'releases', except those with names matching 'ignoredReleases'
@@ -203,13 +238,7 @@ func filterReleases(releases []*release.Release, ignoredReleaseNames []string) [
var filteredReleases []*release.Release
for _, rel := range releases {
- found := false
- for _, ignoredName := range ignoredReleaseNames {
- if rel.Name == ignoredName {
- found = true
- break
- }
- }
+ found := slices.Contains(ignoredReleaseNames, rel.Name)
if !found {
filteredReleases = append(filteredReleases, rel)
}
diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go
index 3cf701242..c0593f384 100644
--- a/pkg/cmd/load_plugins.go
+++ b/pkg/cmd/load_plugins.go
@@ -17,46 +17,51 @@ package cmd
import (
"bytes"
+ "context"
"fmt"
"io"
"log"
"os"
- "os/exec"
"path/filepath"
+ "slices"
"strconv"
"strings"
- "syscall"
- "github.com/pkg/errors"
+ "helm.sh/helm/v4/internal/plugin/schema"
+
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
- "helm.sh/helm/v4/pkg/plugin"
+ "helm.sh/helm/v4/internal/plugin"
)
+// TODO: move pluginDynamicCompletionExecutable pkg/plugin/runtime_subprocess.go
+// any references to executables should be for [plugin.SubprocessPluginRuntime] only
+// this should also be for backwards compatibility in [plugin.Legacy] only
+//
+// TODO: for v1 make this configurable with a new CompletionCommand field for
+// [plugin.RuntimeConfigSubprocess]
const (
pluginStaticCompletionFile = "completion.yaml"
pluginDynamicCompletionExecutable = "plugin.complete"
)
-type PluginError struct {
- error
- Code int
-}
-
-// loadPlugins loads plugins into the command list.
+// loadCLIPlugins loads CLI plugins into the command list.
//
// This follows a different pattern than the other commands because it has
// to inspect its environment and then add commands to the base command
// as it finds them.
-func loadPlugins(baseCmd *cobra.Command, out io.Writer) {
-
+func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) {
// If HELM_NO_PLUGINS is set to 1, do not load plugins.
if os.Getenv("HELM_NO_PLUGINS") == "1" {
return
}
- found, err := plugin.FindPlugins(settings.PluginsDirectory)
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: "cli/v1",
+ }
+ found, err := plugin.FindPlugins(dirs, descriptor)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to load plugins: %s\n", err)
return
@@ -64,33 +69,65 @@ func loadPlugins(baseCmd *cobra.Command, out io.Writer) {
// Now we create commands for all of these.
for _, plug := range found {
- plug := plug
- md := plug.Metadata
- if md.Usage == "" {
- md.Usage = fmt.Sprintf("the %q plugin", md.Name)
+ var use, short, long string
+ var ignoreFlags bool
+ if cliConfig, ok := plug.Metadata().Config.(*schema.ConfigCLIV1); ok {
+ use = cliConfig.Usage
+ short = cliConfig.ShortHelp
+ long = cliConfig.LongHelp
+ ignoreFlags = cliConfig.IgnoreFlags
+ }
+
+ // Set defaults
+ if use == "" {
+ use = plug.Metadata().Name
+ }
+ if short == "" {
+ short = fmt.Sprintf("the %q plugin", plug.Metadata().Name)
}
+ // long has no default, empty is ok
c := &cobra.Command{
- Use: md.Name,
- Short: md.Usage,
- Long: md.Description,
+ Use: use,
+ Short: short,
+ Long: long,
RunE: func(cmd *cobra.Command, args []string) error {
u, err := processParent(cmd, args)
if err != nil {
return err
}
- // Call setupEnv before PrepareCommand because
- // PrepareCommand uses os.ExpandEnv and expects the
- // setupEnv vars.
- plugin.SetupPluginEnv(settings, md.Name, plug.Dir)
- main, argv, prepCmdErr := plug.PrepareCommand(u)
- if prepCmdErr != nil {
- os.Stderr.WriteString(prepCmdErr.Error())
- return errors.Errorf("plugin %q exited with error", md.Name)
+ // For CLI plugin types runtime, set extra args and settings
+ extraArgs := []string{}
+ if !ignoreFlags {
+ extraArgs = u
}
- return callPluginExecutable(md.Name, main, argv, out)
+ // Prepare environment
+ env := os.Environ()
+ for k, v := range settings.EnvVars() {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ // Invoke plugin
+ input := &plugin.Input{
+ Message: schema.InputMessageCLIV1{
+ ExtraArgs: extraArgs,
+ Settings: settings,
+ },
+ Env: env,
+ Stdin: os.Stdin,
+ Stdout: out,
+ Stderr: os.Stderr,
+ }
+ _, err = plug.Invoke(context.Background(), input)
+ if execErr, ok := err.(*plugin.InvokeExecError); ok {
+ return CommandError{
+ error: execErr.Err,
+ ExitCode: execErr.ExitCode,
+ }
+ }
+ return err
},
// This passes all the flags to the subcommand.
DisableFlagParsing: true,
@@ -120,34 +157,6 @@ func processParent(cmd *cobra.Command, args []string) ([]string, error) {
return u, nil
}
-// This function is used to setup the environment for the plugin and then
-// call the executable specified by the parameter 'main'
-func callPluginExecutable(pluginName string, main string, argv []string, out io.Writer) error {
- env := os.Environ()
- for k, v := range settings.EnvVars() {
- env = append(env, fmt.Sprintf("%s=%s", k, v))
- }
-
- mainCmdExp := os.ExpandEnv(main)
- prog := exec.Command(mainCmdExp, argv...)
- prog.Env = env
- prog.Stdin = os.Stdin
- prog.Stdout = out
- prog.Stderr = os.Stderr
- if err := prog.Run(); err != nil {
- if eerr, ok := err.(*exec.ExitError); ok {
- os.Stderr.Write(eerr.Stderr)
- status := eerr.Sys().(syscall.WaitStatus)
- return PluginError{
- error: errors.Errorf("plugin %q exited with error", pluginName),
- Code: status.ExitStatus(),
- }
- }
- return err
- }
- return nil
-}
-
// manuallyProcessArgs processes an arg array, removing special args.
//
// Returns two sets of args: known and unknown (in that order)
@@ -165,10 +174,8 @@ func manuallyProcessArgs(args []string) ([]string, []string) {
}
isKnown := func(v string) string {
- for _, i := range kvargs {
- if i == v {
- return v
- }
+ if slices.Contains(kvargs, v) {
+ return v
}
return ""
}
@@ -204,10 +211,10 @@ type pluginCommand struct {
// loadCompletionForPlugin will load and parse any completion.yaml provided by the plugin
// and add the dynamic completion hook to call the optional plugin.complete
-func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) {
+func loadCompletionForPlugin(pluginCmd *cobra.Command, plug plugin.Plugin) {
// Parse the yaml file providing the plugin's sub-commands and flags
cmds, err := loadFile(strings.Join(
- []string{plugin.Dir, pluginStaticCompletionFile}, string(filepath.Separator)))
+ []string{plug.Dir(), pluginStaticCompletionFile}, string(filepath.Separator)))
if err != nil {
// The file could be missing or invalid. No static completion for this plugin.
@@ -221,12 +228,12 @@ func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) {
// Preserve the Usage string specified for the plugin
cmds.Name = pluginCmd.Use
- addPluginCommands(plugin, pluginCmd, cmds)
+ addPluginCommands(plug, pluginCmd, cmds)
}
// addPluginCommands is a recursive method that adds each different level
// of sub-commands and flags for the plugins that have provided such information
-func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) {
+func addPluginCommands(plug plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) {
if cmds == nil {
return
}
@@ -249,7 +256,7 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug
// calling plugin.complete at every completion, which greatly simplifies
// development of plugin.complete for plugin developers.
baseCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- return pluginDynamicComp(plugin, cmd, args, toComplete)
+ return pluginDynamicComp(plug, cmd, args, toComplete)
}
}
@@ -304,7 +311,7 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug
Run: func(_ *cobra.Command, _ []string) {},
}
baseCmd.AddCommand(subCmd)
- addPluginCommands(plugin, subCmd, &cmd)
+ addPluginCommands(plug, subCmd, &cmd)
}
}
@@ -323,8 +330,19 @@ func loadFile(path string) (*pluginCommand, error) {
// pluginDynamicComp call the plugin.complete script of the plugin (if available)
// to obtain the dynamic completion choices. It must pass all the flags and sub-commands
// specified in the command-line to the plugin.complete executable (except helm's global flags)
-func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- md := plug.Metadata
+func pluginDynamicComp(plug plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+
+ subprocessPlug, ok := plug.(*plugin.SubprocessPluginRuntime)
+ if !ok {
+ // Completion only supported for subprocess plugins (TODO: fix this)
+ cobra.CompDebugln(fmt.Sprintf("Unsupported plugin runtime: %q", plug.Metadata().Runtime), settings.Debug)
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ var ignoreFlags bool
+ if cliConfig, ok := subprocessPlug.Metadata().Config.(*schema.ConfigCLIV1); ok {
+ ignoreFlags = cliConfig.IgnoreFlags
+ }
u, err := processParent(cmd, args)
if err != nil {
@@ -332,28 +350,35 @@ func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, t
}
// We will call the dynamic completion script of the plugin
- main := strings.Join([]string{plug.Dir, pluginDynamicCompletionExecutable}, string(filepath.Separator))
+ main := strings.Join([]string{plug.Dir(), pluginDynamicCompletionExecutable}, string(filepath.Separator))
// We must include all sub-commands passed on the command-line.
// To do that, we pass-in the entire CommandPath, except the first two elements
// which are 'helm' and 'pluginName'.
argv := strings.Split(cmd.CommandPath(), " ")[2:]
- if !md.IgnoreFlags {
+ if !ignoreFlags {
argv = append(argv, u...)
argv = append(argv, toComplete)
}
- plugin.SetupPluginEnv(settings, md.Name, plug.Dir)
cobra.CompDebugln(fmt.Sprintf("calling %s with args %v", main, argv), settings.Debug)
buf := new(bytes.Buffer)
- if err := callPluginExecutable(md.Name, main, argv, buf); err != nil {
+
+ // Prepare environment
+ env := os.Environ()
+ for k, v := range settings.EnvVars() {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ // For subprocess runtime, use InvokeWithEnv for dynamic completion
+ if err := subprocessPlug.InvokeWithEnv(main, argv, env, nil, buf, buf); err != nil {
// The dynamic completion file is optional for a plugin, so this error is ok.
cobra.CompDebugln(fmt.Sprintf("Unable to call %s: %v", main, err.Error()), settings.Debug)
return nil, cobra.ShellCompDirectiveDefault
}
var completions []string
- for _, comp := range strings.Split(buf.String(), "\n") {
+ for comp := range strings.SplitSeq(buf.String(), "\n") {
// Remove any empty lines
if len(comp) > 0 {
completions = append(completions, comp)
diff --git a/pkg/cmd/package.go b/pkg/cmd/package.go
index 7bc22dfb4..fc56e936a 100644
--- a/pkg/cmd/package.go
+++ b/pkg/cmd/package.go
@@ -17,12 +17,12 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/action"
@@ -57,7 +57,7 @@ func newPackageCmd(out io.Writer) *cobra.Command {
Long: packageDesc,
RunE: func(_ *cobra.Command, args []string) error {
if len(args) == 0 {
- return errors.Errorf("need at least one argument, the path to the chart")
+ return fmt.Errorf("need at least one argument, the path to the chart")
}
if client.Sign {
if client.Key == "" {
@@ -100,6 +100,7 @@ func newPackageCmd(out io.Writer) *cobra.Command {
RegistryClient: registryClient,
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
}
if err := downloadManager.Update(); err != nil {
diff --git a/pkg/cmd/package_test.go b/pkg/cmd/package_test.go
index 54358fc12..db4a2523a 100644
--- a/pkg/cmd/package_test.go
+++ b/pkg/cmd/package_test.go
@@ -23,6 +23,7 @@ import (
"strings"
"testing"
+ "helm.sh/helm/v4/internal/test/ensure"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
)
@@ -110,10 +111,10 @@ func TestPackage(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- cachePath := t.TempDir()
- defer testChdir(t, cachePath)()
+ t.Chdir(t.TempDir())
+ ensure.HelmHome(t)
- if err := os.MkdirAll("toot", 0777); err != nil {
+ if err := os.MkdirAll("toot", 0o777); err != nil {
t.Fatal(err)
}
diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go
index 3340e76e6..ba904ef5f 100644
--- a/pkg/cmd/plugin.go
+++ b/pkg/cmd/plugin.go
@@ -17,13 +17,10 @@ package cmd
import (
"io"
- "os"
- "os/exec"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
- "helm.sh/helm/v4/pkg/plugin"
+ "helm.sh/helm/v4/internal/plugin"
)
const pluginHelp = `
@@ -41,40 +38,18 @@ func newPluginCmd(out io.Writer) *cobra.Command {
newPluginListCmd(out),
newPluginUninstallCmd(out),
newPluginUpdateCmd(out),
+ newPluginPackageCmd(out),
+ newPluginVerifyCmd(out),
)
return cmd
}
// runHook will execute a plugin hook.
-func runHook(p *plugin.Plugin, event string) error {
- plugin.SetupPluginEnv(settings, p.Metadata.Name, p.Dir)
-
- cmds := p.Metadata.PlatformHooks[event]
- expandArgs := true
- if len(cmds) == 0 && len(p.Metadata.Hooks) > 0 {
- cmd := p.Metadata.Hooks[event]
- if len(cmd) > 0 {
- cmds = []plugin.PlatformCommand{{Command: "sh", Args: []string{"-c", cmd}}}
- expandArgs = false
- }
- }
-
- main, argv, err := plugin.PrepareCommands(cmds, expandArgs, []string{})
- if err != nil {
- return nil
+func runHook(p plugin.Plugin, event string) error {
+ pluginHook, ok := p.(plugin.PluginHook)
+ if ok {
+ return pluginHook.InvokeHook(event)
}
- prog := exec.Command(main, argv...)
-
- Debug("running %s hook: %s", event, prog)
-
- prog.Stdout, prog.Stderr = os.Stdout, os.Stderr
- if err := prog.Run(); err != nil {
- if eerr, ok := err.(*exec.ExitError); ok {
- os.Stderr.Write(eerr.Stderr)
- return errors.Errorf("plugin %s hook for %q exited with error", event, p.Metadata.Name)
- }
- return err
- }
return nil
}
diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go
index e17744cbb..0abefa76b 100644
--- a/pkg/cmd/plugin_install.go
+++ b/pkg/cmd/plugin_install.go
@@ -18,22 +18,43 @@ package cmd
import (
"fmt"
"io"
+ "log/slog"
+ "strings"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/installer"
"helm.sh/helm/v4/pkg/cmd/require"
- "helm.sh/helm/v4/pkg/plugin"
- "helm.sh/helm/v4/pkg/plugin/installer"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/registry"
)
type pluginInstallOptions struct {
source string
version string
+ // signing options
+ verify bool
+ keyring string
+ // OCI-specific options
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSverify bool
+ plainHTTP bool
+ password string
+ username string
}
const pluginInstallDesc = `
This command allows you to install a plugin from a url to a VCS repo or a local path.
+
+By default, plugin signatures are verified before installation when installing from
+tarballs (.tgz or .tar.gz). This requires a corresponding .prov file to be available
+alongside the tarball.
+For local development, plugins installed from local directories are automatically
+treated as "local dev" and do not require signatures.
+Use --verify=false to skip signature verification for remote plugins.
`
func newPluginInstallCmd(out io.Writer) *cobra.Command {
@@ -60,6 +81,17 @@ func newPluginInstallCmd(out io.Writer) *cobra.Command {
},
}
cmd.Flags().StringVar(&o.version, "version", "", "specify a version constraint. If this is not specified, the latest version is installed")
+ cmd.Flags().BoolVar(&o.verify, "verify", true, "verify the plugin signature before installing")
+ cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "location of public keys used for verification")
+
+ // Add OCI-specific flags
+ cmd.Flags().StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file")
+ cmd.Flags().StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file")
+ cmd.Flags().StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ cmd.Flags().BoolVar(&o.insecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the plugin download")
+ cmd.Flags().BoolVar(&o.plainHTTP, "plain-http", false, "use insecure HTTP connections for the plugin download")
+ cmd.Flags().StringVar(&o.username, "username", "", "registry username")
+ cmd.Flags().StringVar(&o.password, "password", "", "registry password")
return cmd
}
@@ -68,27 +100,86 @@ func (o *pluginInstallOptions) complete(args []string) error {
return nil
}
+func (o *pluginInstallOptions) newInstallerForSource() (installer.Installer, error) {
+ // Check if source is an OCI registry reference
+ if strings.HasPrefix(o.source, fmt.Sprintf("%s://", registry.OCIScheme)) {
+ // Build getter options for OCI
+ options := []getter.Option{
+ getter.WithTLSClientConfig(o.certFile, o.keyFile, o.caFile),
+ getter.WithInsecureSkipVerifyTLS(o.insecureSkipTLSverify),
+ getter.WithPlainHTTP(o.plainHTTP),
+ getter.WithBasicAuth(o.username, o.password),
+ }
+
+ return installer.NewOCIInstaller(o.source, options...)
+ }
+
+ // For non-OCI sources, use the original logic
+ return installer.NewForSource(o.source, o.version)
+}
+
func (o *pluginInstallOptions) run(out io.Writer) error {
installer.Debug = settings.Debug
- i, err := installer.NewForSource(o.source, o.version)
+ i, err := o.newInstallerForSource()
if err != nil {
return err
}
- if err := installer.Install(i); err != nil {
+
+ // Determine if we should verify based on installer type and flags
+ shouldVerify := o.verify
+
+ // Check if this is a local directory installation (for development)
+ if localInst, ok := i.(*installer.LocalInstaller); ok && !localInst.SupportsVerification() {
+ // Local directory installations are allowed without verification
+ shouldVerify = false
+ fmt.Fprintf(out, "Installing plugin from local directory (development mode)\n")
+ } else if shouldVerify {
+ // For remote installations, check if verification is supported
+ if verifier, ok := i.(installer.Verifier); !ok || !verifier.SupportsVerification() {
+ return fmt.Errorf("plugin source does not support verification. Use --verify=false to skip verification")
+ }
+ } else {
+ // User explicitly disabled verification
+ fmt.Fprintf(out, "WARNING: Skipping plugin signature verification\n")
+ }
+
+ // Set up installation options
+ opts := installer.Options{
+ Verify: shouldVerify,
+ Keyring: o.keyring,
+ }
+
+ // If verify is requested, show verification output
+ if shouldVerify {
+ fmt.Fprintf(out, "Verifying plugin signature...\n")
+ }
+
+ // Install the plugin with options
+ verifyResult, err := installer.InstallWithOptions(i, opts)
+ if err != nil {
return err
}
- Debug("loading plugin from %s", i.Path())
+ // If verification was successful, show the details
+ if verifyResult != nil {
+ for _, signer := range verifyResult.SignedBy {
+ fmt.Fprintf(out, "Signed by: %s\n", signer)
+ }
+ fmt.Fprintf(out, "Using Key With Fingerprint: %s\n", verifyResult.Fingerprint)
+ fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verifyResult.FileHash)
+ }
+
+ slog.Debug("loading plugin", "path", i.Path())
p, err := plugin.LoadDir(i.Path())
if err != nil {
- return errors.Wrap(err, "plugin is installed but unusable")
+ return fmt.Errorf("plugin is installed but unusable: %w", err)
}
if err := runHook(p, plugin.Install); err != nil {
return err
}
- fmt.Fprintf(out, "Installed plugin: %s\n", p.Metadata.Name)
+ fmt.Fprintf(out, "Installed plugin: %s\n", p.Metadata().Name)
return nil
}
diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go
index 9cca790ae..74e969e04 100644
--- a/pkg/cmd/plugin_list.go
+++ b/pkg/cmd/plugin_list.go
@@ -18,54 +18,74 @@ package cmd
import (
"fmt"
"io"
+ "log/slog"
+ "path/filepath"
+ "slices"
"github.com/gosuri/uitable"
"github.com/spf13/cobra"
- "helm.sh/helm/v4/pkg/plugin"
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/schema"
)
func newPluginListCmd(out io.Writer) *cobra.Command {
+ var pluginType string
cmd := &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
Short: "list installed Helm plugins",
ValidArgsFunction: noMoreArgsCompFunc,
RunE: func(_ *cobra.Command, _ []string) error {
- Debug("pluginDirs: %s", settings.PluginsDirectory)
- plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+ slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: pluginType,
+ }
+ plugins, err := plugin.FindPlugins(dirs, descriptor)
if err != nil {
return err
}
+ // Get signing info for all plugins
+ signingInfo := plugin.GetSigningInfoForPlugins(plugins)
+
table := uitable.New()
- table.AddRow("NAME", "VERSION", "DESCRIPTION")
+ table.AddRow("NAME", "VERSION", "TYPE", "APIVERSION", "PROVENANCE", "SOURCE")
for _, p := range plugins {
- table.AddRow(p.Metadata.Name, p.Metadata.Version, p.Metadata.Description)
+ m := p.Metadata()
+ sourceURL := m.SourceURL
+ if sourceURL == "" {
+ sourceURL = "unknown"
+ }
+ // Get signing status
+ signedStatus := "unknown"
+ if info, ok := signingInfo[m.Name]; ok {
+ signedStatus = info.Status
+ }
+ table.AddRow(m.Name, m.Version, m.Type, m.APIVersion, signedStatus, sourceURL)
}
fmt.Fprintln(out, table)
return nil
},
}
+
+ f := cmd.Flags()
+ f.StringVar(&pluginType, "type", "", "Plugin type")
+
return cmd
}
// Returns all plugins from plugins, except those with names matching ignoredPluginNames
-func filterPlugins(plugins []*plugin.Plugin, ignoredPluginNames []string) []*plugin.Plugin {
- // if ignoredPluginNames is nil, just return plugins
- if ignoredPluginNames == nil {
+func filterPlugins(plugins []plugin.Plugin, ignoredPluginNames []string) []plugin.Plugin {
+ // if ignoredPluginNames is nil or empty, just return plugins
+ if len(ignoredPluginNames) == 0 {
return plugins
}
- var filteredPlugins []*plugin.Plugin
+ var filteredPlugins []plugin.Plugin
for _, plugin := range plugins {
- found := false
- for _, ignoredName := range ignoredPluginNames {
- if plugin.Metadata.Name == ignoredName {
- found = true
- break
- }
- }
+ found := slices.Contains(ignoredPluginNames, plugin.Metadata().Name)
if !found {
filteredPlugins = append(filteredPlugins, plugin)
}
@@ -77,11 +97,20 @@ func filterPlugins(plugins []*plugin.Plugin, ignoredPluginNames []string) []*plu
// Provide dynamic auto-completion for plugin names
func compListPlugins(_ string, ignoredPluginNames []string) []string {
var pNames []string
- plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: "cli/v1",
+ }
+ plugins, err := plugin.FindPlugins(dirs, descriptor)
if err == nil && len(plugins) > 0 {
filteredPlugins := filterPlugins(plugins, ignoredPluginNames)
for _, p := range filteredPlugins {
- pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata.Name, p.Metadata.Usage))
+ m := p.Metadata()
+ var shortHelp string
+ if config, ok := m.Config.(*schema.ConfigCLIV1); ok {
+ shortHelp = config.ShortHelp
+ }
+ pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata().Name, shortHelp))
}
}
return pNames
diff --git a/pkg/cmd/plugin_package.go b/pkg/cmd/plugin_package.go
new file mode 100644
index 000000000..05f8bb5ad
--- /dev/null
+++ b/pkg/cmd/plugin_package.go
@@ -0,0 +1,216 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+const pluginPackageDesc = `
+This command packages a Helm plugin directory into a tarball.
+
+By default, the command will generate a provenance file signed with a PGP key.
+This ensures the plugin can be verified after installation.
+
+Use --sign=false to skip signing (not recommended for distribution).
+`
+
+type pluginPackageOptions struct {
+ sign bool
+ keyring string
+ key string
+ passphraseFile string
+ pluginPath string
+ destination string
+}
+
+func newPluginPackageCmd(out io.Writer) *cobra.Command {
+ o := &pluginPackageOptions{}
+
+ cmd := &cobra.Command{
+ Use: "package [PATH]",
+ Short: "package a plugin directory into a plugin archive",
+ Long: pluginPackageDesc,
+ Args: require.ExactArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.pluginPath = args[0]
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&o.sign, "sign", true, "use a PGP private key to sign this plugin")
+ f.StringVar(&o.key, "key", "", "name of the key to use when signing. Used if --sign is true")
+ f.StringVar(&o.keyring, "keyring", defaultKeyring(), "location of a public keyring")
+ f.StringVar(&o.passphraseFile, "passphrase-file", "", "location of a file which contains the passphrase for the signing key. Use \"-\" to read from stdin.")
+ f.StringVarP(&o.destination, "destination", "d", ".", "location to write the plugin tarball.")
+
+ return cmd
+}
+
+func (o *pluginPackageOptions) run(out io.Writer) error {
+ // Check if the plugin path exists and is a directory
+ fi, err := os.Stat(o.pluginPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("plugin package only supports directories, not tarballs")
+ }
+
+ // Load and validate plugin metadata
+ pluginMeta, err := plugin.LoadDir(o.pluginPath)
+ if err != nil {
+ return fmt.Errorf("invalid plugin directory: %w", err)
+ }
+
+ // Create destination directory if needed
+ if err := os.MkdirAll(o.destination, 0755); err != nil {
+ return err
+ }
+
+ // If signing is requested, prepare the signer first
+ var signer *provenance.Signatory
+ if o.sign {
+ // Load the signing key
+ signer, err = provenance.NewFromKeyring(o.keyring, o.key)
+ if err != nil {
+ return fmt.Errorf("error reading from keyring: %w", err)
+ }
+
+ // Get passphrase
+ passphraseFetcher := o.promptUser
+ if o.passphraseFile != "" {
+ passphraseFetcher, err = o.passphraseFileFetcher()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Decrypt the key
+ if err := signer.DecryptKey(passphraseFetcher); err != nil {
+ return err
+ }
+ } else {
+ // User explicitly disabled signing
+ fmt.Fprintf(out, "WARNING: Skipping plugin signing. This is not recommended for plugins intended for distribution.\n")
+ }
+
+ // Now create the tarball (only after signing prerequisites are met)
+ // Use plugin metadata for filename: PLUGIN_NAME-SEMVER.tgz
+ metadata := pluginMeta.Metadata()
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := filepath.Join(o.destination, filename)
+
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ return fmt.Errorf("failed to create tarball: %w", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(o.pluginPath, metadata.Name, tarFile); err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to create plugin tarball: %w", err)
+ }
+ tarFile.Close() // Ensure file is closed before signing
+
+ // If signing was requested, sign the tarball
+ if o.sign {
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to read tarball for signing: %w", err)
+ }
+
+ // Sign the plugin tarball data
+ sig, err := plugin.SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to sign plugin: %w", err)
+ }
+
+ // Write the signature
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil {
+ os.Remove(tarballPath)
+ return err
+ }
+
+ fmt.Fprintf(out, "Successfully signed. Signature written to: %s\n", provFile)
+ }
+
+ fmt.Fprintf(out, "Successfully packaged plugin and saved it to: %s\n", tarballPath)
+
+ return nil
+}
+
+func (o *pluginPackageOptions) promptUser(name string) ([]byte, error) {
+ fmt.Printf("Password for key %q > ", name)
+ pw, err := term.ReadPassword(int(syscall.Stdin))
+ fmt.Println()
+ return pw, err
+}
+
+func (o *pluginPackageOptions) passphraseFileFetcher() (provenance.PassphraseFetcher, error) {
+ file, err := openPassphraseFile(o.passphraseFile, os.Stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // Read the entire passphrase
+ passphrase, err := io.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+
+ // Trim any trailing newline characters (both \n and \r\n)
+ passphrase = bytes.TrimRight(passphrase, "\r\n")
+
+ return func(_ string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
+}
+
+// copied from action.openPassphraseFile
+// TODO: should we move this to pkg/action so we can reuse the func from there?
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+ if passphraseFile == "-" {
+ stat, err := stdin.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if (stat.Mode() & os.ModeNamedPipe) == 0 {
+ return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+ }
+ return stdin, nil
+ }
+ return os.Open(passphraseFile)
+}
diff --git a/pkg/cmd/plugin_package_test.go b/pkg/cmd/plugin_package_test.go
new file mode 100644
index 000000000..7d97562f8
--- /dev/null
+++ b/pkg/cmd/plugin_package_test.go
@@ -0,0 +1,170 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Common plugin.yaml content for v1 format tests
+const testPluginYAML = `apiVersion: v1
+name: test-plugin
+version: 1.0.0
+type: cli/v1
+runtime: subprocess
+config:
+ usage: test-plugin [flags]
+ shortHelp: A test plugin
+ longHelp: A test plugin for testing purposes
+runtimeConfig:
+ platformCommand:
+ - os: linux
+ command: echo
+ args: ["test"]`
+
+func TestPluginPackageWithoutSigning(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with sign=false
+ o := &pluginPackageOptions{
+ sign: false, // Explicitly disable signing
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should succeed without error
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ // Check that tarball was created with plugin name and version
+ tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tgz")
+ if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
+ t.Error("tarball should exist when sign=false")
+ }
+
+ // Check that no .prov file was created
+ provPath := tarballPath + ".prov"
+ if _, err := os.Stat(provPath); !os.IsNotExist(err) {
+ t.Error("provenance file should not exist when sign=false")
+ }
+
+ // Output should contain warning about skipping signing
+ output := out.String()
+ if !strings.Contains(output, "WARNING: Skipping plugin signing") {
+ t.Error("should print warning when signing is skipped")
+ }
+ if !strings.Contains(output, "Successfully packaged") {
+ t.Error("should print success message")
+ }
+}
+
+func TestPluginPackageDefaultRequiresSigning(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with default sign=true and invalid keyring
+ o := &pluginPackageOptions{
+ sign: true, // This is now the default
+ keyring: "/non/existent/keyring",
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should fail because signing is required by default
+ if err == nil {
+ t.Error("expected error when signing fails with default settings")
+ }
+
+ // Check that no tarball was created
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ if _, err := os.Stat(tarballPath); !os.IsNotExist(err) {
+ t.Error("tarball should not exist when signing fails")
+ }
+}
+
+func TestPluginPackageSigningFailure(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with sign flag but invalid keyring
+ o := &pluginPackageOptions{
+ sign: true,
+ keyring: "/non/existent/keyring", // This will cause signing to fail
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should get an error
+ if err == nil {
+ t.Error("expected error when signing fails, got nil")
+ }
+
+ // Check that no tarball was created
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ if _, err := os.Stat(tarballPath); !os.IsNotExist(err) {
+ t.Error("tarball should not exist when signing fails")
+ }
+
+ // Output should not contain success message
+ if bytes.Contains(out.Bytes(), []byte("Successfully packaged")) {
+ t.Error("should not print success message when signing fails")
+ }
+}
diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go
index 7c36698b1..f7a418569 100644
--- a/pkg/cmd/plugin_test.go
+++ b/pkg/cmd/plugin_test.go
@@ -17,14 +17,16 @@ package cmd
import (
"bytes"
+ "fmt"
"os"
"runtime"
- "sort"
"strings"
"testing"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
release "helm.sh/helm/v4/pkg/release/v1"
)
@@ -79,10 +81,9 @@ func TestManuallyProcessArgs(t *testing.T) {
t.Errorf("expected unknown flag %d to be %q, got %q", i, expectUnknown[i], k)
}
}
-
}
-func TestLoadPlugins(t *testing.T) {
+func TestLoadCLIPlugins(t *testing.T) {
settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
settings.RepositoryConfig = "testdata/helmhome/helm/repositories.yaml"
settings.RepositoryCache = "testdata/helmhome/helm/repository"
@@ -91,16 +92,16 @@ func TestLoadPlugins(t *testing.T) {
out bytes.Buffer
cmd cobra.Command
)
- loadPlugins(&cmd, &out)
+ loadCLIPlugins(&cmd, &out)
- envs := strings.Join([]string{
- "fullenv",
- "testdata/helmhome/helm/plugins/fullenv",
- "testdata/helmhome/helm/plugins",
- "testdata/helmhome/helm/repositories.yaml",
- "testdata/helmhome/helm/repository",
- os.Args[0],
- }, "\n")
+ fullEnvOutput := strings.Join([]string{
+ "HELM_PLUGIN_NAME=fullenv",
+ "HELM_PLUGIN_DIR=testdata/helmhome/helm/plugins/fullenv",
+ "HELM_PLUGINS=testdata/helmhome/helm/plugins",
+ "HELM_REPOSITORY_CONFIG=testdata/helmhome/helm/repositories.yaml",
+ "HELM_REPOSITORY_CACHE=testdata/helmhome/helm/repository",
+ fmt.Sprintf("HELM_BIN=%s", os.Args[0]),
+ }, "\n") + "\n"
// Test that the YAML file was correctly converted to a command.
tests := []struct {
@@ -113,51 +114,50 @@ func TestLoadPlugins(t *testing.T) {
}{
{"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0},
{"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0},
- {"env", "env stuff", "show the env", "env\n", []string{}, 0},
+ {"env", "env stuff", "show the env", "HELM_PLUGIN_NAME=env\n", []string{}, 0},
{"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2},
- {"fullenv", "show env vars", "show all env vars", envs + "\n", []string{}, 0},
+ {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0},
}
- plugins := cmd.Commands()
+ pluginCmds := cmd.Commands()
- if len(plugins) != len(tests) {
- t.Fatalf("Expected %d plugins, got %d", len(tests), len(plugins))
- }
+ require.Len(t, pluginCmds, len(tests), "Expected %d plugins, got %d", len(tests), len(pluginCmds))
- for i := 0; i < len(plugins); i++ {
+ for i := range pluginCmds {
out.Reset()
tt := tests[i]
- pp := plugins[i]
- if pp.Use != tt.use {
- t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pp.Use)
- }
- if pp.Short != tt.short {
- t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pp.Short)
- }
- if pp.Long != tt.long {
- t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pp.Long)
- }
+ pluginCmd := pluginCmds[i]
+ t.Run(fmt.Sprintf("%s-%d", pluginCmd.Name(), i), func(t *testing.T) {
+ out.Reset()
+ if pluginCmd.Use != tt.use {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pluginCmd.Use)
+ }
+ if pluginCmd.Short != tt.short {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pluginCmd.Short)
+ }
+ if pluginCmd.Long != tt.long {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pluginCmd.Long)
+ }
- // Currently, plugins assume a Linux subsystem. Skip the execution
- // tests until this is fixed
- if runtime.GOOS != "windows" {
- if err := pp.RunE(pp, tt.args); err != nil {
- if tt.code > 0 {
- perr, ok := err.(PluginError)
- if !ok {
- t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err)
+ // Currently, plugins assume a Linux subsystem. Skip the execution
+ // tests until this is fixed
+ if runtime.GOOS != "windows" {
+ if err := pluginCmd.RunE(pluginCmd, tt.args); err != nil {
+ if tt.code > 0 {
+ cerr, ok := err.(CommandError)
+ if !ok {
+ t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err)
+ }
+ if cerr.ExitCode != tt.code {
+ t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode)
+ }
+ } else {
+ t.Errorf("Error running %s: %+v", tt.use, err)
}
- if perr.Code != tt.code {
- t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, perr.Code)
- }
- } else {
- t.Errorf("Error running %s: %+v", tt.use, err)
}
+ assert.Equal(t, tt.expect, out.String(), "expected output for %q", tt.use)
}
- if out.String() != tt.expect {
- t.Errorf("Expected %s to output:\n%s\ngot\n%s", tt.use, tt.expect, out.String())
- }
- }
+ })
}
}
@@ -170,7 +170,7 @@ func TestLoadPluginsWithSpace(t *testing.T) {
out bytes.Buffer
cmd cobra.Command
)
- loadPlugins(&cmd, &out)
+ loadCLIPlugins(&cmd, &out)
envs := strings.Join([]string{
"fullenv",
@@ -218,20 +218,18 @@ func TestLoadPluginsWithSpace(t *testing.T) {
if runtime.GOOS != "windows" {
if err := pp.RunE(pp, tt.args); err != nil {
if tt.code > 0 {
- perr, ok := err.(PluginError)
+ cerr, ok := err.(CommandError)
if !ok {
t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err)
}
- if perr.Code != tt.code {
- t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, perr.Code)
+ if cerr.ExitCode != tt.code {
+ t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode)
}
} else {
t.Errorf("Error running %s: %+v", tt.use, err)
}
}
- if out.String() != tt.expect {
- t.Errorf("Expected %s to output:\n%s\ngot\n%s", tt.use, tt.expect, out.String())
- }
+ assert.Equal(t, tt.expect, out.String(), "expected output for %s", tt.use)
}
}
}
@@ -243,7 +241,7 @@ type staticCompletionDetails struct {
next []staticCompletionDetails
}
-func TestLoadPluginsForCompletion(t *testing.T) {
+func TestLoadCLIPluginsForCompletion(t *testing.T) {
settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
var out bytes.Buffer
@@ -251,8 +249,7 @@ func TestLoadPluginsForCompletion(t *testing.T) {
cmd := &cobra.Command{
Use: "completion",
}
-
- loadPlugins(cmd, &out)
+ loadCLIPlugins(cmd, &out)
tests := []staticCompletionDetails{
{"args", []string{}, []string{}, []staticCompletionDetails{}},
@@ -276,30 +273,18 @@ func TestLoadPluginsForCompletion(t *testing.T) {
}
func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompletionDetails) {
- if len(plugins) != len(tests) {
- t.Fatalf("Expected commands %v, got %v", tests, plugins)
- }
+ t.Helper()
+ require.Len(t, plugins, len(tests), "Expected commands %v, got %v", tests, plugins)
- for i := 0; i < len(plugins); i++ {
+ is := assert.New(t)
+ for i := range plugins {
pp := plugins[i]
tt := tests[i]
- if pp.Use != tt.use {
- t.Errorf("%s: Expected Use=%q, got %q", pp.Name(), tt.use, pp.Use)
- }
+ is.Equal(pp.Use, tt.use, "Expected Use=%q, got %q", tt.use, pp.Use)
targs := tt.validArgs
pargs := pp.ValidArgs
- if len(targs) != len(pargs) {
- t.Fatalf("%s: expected args %v, got %v", pp.Name(), targs, pargs)
- }
-
- sort.Strings(targs)
- sort.Strings(pargs)
- for j := range targs {
- if targs[j] != pargs[j] {
- t.Errorf("%s: expected validArg=%q, got %q", pp.Name(), targs[j], pargs[j])
- }
- }
+ is.ElementsMatch(targs, pargs)
tflags := tt.flags
var pflags []string
@@ -309,24 +294,14 @@ func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompleti
pflags = append(pflags, flag.Shorthand)
}
})
- if len(tflags) != len(pflags) {
- t.Fatalf("%s: expected flags %v, got %v", pp.Name(), tflags, pflags)
- }
+ is.ElementsMatch(tflags, pflags)
- sort.Strings(tflags)
- sort.Strings(pflags)
- for j := range tflags {
- if tflags[j] != pflags[j] {
- t.Errorf("%s: expected flag=%q, got %q", pp.Name(), tflags[j], pflags[j])
- }
- }
// Check the next level
checkCommand(t, pp.Commands(), tt.next)
}
}
func TestPluginDynamicCompletion(t *testing.T) {
-
tests := []cmdTestCase{{
name: "completion for plugin",
cmd: "__complete args ''",
@@ -359,15 +334,15 @@ func TestPluginDynamicCompletion(t *testing.T) {
}
}
-func TestLoadPlugins_HelmNoPlugins(t *testing.T) {
+func TestLoadCLIPlugins_HelmNoPlugins(t *testing.T) {
settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
settings.RepositoryConfig = "testdata/helmhome/helm/repository"
- os.Setenv("HELM_NO_PLUGINS", "1")
+ t.Setenv("HELM_NO_PLUGINS", "1")
out := bytes.NewBuffer(nil)
cmd := &cobra.Command{}
- loadPlugins(cmd, out)
+ loadCLIPlugins(cmd, out)
plugins := cmd.Commands()
if len(plugins) != 0 {
@@ -376,7 +351,6 @@ func TestLoadPlugins_HelmNoPlugins(t *testing.T) {
}
func TestPluginCmdsCompletion(t *testing.T) {
-
tests := []cmdTestCase{{
name: "completion for plugin update",
cmd: "__complete plugin update ''",
diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go
index c1f90ca49..85eb46219 100644
--- a/pkg/cmd/plugin_uninstall.go
+++ b/pkg/cmd/plugin_uninstall.go
@@ -16,15 +16,16 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
+ "log/slog"
"os"
- "strings"
+ "path/filepath"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
- "helm.sh/helm/v4/pkg/plugin"
+ "helm.sh/helm/v4/internal/plugin"
)
type pluginUninstallOptions struct {
@@ -60,39 +61,70 @@ func (o *pluginUninstallOptions) complete(args []string) error {
}
func (o *pluginUninstallOptions) run(out io.Writer) error {
- Debug("loading installed plugins from %s", settings.PluginsDirectory)
- plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+ slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
+ plugins, err := plugin.LoadAll(settings.PluginsDirectory)
if err != nil {
return err
}
- var errorPlugins []string
+ var errorPlugins []error
for _, name := range o.names {
if found := findPlugin(plugins, name); found != nil {
if err := uninstallPlugin(found); err != nil {
- errorPlugins = append(errorPlugins, fmt.Sprintf("Failed to uninstall plugin %s, got error (%v)", name, err))
+ errorPlugins = append(errorPlugins, fmt.Errorf("failed to uninstall plugin %s, got error (%v)", name, err))
} else {
fmt.Fprintf(out, "Uninstalled plugin: %s\n", name)
}
} else {
- errorPlugins = append(errorPlugins, fmt.Sprintf("Plugin: %s not found", name))
+ errorPlugins = append(errorPlugins, fmt.Errorf("plugin: %s not found", name))
}
}
if len(errorPlugins) > 0 {
- return errors.New(strings.Join(errorPlugins, "\n"))
+ return errors.Join(errorPlugins...)
}
return nil
}
-func uninstallPlugin(p *plugin.Plugin) error {
- if err := os.RemoveAll(p.Dir); err != nil {
+func uninstallPlugin(p plugin.Plugin) error {
+ if err := os.RemoveAll(p.Dir()); err != nil {
return err
}
+
+ // Clean up versioned tarball and provenance files from HELM_PLUGINS directory
+ // These files are saved with pattern: PLUGIN_NAME-VERSION.tgz and PLUGIN_NAME-VERSION.tgz.prov
+ pluginName := p.Metadata().Name
+ pluginVersion := p.Metadata().Version
+ pluginsDir := settings.PluginsDirectory
+
+ // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov
+ if pluginVersion != "" {
+ versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion)
+
+ // Remove tarball file
+ tarballPath := filepath.Join(pluginsDir, versionedBasename)
+ if _, err := os.Stat(tarballPath); err == nil {
+ slog.Debug("removing versioned tarball", "path", tarballPath)
+ if err := os.Remove(tarballPath); err != nil {
+ slog.Debug("failed to remove tarball file", "path", tarballPath, "error", err)
+ }
+ }
+
+ // Remove provenance file
+ provPath := filepath.Join(pluginsDir, versionedBasename+".prov")
+ if _, err := os.Stat(provPath); err == nil {
+ slog.Debug("removing versioned provenance", "path", provPath)
+ if err := os.Remove(provPath); err != nil {
+ slog.Debug("failed to remove provenance file", "path", provPath, "error", err)
+ }
+ }
+ }
+
return runHook(p, plugin.Delete)
}
-func findPlugin(plugins []*plugin.Plugin, name string) *plugin.Plugin {
+// TODO should this be in pkg/plugin/loader.go?
+func findPlugin(plugins []plugin.Plugin, name string) plugin.Plugin {
for _, p := range plugins {
- if p.Metadata.Name == name {
+ if p.Metadata().Name == name {
return p
}
}
diff --git a/pkg/cmd/plugin_uninstall_test.go b/pkg/cmd/plugin_uninstall_test.go
new file mode 100644
index 000000000..93d4dc8a8
--- /dev/null
+++ b/pkg/cmd/plugin_uninstall_test.go
@@ -0,0 +1,146 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestPluginUninstallCleansUpVersionedFiles(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a fake plugin directory structure in a temp directory
+ pluginsDir := t.TempDir()
+ t.Setenv("HELM_PLUGINS", pluginsDir)
+
+ // Create a new settings instance that will pick up the environment variable
+ testSettings := cli.New()
+ pluginName := "test-plugin"
+
+ // Create plugin directory
+ pluginDir := filepath.Join(pluginsDir, pluginName)
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create plugin.yaml
+ pluginYAML := `name: test-plugin
+version: 1.2.3
+description: Test plugin
+command: $HELM_PLUGIN_DIR/test-plugin
+`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create versioned tarball and provenance files
+ tarballFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz")
+ provFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz.prov")
+ otherVersionTarball := filepath.Join(pluginsDir, "test-plugin-2.0.0.tgz")
+
+ if err := os.WriteFile(tarballFile, []byte("fake tarball"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.WriteFile(provFile, []byte("fake provenance"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ // Create another version that should NOT be removed
+ if err := os.WriteFile(otherVersionTarball, []byte("other version"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Load the plugin
+ p, err := plugin.LoadDir(pluginDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a test uninstall function that uses our test settings
+ testUninstallPlugin := func(plugin plugin.Plugin) error {
+ if err := os.RemoveAll(plugin.Dir()); err != nil {
+ return err
+ }
+
+ // Clean up versioned tarball and provenance files from test HELM_PLUGINS directory
+ pluginName := plugin.Metadata().Name
+ pluginVersion := plugin.Metadata().Version
+ testPluginsDir := testSettings.PluginsDirectory
+
+ // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov
+ if pluginVersion != "" {
+ versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion)
+
+ // Remove tarball file
+ tarballPath := filepath.Join(testPluginsDir, versionedBasename)
+ if _, err := os.Stat(tarballPath); err == nil {
+ if err := os.Remove(tarballPath); err != nil {
+ t.Logf("failed to remove tarball file: %v", err)
+ }
+ }
+
+ // Remove provenance file
+ provPath := filepath.Join(testPluginsDir, versionedBasename+".prov")
+ if _, err := os.Stat(provPath); err == nil {
+ if err := os.Remove(provPath); err != nil {
+ t.Logf("failed to remove provenance file: %v", err)
+ }
+ }
+ }
+
+ // Skip runHook in test
+ return nil
+ }
+
+ // Verify files exist before uninstall
+ if _, err := os.Stat(tarballFile); os.IsNotExist(err) {
+ t.Fatal("tarball file should exist before uninstall")
+ }
+ if _, err := os.Stat(provFile); os.IsNotExist(err) {
+ t.Fatal("provenance file should exist before uninstall")
+ }
+ if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) {
+ t.Fatal("other version tarball should exist before uninstall")
+ }
+
+ // Uninstall the plugin
+ if err := testUninstallPlugin(p); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify plugin directory is removed
+ if _, err := os.Stat(pluginDir); !os.IsNotExist(err) {
+ t.Error("plugin directory should be removed")
+ }
+
+ // Verify only exact version files are removed
+ if _, err := os.Stat(tarballFile); !os.IsNotExist(err) {
+ t.Error("versioned tarball file should be removed")
+ }
+ if _, err := os.Stat(provFile); !os.IsNotExist(err) {
+ t.Error("versioned provenance file should be removed")
+ }
+ // Verify other version files are NOT removed
+ if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) {
+ t.Error("other version tarball should NOT be removed")
+ }
+}
diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go
index cbbd8994c..c6d4b8530 100644
--- a/pkg/cmd/plugin_update.go
+++ b/pkg/cmd/plugin_update.go
@@ -16,16 +16,16 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
+ "log/slog"
"path/filepath"
- "strings"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
- "helm.sh/helm/v4/pkg/plugin"
- "helm.sh/helm/v4/pkg/plugin/installer"
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/installer"
)
type pluginUpdateOptions struct {
@@ -62,32 +62,32 @@ func (o *pluginUpdateOptions) complete(args []string) error {
func (o *pluginUpdateOptions) run(out io.Writer) error {
installer.Debug = settings.Debug
- Debug("loading installed plugins from %s", settings.PluginsDirectory)
- plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+ slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
+ plugins, err := plugin.LoadAll(settings.PluginsDirectory)
if err != nil {
return err
}
- var errorPlugins []string
+ var errorPlugins []error
for _, name := range o.names {
if found := findPlugin(plugins, name); found != nil {
if err := updatePlugin(found); err != nil {
- errorPlugins = append(errorPlugins, fmt.Sprintf("Failed to update plugin %s, got error (%v)", name, err))
+ errorPlugins = append(errorPlugins, fmt.Errorf("failed to update plugin %s, got error (%v)", name, err))
} else {
fmt.Fprintf(out, "Updated plugin: %s\n", name)
}
} else {
- errorPlugins = append(errorPlugins, fmt.Sprintf("Plugin: %s not found", name))
+ errorPlugins = append(errorPlugins, fmt.Errorf("plugin: %s not found", name))
}
}
if len(errorPlugins) > 0 {
- return errors.New(strings.Join(errorPlugins, "\n"))
+ return errors.Join(errorPlugins...)
}
return nil
}
-func updatePlugin(p *plugin.Plugin) error {
- exactLocation, err := filepath.EvalSymlinks(p.Dir)
+func updatePlugin(p plugin.Plugin) error {
+ exactLocation, err := filepath.EvalSymlinks(p.Dir())
if err != nil {
return err
}
@@ -104,7 +104,7 @@ func updatePlugin(p *plugin.Plugin) error {
return err
}
- Debug("loading plugin from %s", i.Path())
+ slog.Debug("loading plugin", "path", i.Path())
updatedPlugin, err := plugin.LoadDir(i.Path())
if err != nil {
return err
diff --git a/pkg/cmd/plugin_verify.go b/pkg/cmd/plugin_verify.go
new file mode 100644
index 000000000..5f89e743e
--- /dev/null
+++ b/pkg/cmd/plugin_verify.go
@@ -0,0 +1,123 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const pluginVerifyDesc = `
+This command verifies that a Helm plugin has a valid provenance file,
+and that the provenance file is signed by a trusted PGP key.
+
+It supports both:
+- Plugin tarballs (.tgz or .tar.gz files)
+- Installed plugin directories
+
+For installed plugins, use the path shown by 'helm env HELM_PLUGINS' followed
+by the plugin name. For example:
+ helm plugin verify ~/.local/share/helm/plugins/example-cli
+
+To generate a signed plugin, use the 'helm plugin package --sign' command.
+`
+
+type pluginVerifyOptions struct {
+ keyring string
+ pluginPath string
+}
+
+func newPluginVerifyCmd(out io.Writer) *cobra.Command {
+ o := &pluginVerifyOptions{}
+
+ cmd := &cobra.Command{
+ Use: "verify [PATH]",
+ Short: "verify that a plugin at the given path has been signed and is valid",
+ Long: pluginVerifyDesc,
+ Args: require.ExactArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.pluginPath = args[0]
+ return o.run(out)
+ },
+ }
+
+ cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "keyring containing public keys")
+
+ return cmd
+}
+
+func (o *pluginVerifyOptions) run(out io.Writer) error {
+ // Verify the plugin path exists
+ fi, err := os.Stat(o.pluginPath)
+ if err != nil {
+ return err
+ }
+
+ // Only support tarball verification
+ if fi.IsDir() {
+ return fmt.Errorf("directory verification not supported - only plugin tarballs can be verified")
+ }
+
+ // Verify it's a tarball
+ if !plugin.IsTarball(o.pluginPath) {
+ return fmt.Errorf("plugin file must be a gzipped tarball (.tar.gz or .tgz)")
+ }
+
+ // Look for provenance file
+ provFile := o.pluginPath + ".prov"
+ if _, err := os.Stat(provFile); err != nil {
+ return fmt.Errorf("could not find provenance file %s: %w", provFile, err)
+ }
+
+ // Read the files
+ archiveData, err := os.ReadFile(o.pluginPath)
+ if err != nil {
+ return fmt.Errorf("failed to read plugin file: %w", err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ return fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ // Verify the plugin using data
+ verification, err := plugin.VerifyPlugin(archiveData, provData, filepath.Base(o.pluginPath), o.keyring)
+ if err != nil {
+ return err
+ }
+
+ // Output verification details
+ for name := range verification.SignedBy.Identities {
+ fmt.Fprintf(out, "Signed by: %v\n", name)
+ }
+ fmt.Fprintf(out, "Using Key With Fingerprint: %X\n", verification.SignedBy.PrimaryKey.Fingerprint)
+
+ // Only show hash for tarballs
+ if verification.FileHash != "" {
+ fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verification.FileHash)
+ } else {
+ fmt.Fprintf(out, "Plugin Metadata Verified: %s\n", verification.FileName)
+ }
+
+ return nil
+}
diff --git a/pkg/cmd/plugin_verify_test.go b/pkg/cmd/plugin_verify_test.go
new file mode 100644
index 000000000..e631814dd
--- /dev/null
+++ b/pkg/cmd/plugin_verify_test.go
@@ -0,0 +1,264 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+func TestPluginVerifyCmd_NoArgs(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when no arguments provided")
+ }
+ if !strings.Contains(err.Error(), "requires 1 argument") {
+ t.Errorf("expected 'requires 1 argument' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_TooManyArgs(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"plugin1", "plugin2"})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when too many arguments provided")
+ }
+ if !strings.Contains(err.Error(), "requires 1 argument") {
+ t.Errorf("expected 'requires 1 argument' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_NonexistentFile(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"/nonexistent/plugin.tgz"})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when plugin file doesn't exist")
+ }
+}
+
+func TestPluginVerifyCmd_MissingProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball without .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginTgz})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when .prov file is missing")
+ }
+ if !strings.Contains(err.Error(), "could not find provenance file") {
+ t.Errorf("expected 'could not find provenance file' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_InvalidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball with invalid .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ // Create invalid .prov file
+ provFile := pluginTgz + ".prov"
+ if err := os.WriteFile(provFile, []byte("invalid provenance"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(provFile)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginTgz})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when .prov file is invalid")
+ }
+}
+
+func TestPluginVerifyCmd_DirectoryNotSupported(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin directory
+ pluginDir := createTestPluginDir(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginDir})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when verifying directory")
+ }
+ if !strings.Contains(err.Error(), "directory verification not supported") {
+ t.Errorf("expected 'directory verification not supported' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_KeyringFlag(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball with .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ // Create .prov file
+ provFile := pluginTgz + ".prov"
+ createProvFile(t, provFile, pluginTgz, "")
+ defer os.Remove(provFile)
+
+ // Create empty keyring file
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"--keyring", keyring, pluginTgz})
+
+ // Should fail with keyring error but command parsing should work
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error with empty keyring")
+ }
+ // The important thing is that the keyring flag was parsed and used
+}
+
+func TestPluginVerifyOptions_Run_Success(t *testing.T) {
+ // Skip this test as it would require real PGP keys and valid signatures
+ // The core verification logic is thoroughly tested in internal/plugin/verify_test.go
+ t.Skip("Success case requires real PGP keys - core logic tested in internal/plugin/verify_test.go")
+}
+
+// Helper functions for test setup
+
+func createTestPluginDir(t *testing.T) string {
+ t.Helper()
+
+ // Create temporary directory with plugin structure
+ tmpDir := t.TempDir()
+ pluginDir := filepath.Join(tmpDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatalf("Failed to create plugin directory: %v", err)
+ }
+
+ // Use the same plugin YAML as other cmd tests
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatalf("Failed to create plugin.yaml: %v", err)
+ }
+
+ return pluginDir
+}
+
+func createTestPluginTarball(t *testing.T) string {
+ t.Helper()
+
+ pluginDir := createTestPluginDir(t)
+
+ // Create tarball using the plugin package helper
+ tmpDir := filepath.Dir(pluginDir)
+ tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz")
+ tarFile, err := os.Create(tgzPath)
+ if err != nil {
+ t.Fatalf("Failed to create tarball file: %v", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ t.Fatalf("Failed to create tarball: %v", err)
+ }
+
+ return tgzPath
+}
+
+func createProvFile(t *testing.T, provFile, pluginTgz, hash string) {
+ t.Helper()
+
+ var hashStr string
+ if hash == "" {
+ // Calculate actual hash of the tarball
+ data, err := os.ReadFile(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to read tarball for hashing: %v", err)
+ }
+ hashSum := sha256.Sum256(data)
+ hashStr = fmt.Sprintf("sha256:%x", hashSum)
+ } else {
+ // Use provided hash
+ hashStr = hash
+ }
+
+ // Create properly formatted provenance file with specified hash
+ provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+name: test-plugin
+version: 1.0.0
+description: Test plugin for verification
+files:
+ test-plugin-1.0.0.tgz: %s
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQEcBAEBCAAGBQJktest...
+-----END PGP SIGNATURE-----
+`, hashStr)
+ if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil {
+ t.Fatalf("Failed to create provenance file: %v", err)
+ }
+}
+
+func createTestKeyring(t *testing.T) string {
+ t.Helper()
+
+ // Create a temporary keyring file
+ tmpDir := t.TempDir()
+ keyringPath := filepath.Join(tmpDir, "pubring.gpg")
+
+ // Create empty keyring for testing
+ if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil {
+ t.Fatalf("Failed to create test keyring: %v", err)
+ }
+
+ return keyringPath
+}
diff --git a/pkg/cmd/pull.go b/pkg/cmd/pull.go
index 5d188ee4f..e3d93c049 100644
--- a/pkg/cmd/pull.go
+++ b/pkg/cmd/pull.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
+ "log/slog"
"github.com/spf13/cobra"
@@ -60,7 +61,7 @@ func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
RunE: func(_ *cobra.Command, args []string) error {
client.Settings = settings
if client.Version == "" && client.Devel {
- Debug("setting version to >0.0.0-0")
+ slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go
index c30c94b49..c24bf33b7 100644
--- a/pkg/cmd/pull_test.go
+++ b/pkg/cmd/pull_test.go
@@ -24,7 +24,7 @@ import (
"path/filepath"
"testing"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestPullCmd(t *testing.T) {
@@ -147,6 +147,18 @@ func TestPullCmd(t *testing.T) {
failExpect: "Failed to fetch chart version",
wantError: true,
},
+ {
+ name: "Chart fetch using repo URL with untardir",
+ args: "signtest --version=0.1.0 --untar --untardir repo-url-test --repo " + srv.URL(),
+ expectFile: "./signtest",
+ expectDir: true,
+ },
+ {
+ name: "Chart fetch using repo URL with untardir and previous pull",
+ args: "signtest --version=0.1.0 --untar --untardir repo-url-test --repo " + srv.URL(),
+ failExpect: "failed to untar",
+ wantError: true,
+ },
{
name: "Fetch OCI Chart",
args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart --version 0.1.0", ociSrv.RegistryURL),
@@ -200,15 +212,18 @@ func TestPullCmd(t *testing.T) {
},
}
+ contentCache := t.TempDir()
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
outdir := srv.Root()
- cmd := fmt.Sprintf("fetch %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s --plain-http",
+ cmd := fmt.Sprintf("fetch %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s --content-cache %s --plain-http",
tt.args,
outdir,
filepath.Join(outdir, "repositories.yaml"),
outdir,
filepath.Join(outdir, "config.json"),
+ contentCache,
)
// Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182
if tt.existFile != "" {
@@ -256,6 +271,78 @@ func TestPullCmd(t *testing.T) {
}
}
+// runPullTests is a helper function to run pull command tests with common logic
+func runPullTests(t *testing.T, tests []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ expectFile string
+ expectDir bool
+}, outdir string, additionalFlags string) {
+ t.Helper()
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s %s",
+ tt.args,
+ outdir,
+ filepath.Join(outdir, "repositories.yaml"),
+ outdir,
+ filepath.Join(outdir, "config.json"),
+ additionalFlags,
+ )
+ // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182
+ if tt.existFile != "" {
+ file := filepath.Join(outdir, tt.existFile)
+ _, err := os.Create(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if tt.existDir != "" {
+ file := filepath.Join(outdir, tt.existDir)
+ err := os.Mkdir(file, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, _, err := executeActionCommand(cmd)
+ if err != nil {
+ if tt.wantError {
+ if tt.wantErrorMsg != "" && tt.wantErrorMsg == err.Error() {
+ t.Fatalf("Actual error %s, not equal to expected error %s", err, tt.wantErrorMsg)
+ }
+ return
+ }
+ t.Fatalf("%q reported error: %s", tt.name, err)
+ }
+
+ ef := filepath.Join(outdir, tt.expectFile)
+ fi, err := os.Stat(ef)
+ if err != nil {
+ t.Errorf("%q: expected a file at %s. %s", tt.name, ef, err)
+ }
+ if fi.IsDir() != tt.expectDir {
+ t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir)
+ }
+ })
+ }
+}
+
+// buildOCIURL is a helper function to build OCI URLs with credentials
+func buildOCIURL(registryURL, chartName, version, username, password string) string {
+ baseURL := fmt.Sprintf("oci://%s/u/ocitestuser/%s", registryURL, chartName)
+ if version != "" {
+ baseURL += fmt.Sprintf(" --version %s", version)
+ }
+ if username != "" && password != "" {
+ baseURL += fmt.Sprintf(" --username %s --password %s", username, password)
+ }
+ return baseURL
+}
+
func TestPullWithCredentialsCmd(t *testing.T) {
srv := repotest.NewTempServer(
t,
@@ -311,52 +398,7 @@ func TestPullWithCredentialsCmd(t *testing.T) {
},
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- outdir := srv.Root()
- cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s",
- tt.args,
- outdir,
- filepath.Join(outdir, "repositories.yaml"),
- outdir,
- filepath.Join(outdir, "config.json"),
- )
- // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182
- if tt.existFile != "" {
- file := filepath.Join(outdir, tt.existFile)
- _, err := os.Create(file)
- if err != nil {
- t.Fatal(err)
- }
- }
- if tt.existDir != "" {
- file := filepath.Join(outdir, tt.existDir)
- err := os.Mkdir(file, 0755)
- if err != nil {
- t.Fatal(err)
- }
- }
- _, _, err := executeActionCommand(cmd)
- if err != nil {
- if tt.wantError {
- if tt.wantErrorMsg != "" && tt.wantErrorMsg == err.Error() {
- t.Fatalf("Actual error %s, not equal to expected error %s", err, tt.wantErrorMsg)
- }
- return
- }
- t.Fatalf("%q reported error: %s", tt.name, err)
- }
-
- ef := filepath.Join(outdir, tt.expectFile)
- fi, err := os.Stat(ef)
- if err != nil {
- t.Errorf("%q: expected a file at %s. %s", tt.name, ef, err)
- }
- if fi.IsDir() != tt.expectDir {
- t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir)
- }
- })
- }
+ runPullTests(t, tests, srv.Root(), "")
}
func TestPullVersionCompletion(t *testing.T) {
@@ -389,6 +431,72 @@ func TestPullVersionCompletion(t *testing.T) {
runTestCmd(t, tests)
}
+func TestPullWithCredentialsCmdOCIRegistry(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ )
+ defer srv.Stop()
+
+ ociSrv, err := repotest.NewOCIServer(t, srv.Root())
+ if err != nil {
+ t.Fatal(err)
+ }
+ ociSrv.Run(t)
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ // all flags will get "-d outdir" appended.
+ tests := []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ expectFile string
+ expectDir bool
+ }{
+ {
+ name: "OCI Chart fetch with credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword),
+ expectFile: "./oci-dependent-chart-0.1.0.tgz",
+ },
+ {
+ name: "OCI Chart fetch with credentials and untar",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar",
+ expectFile: "./oci-dependent-chart",
+ expectDir: true,
+ },
+ {
+ name: "OCI Chart fetch with credentials and untardir",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar --untardir ocitest-credentials",
+ expectFile: "./ocitest-credentials",
+ expectDir: true,
+ },
+ {
+ name: "Fail fetching OCI chart with wrong credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", "wronguser", "wrongpass"),
+ wantError: true,
+ },
+ {
+ name: "Fail fetching non-existent OCI chart with credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword),
+ wantError: true,
+ },
+ {
+ name: "Fail fetching OCI chart without version specified",
+ args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "", ociSrv.TestUsername, ociSrv.TestPassword),
+ wantErrorMsg: "Error: --version flag is explicitly required for OCI registries",
+ wantError: true,
+ },
+ }
+
+ runPullTests(t, tests, srv.Root(), "--plain-http")
+}
+
func TestPullFileCompletion(t *testing.T) {
checkFileCompletion(t, "pull", false)
checkFileCompletion(t, "pull repo/chart", false)
diff --git a/pkg/cmd/registry_login.go b/pkg/cmd/registry_login.go
index 1dfb3c798..1350fb244 100644
--- a/pkg/cmd/registry_login.go
+++ b/pkg/cmd/registry_login.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
+ "log/slog"
"os"
"strings"
@@ -33,6 +34,10 @@ import (
const registryLoginDesc = `
Authenticate to a remote registry.
+
+For example for Github Container Registry:
+
+ echo "$GITHUB_TOKEN" | helm registry login ghcr.io -u $GITHUB_USER --password-stdin
`
type registryLoginOptions struct {
@@ -122,7 +127,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
}
}
} else {
- Warning("Using --password via the CLI is insecure. Use --password-stdin.")
+ slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
}
return username, password, nil
diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go
index 4904aa9f1..b660a16c5 100644
--- a/pkg/cmd/release_testing.go
+++ b/pkg/cmd/release_testing.go
@@ -17,6 +17,7 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
"regexp"
@@ -58,8 +59,8 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command
client.Namespace = settings.Namespace()
notName := regexp.MustCompile(`^!\s?name=`)
for _, f := range filter {
- if strings.HasPrefix(f, "name=") {
- client.Filters[action.IncludeNameFilter] = append(client.Filters[action.IncludeNameFilter], strings.TrimPrefix(f, "name="))
+ if after, ok := strings.CutPrefix(f, "name="); ok {
+ client.Filters[action.IncludeNameFilter] = append(client.Filters[action.IncludeNameFilter], after)
} else if notName.MatchString(f) {
client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, ""))
}
@@ -77,6 +78,7 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command
debug: settings.Debug,
showMetadata: false,
hideNotes: client.HideNotes,
+ noColor: settings.ShouldDisableColor(),
}); err != nil {
return err
}
@@ -85,7 +87,7 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command
// Print a newline to stdout to separate the output
fmt.Fprintln(out)
if err := client.GetPodLogs(out, rel); err != nil {
- return err
+ return errors.Join(runErr, err)
}
}
diff --git a/pkg/cmd/repo.go b/pkg/cmd/repo.go
index 925669e13..0dc2a7175 100644
--- a/pkg/cmd/repo.go
+++ b/pkg/cmd/repo.go
@@ -17,10 +17,10 @@ limitations under the License.
package cmd
import (
+ "errors"
"io"
- "os"
+ "io/fs"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cmd/require"
@@ -50,5 +50,5 @@ func newRepoCmd(out io.Writer) *cobra.Command {
}
func isNotExist(err error) bool {
- return os.IsNotExist(errors.Cause(err))
+ return errors.Is(err, fs.ErrNotExist)
}
diff --git a/pkg/cmd/repo_add.go b/pkg/cmd/repo_add.go
index f6c0c11c0..00e698daf 100644
--- a/pkg/cmd/repo_add.go
+++ b/pkg/cmd/repo_add.go
@@ -18,22 +18,23 @@ package cmd
import (
"context"
+ "errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"strings"
"time"
"github.com/gofrs/flock"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/term"
"sigs.k8s.io/yaml"
"helm.sh/helm/v4/pkg/cmd/require"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// Repositories that have been permanently deleted and no longer work
@@ -51,6 +52,7 @@ type repoAddOptions struct {
passCredentialsAll bool
forceUpdate bool
allowDeprecatedRepos bool
+ timeout time.Duration
certFile string
keyFile string
@@ -95,6 +97,7 @@ func newRepoAddCmd(out io.Writer) *cobra.Command {
f.BoolVar(&o.insecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the repository")
f.BoolVar(&o.allowDeprecatedRepos, "allow-deprecated-repos", false, "by default, this command will not allow adding official repos that have been permanently deleted. This disables that behavior")
f.BoolVar(&o.passCredentialsAll, "pass-credentials", false, "pass credentials to all domains")
+ f.DurationVar(&o.timeout, "timeout", getter.DefaultHTTPTimeout*time.Second, "time to wait for the index file download to complete")
return cmd
}
@@ -135,7 +138,7 @@ func (o *repoAddOptions) run(out io.Writer) error {
}
b, err := os.ReadFile(o.repoFile)
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
@@ -179,7 +182,7 @@ func (o *repoAddOptions) run(out io.Writer) error {
// Check if the repo name is legal
if strings.Contains(o.name, "/") {
- return errors.Errorf("repository name (%s) contains '/', please specify a different name without '/'", o.name)
+ return fmt.Errorf("repository name (%s) contains '/', please specify a different name without '/'", o.name)
}
// If the repo exists do one of two things:
@@ -188,10 +191,9 @@ func (o *repoAddOptions) run(out io.Writer) error {
if !o.forceUpdate && f.Has(o.name) {
existing := f.Get(o.name)
if c != *existing {
-
// The input coming in for the name is different from what is already
// configured. Return an error.
- return errors.Errorf("repository name (%s) already exists, please specify a different name", o.name)
+ return fmt.Errorf("repository name (%s) already exists, please specify a different name", o.name)
}
// The add is idempotent so do nothing
@@ -199,7 +201,7 @@ func (o *repoAddOptions) run(out io.Writer) error {
return nil
}
- r, err := repo.NewChartRepository(&c, getter.All(settings))
+ r, err := repo.NewChartRepository(&c, getter.All(settings, getter.WithTimeout(o.timeout)))
if err != nil {
return err
}
@@ -208,12 +210,12 @@ func (o *repoAddOptions) run(out io.Writer) error {
r.CachePath = o.repoCache
}
if _, err := r.DownloadIndexFile(); err != nil {
- return errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", o.url)
+ return fmt.Errorf("looks like %q is not a valid chart repository or cannot be reached: %w", o.url, err)
}
f.Update(&c)
- if err := f.WriteFile(o.repoFile, 0600); err != nil {
+ if err := f.WriteFile(o.repoFile, 0o600); err != nil {
return err
}
fmt.Fprintf(out, "%q has been added to your repositories\n", o.name)
diff --git a/pkg/cmd/repo_add_test.go b/pkg/cmd/repo_add_test.go
index 0f3a3de4f..6d3696f52 100644
--- a/pkg/cmd/repo_add_test.go
+++ b/pkg/cmd/repo_add_test.go
@@ -17,8 +17,10 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"strings"
@@ -29,8 +31,8 @@ import (
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/helmpath/xdg"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestRepoAddCmd(t *testing.T) {
@@ -48,7 +50,7 @@ func TestRepoAddCmd(t *testing.T) {
defer srv2.Stop()
tmpdir := filepath.Join(t.TempDir(), "path-component.yaml/data")
- if err := os.MkdirAll(tmpdir, 0777); err != nil {
+ if err := os.MkdirAll(tmpdir, 0o777); err != nil {
t.Fatal(err)
}
repoFile := filepath.Join(tmpdir, "repositories.yaml")
@@ -97,7 +99,7 @@ func TestRepoAdd(t *testing.T) {
forceUpdate: false,
repoFile: repoFile,
}
- os.Setenv(xdg.CacheHomeEnvVar, rootDir)
+ t.Setenv(xdg.CacheHomeEnvVar, rootDir)
if err := o.run(io.Discard); err != nil {
t.Error(err)
@@ -113,11 +115,11 @@ func TestRepoAdd(t *testing.T) {
}
idx := filepath.Join(helmpath.CachePath("repository"), helmpath.CacheIndexFile(testRepoName))
- if _, err := os.Stat(idx); os.IsNotExist(err) {
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
t.Errorf("Error cache index file was not created for repository %s", testRepoName)
}
idx = filepath.Join(helmpath.CachePath("repository"), helmpath.CacheChartsFile(testRepoName))
- if _, err := os.Stat(idx); os.IsNotExist(err) {
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
t.Errorf("Error cache charts file was not created for repository %s", testRepoName)
}
@@ -151,7 +153,7 @@ func TestRepoAddCheckLegalName(t *testing.T) {
forceUpdate: false,
repoFile: repoFile,
}
- os.Setenv(xdg.CacheHomeEnvVar, rootDir)
+ t.Setenv(xdg.CacheHomeEnvVar, rootDir)
wantErrorMsg := fmt.Sprintf("repository name (%s) contains '/', please specify a different name without '/'", testRepoName)
@@ -189,6 +191,7 @@ func TestRepoAddConcurrentHiddenFile(t *testing.T) {
}
func repoAddConcurrent(t *testing.T, testName, repoFile string) {
+ t.Helper()
ts := repotest.NewTempServer(
t,
repotest.WithChartSourceGlob("testdata/testserver/*.*"),
diff --git a/pkg/cmd/repo_index.go b/pkg/cmd/repo_index.go
index 13a0a9439..ece0ce811 100644
--- a/pkg/cmd/repo_index.go
+++ b/pkg/cmd/repo_index.go
@@ -17,15 +17,17 @@ limitations under the License.
package cmd
import (
+ "errors"
+ "fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cmd/require"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
const repoIndexDesc = `
@@ -97,13 +99,13 @@ func index(dir, url, mergeTo string, json bool) error {
if mergeTo != "" {
// if index.yaml is missing then create an empty one to merge into
var i2 *repo.IndexFile
- if _, err := os.Stat(mergeTo); os.IsNotExist(err) {
+ if _, err := os.Stat(mergeTo); errors.Is(err, fs.ErrNotExist) {
i2 = repo.NewIndexFile()
writeIndexFile(i2, mergeTo, json)
} else {
i2, err = repo.LoadIndexFile(mergeTo)
if err != nil {
- return errors.Wrap(err, "merge failed")
+ return fmt.Errorf("merge failed: %w", err)
}
}
i.Merge(i2)
@@ -114,7 +116,7 @@ func index(dir, url, mergeTo string, json bool) error {
func writeIndexFile(i *repo.IndexFile, out string, json bool) error {
if json {
- return i.WriteJSONFile(out, 0644)
+ return i.WriteJSONFile(out, 0o644)
}
- return i.WriteFile(out, 0644)
+ return i.WriteFile(out, 0o644)
}
diff --git a/pkg/cmd/repo_index_test.go b/pkg/cmd/repo_index_test.go
index c865c8a5d..c8959f21e 100644
--- a/pkg/cmd/repo_index_test.go
+++ b/pkg/cmd/repo_index_test.go
@@ -24,7 +24,7 @@ import (
"path/filepath"
"testing"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
func TestRepoIndexCmd(t *testing.T) {
diff --git a/pkg/cmd/repo_list.go b/pkg/cmd/repo_list.go
index 5b6113a13..10b4442a0 100644
--- a/pkg/cmd/repo_list.go
+++ b/pkg/cmd/repo_list.go
@@ -21,12 +21,11 @@ import (
"io"
"github.com/gosuri/uitable"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/require"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
func newRepoListCmd(out io.Writer) *cobra.Command {
@@ -37,10 +36,14 @@ func newRepoListCmd(out io.Writer) *cobra.Command {
Short: "list chart repositories",
Args: require.NoArgs,
ValidArgsFunction: noMoreArgsCompFunc,
- RunE: func(_ *cobra.Command, _ []string) error {
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ // The error is silently ignored. If no repository file exists, it cannot be loaded,
+ // or the file isn't the right format to be parsed the error is ignored. The
+ // repositories will be 0.
f, _ := repo.LoadFile(settings.RepositoryConfig)
- if len(f.Repositories) == 0 && !(outfmt == output.JSON || outfmt == output.YAML) {
- return errors.New("no repositories to show")
+ if len(f.Repositories) == 0 && outfmt != output.JSON && outfmt != output.YAML {
+ fmt.Fprintln(cmd.ErrOrStderr(), "no repositories to show")
+ return nil
}
return outfmt.Write(out, &repoListWriter{f.Repositories})
diff --git a/pkg/cmd/repo_list_test.go b/pkg/cmd/repo_list_test.go
index 1da5484cc..2f6a9e4ad 100644
--- a/pkg/cmd/repo_list_test.go
+++ b/pkg/cmd/repo_list_test.go
@@ -17,6 +17,8 @@ limitations under the License.
package cmd
import (
+ "fmt"
+ "path/filepath"
"testing"
)
@@ -27,3 +29,26 @@ func TestRepoListOutputCompletion(t *testing.T) {
func TestRepoListFileCompletion(t *testing.T) {
checkFileCompletion(t, "repo list", false)
}
+
+func TestRepoList(t *testing.T) {
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+ repoFile2 := "testdata/repositories.yaml"
+
+ tests := []cmdTestCase{
+ {
+ name: "list with no repos",
+ cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s", repoFile, rootDir),
+ golden: "output/repo-list-empty.txt",
+ wantError: false,
+ },
+ {
+ name: "list with repos",
+ cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s", repoFile2, rootDir),
+ golden: "output/repo-list.txt",
+ wantError: false,
+ },
+ }
+
+ runTestCmd(t, tests)
+}
diff --git a/pkg/cmd/repo_remove.go b/pkg/cmd/repo_remove.go
index 97630810a..330e69d3a 100644
--- a/pkg/cmd/repo_remove.go
+++ b/pkg/cmd/repo_remove.go
@@ -17,17 +17,18 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cmd/require"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
type repoRemoveOptions struct {
@@ -65,7 +66,7 @@ func (o *repoRemoveOptions) run(out io.Writer) error {
for _, name := range o.names {
if !r.Remove(name) {
- return errors.Errorf("no repo named %q found", name)
+ return fmt.Errorf("no repo named %q found", name)
}
if err := r.WriteFile(o.repoFile, 0600); err != nil {
return err
@@ -87,10 +88,10 @@ func removeRepoCache(root, name string) error {
}
idx = filepath.Join(root, helmpath.CacheIndexFile(name))
- if _, err := os.Stat(idx); os.IsNotExist(err) {
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
return nil
} else if err != nil {
- return errors.Wrapf(err, "can't remove index file %s", idx)
+ return fmt.Errorf("can't remove index file %s: %w", idx, err)
}
return os.Remove(idx)
}
diff --git a/pkg/cmd/repo_remove_test.go b/pkg/cmd/repo_remove_test.go
index b8bc7179a..fce15bb73 100644
--- a/pkg/cmd/repo_remove_test.go
+++ b/pkg/cmd/repo_remove_test.go
@@ -25,8 +25,8 @@ import (
"testing"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestRepoRemove(t *testing.T) {
@@ -153,6 +153,7 @@ func createCacheFiles(rootDir string, repoName string) (cacheIndexFile string, c
}
func testCacheFiles(t *testing.T, cacheIndexFile string, cacheChartsFile string, repoName string) {
+ t.Helper()
if _, err := os.Stat(cacheIndexFile); err == nil {
t.Errorf("Error cache index file was not removed for repository %s", repoName)
}
diff --git a/pkg/cmd/repo_update.go b/pkg/cmd/repo_update.go
index 25071377b..f2e7c0e0f 100644
--- a/pkg/cmd/repo_update.go
+++ b/pkg/cmd/repo_update.go
@@ -17,17 +17,18 @@ limitations under the License.
package cmd
import (
+ "errors"
"fmt"
"io"
"slices"
"sync"
+ "time"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cmd/require"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
const updateDesc = `
@@ -42,11 +43,11 @@ To update all the repositories, use 'helm repo update'.
var errNoRepositories = errors.New("no repositories found. You must add one before updating")
type repoUpdateOptions struct {
- update func([]*repo.ChartRepository, io.Writer, bool) error
- repoFile string
- repoCache string
- names []string
- failOnRepoUpdateFail bool
+ update func([]*repo.ChartRepository, io.Writer) error
+ repoFile string
+ repoCache string
+ names []string
+ timeout time.Duration
}
func newRepoUpdateCmd(out io.Writer) *cobra.Command {
@@ -70,10 +71,7 @@ func newRepoUpdateCmd(out io.Writer) *cobra.Command {
}
f := cmd.Flags()
-
- // Adding this flag for Helm 3 as stop gap functionality for https://github.com/helm/helm/issues/10016.
- // This should be deprecated in Helm 4 by update to the behaviour of `helm repo update` command.
- f.BoolVar(&o.failOnRepoUpdateFail, "fail-on-repo-update-fail", false, "update fails if any of the repository updates fail")
+ f.DurationVar(&o.timeout, "timeout", getter.DefaultHTTPTimeout*time.Second, "time to wait for the index file download to complete")
return cmd
}
@@ -84,7 +82,7 @@ func (o *repoUpdateOptions) run(out io.Writer) error {
case isNotExist(err):
return errNoRepositories
case err != nil:
- return errors.Wrapf(err, "failed loading file: %s", o.repoFile)
+ return fmt.Errorf("failed loading file: %s: %w", o.repoFile, err)
case len(f.Repositories) == 0:
return errNoRepositories
}
@@ -101,7 +99,7 @@ func (o *repoUpdateOptions) run(out io.Writer) error {
for _, cfg := range f.Repositories {
if updateAllRepos || isRepoRequested(cfg.Name, o.names) {
- r, err := repo.NewChartRepository(cfg, getter.All(settings))
+ r, err := repo.NewChartRepository(cfg, getter.All(settings, getter.WithTimeout(o.timeout)))
if err != nil {
return err
}
@@ -112,29 +110,44 @@ func (o *repoUpdateOptions) run(out io.Writer) error {
}
}
- return o.update(repos, out, o.failOnRepoUpdateFail)
+ return o.update(repos, out)
}
-func updateCharts(repos []*repo.ChartRepository, out io.Writer, failOnRepoUpdateFail bool) error {
+func updateCharts(repos []*repo.ChartRepository, out io.Writer) error {
fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...")
var wg sync.WaitGroup
- var repoFailList []string
+ failRepoURLChan := make(chan string, len(repos))
+
+ writeMutex := sync.Mutex{}
for _, re := range repos {
wg.Add(1)
go func(re *repo.ChartRepository) {
defer wg.Done()
if _, err := re.DownloadIndexFile(); err != nil {
+ writeMutex.Lock()
+ defer writeMutex.Unlock()
fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err)
- repoFailList = append(repoFailList, re.Config.URL)
+ failRepoURLChan <- re.Config.URL
} else {
+ writeMutex.Lock()
+ defer writeMutex.Unlock()
fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name)
}
}(re)
}
- wg.Wait()
- if len(repoFailList) > 0 && failOnRepoUpdateFail {
- return fmt.Errorf("Failed to update the following repositories: %s",
+ go func() {
+ wg.Wait()
+ close(failRepoURLChan)
+ }()
+
+ var repoFailList []string
+ for url := range failRepoURLChan {
+ repoFailList = append(repoFailList, url)
+ }
+
+ if len(repoFailList) > 0 {
+ return fmt.Errorf("failed to update the following repositories: %s",
repoFailList)
}
@@ -152,7 +165,7 @@ func checkRequestedRepos(requestedRepos []string, validRepos []*repo.Entry) erro
}
}
if !found {
- return errors.Errorf("no repositories found matching '%s'. Nothing will be updated", requestedRepo)
+ return fmt.Errorf("no repositories found matching '%s'. Nothing will be updated", requestedRepo)
}
}
return nil
diff --git a/pkg/cmd/repo_update_test.go b/pkg/cmd/repo_update_test.go
index 5b27a6dfb..7aa4d414f 100644
--- a/pkg/cmd/repo_update_test.go
+++ b/pkg/cmd/repo_update_test.go
@@ -26,15 +26,15 @@ import (
"helm.sh/helm/v4/internal/test/ensure"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestUpdateCmd(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
- updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@@ -59,7 +59,7 @@ func TestUpdateCmdMultiple(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
- updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@@ -85,7 +85,7 @@ func TestUpdateCmdInvalid(t *testing.T) {
var out bytes.Buffer
// Instead of using the HTTP updater, we provide our own for this test.
// The TestUpdateCharts test verifies the HTTP behavior independently.
- updater := func(repos []*repo.ChartRepository, out io.Writer, _ bool) error {
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
for _, re := range repos {
fmt.Fprintln(out, re.Config.Name)
}
@@ -145,7 +145,7 @@ func TestUpdateCharts(t *testing.T) {
}
b := bytes.NewBuffer(nil)
- updateCharts([]*repo.ChartRepository{r}, b, false)
+ updateCharts([]*repo.ChartRepository{r}, b)
got := b.String()
if strings.Contains(got, "Unable to get an update") {
@@ -161,7 +161,7 @@ func TestRepoUpdateFileCompletion(t *testing.T) {
checkFileCompletion(t, "repo update repo1", false)
}
-func TestUpdateChartsFail(t *testing.T) {
+func TestUpdateChartsFailWithError(t *testing.T) {
defer resetEnv()()
ensure.HelmHome(t)
@@ -172,40 +172,14 @@ func TestUpdateChartsFail(t *testing.T) {
defer ts.Stop()
var invalidURL = ts.URL() + "55"
- r, err := repo.NewChartRepository(&repo.Entry{
+ r1, err := repo.NewChartRepository(&repo.Entry{
Name: "charts",
URL: invalidURL,
}, getter.All(settings))
if err != nil {
t.Error(err)
}
-
- b := bytes.NewBuffer(nil)
- if err := updateCharts([]*repo.ChartRepository{r}, b, false); err != nil {
- t.Error("Repo update should not return error if update of repository fails")
- }
-
- got := b.String()
- if !strings.Contains(got, "Unable to get an update") {
- t.Errorf("Repo should have failed update but instead got: %q", got)
- }
- if !strings.Contains(got, "Update Complete.") {
- t.Error("Update was not successful")
- }
-}
-
-func TestUpdateChartsFailWithError(t *testing.T) {
- defer resetEnv()()
- ensure.HelmHome(t)
-
- ts := repotest.NewTempServer(
- t,
- repotest.WithChartSourceGlob("testdata/testserver/*.*"),
- )
- defer ts.Stop()
-
- var invalidURL = ts.URL() + "55"
- r, err := repo.NewChartRepository(&repo.Entry{
+ r2, err := repo.NewChartRepository(&repo.Entry{
Name: "charts",
URL: invalidURL,
}, getter.All(settings))
@@ -214,12 +188,12 @@ func TestUpdateChartsFailWithError(t *testing.T) {
}
b := bytes.NewBuffer(nil)
- err = updateCharts([]*repo.ChartRepository{r}, b, true)
+ err = updateCharts([]*repo.ChartRepository{r1, r2}, b)
if err == nil {
t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set")
return
}
- var expectedErr = "Failed to update the following repositories"
+ var expectedErr = "failed to update the following repositories"
var receivedErr = err.Error()
if !strings.Contains(receivedErr, expectedErr) {
t.Errorf("Expected error (%s) but got (%s) instead", expectedErr, receivedErr)
diff --git a/pkg/cmd/require/args.go b/pkg/cmd/require/args.go
index cfa8a0169..f5e0888f1 100644
--- a/pkg/cmd/require/args.go
+++ b/pkg/cmd/require/args.go
@@ -16,14 +16,15 @@ limitations under the License.
package require
import (
- "github.com/pkg/errors"
+ "fmt"
+
"github.com/spf13/cobra"
)
// NoArgs returns an error if any args are included.
func NoArgs(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
- return errors.Errorf(
+ return fmt.Errorf(
"%q accepts no arguments\n\nUsage: %s",
cmd.CommandPath(),
cmd.UseLine(),
@@ -36,7 +37,7 @@ func NoArgs(cmd *cobra.Command, args []string) error {
func ExactArgs(n int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) != n {
- return errors.Errorf(
+ return fmt.Errorf(
"%q requires %d %s\n\nUsage: %s",
cmd.CommandPath(),
n,
@@ -52,7 +53,7 @@ func ExactArgs(n int) cobra.PositionalArgs {
func MaximumNArgs(n int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) > n {
- return errors.Errorf(
+ return fmt.Errorf(
"%q accepts at most %d %s\n\nUsage: %s",
cmd.CommandPath(),
n,
@@ -68,7 +69,7 @@ func MaximumNArgs(n int) cobra.PositionalArgs {
func MinimumNArgs(n int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) < n {
- return errors.Errorf(
+ return fmt.Errorf(
"%q requires at least %d %s\n\nUsage: %s",
cmd.CommandPath(),
n,
diff --git a/pkg/cmd/require/args_test.go b/pkg/cmd/require/args_test.go
index cd5850650..b6c430fc0 100644
--- a/pkg/cmd/require/args_test.go
+++ b/pkg/cmd/require/args_test.go
@@ -63,6 +63,7 @@ type testCase struct {
}
func runTestCases(t *testing.T, testCases []testCase) {
+ t.Helper()
for i, tc := range testCases {
t.Run(fmt.Sprint(i), func(t *testing.T) {
cmd := &cobra.Command{
diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go
index 155c9fb01..ff60aaedf 100644
--- a/pkg/cmd/rollback.go
+++ b/pkg/cmd/rollback.go
@@ -77,14 +77,19 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f := cmd.Flags()
f.BoolVar(&client.DryRun, "dry-run", false, "simulate a rollback")
- f.BoolVar(&client.Recreate, "recreate-pods", false, "performs pods restart for the resource if applicable")
- f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)")
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
- f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
return cmd
}
diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go
index ea686be7c..4f1be88d6 100644
--- a/pkg/cmd/root.go
+++ b/pkg/cmd/root.go
@@ -21,23 +21,26 @@ import (
"fmt"
"io"
"log"
+ "log/slog"
"net/http"
"os"
"strings"
+ "github.com/fatih/color"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
+ "helm.sh/helm/v4/internal/logging"
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/pkg/action"
"helm.sh/helm/v4/pkg/cli"
kubefake "helm.sh/helm/v4/pkg/kube/fake"
"helm.sh/helm/v4/pkg/registry"
release "helm.sh/helm/v4/pkg/release/v1"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
"helm.sh/helm/v4/pkg/storage/driver"
)
@@ -78,6 +81,8 @@ Environment variables:
| $HELM_KUBETLS_SERVER_NAME | set the server name used to validate the Kubernetes API server certificate |
| $HELM_BURST_LIMIT | set the default burst limit in the case the server contains many CRDs (default 100, -1 to disable) |
| $HELM_QPS | set the Queries Per Second in cases where a high number of calls exceed the option for higher burst values |
+| $HELM_COLOR | set color output mode. Allowed values: never, always, auto (default: never) |
+| $NO_COLOR | set to any non-empty value to disable all colored output (overrides $HELM_COLOR) |
Helm stores cache, configuration, and data based on the following configuration order:
@@ -96,25 +101,15 @@ By default, the default directories depend on the Operating System. The defaults
var settings = cli.New()
-func Debug(format string, v ...interface{}) {
- if settings.Debug {
- log.Output(2, fmt.Sprintf("[debug] "+format+"\n", v...))
- }
-}
-
-func Warning(format string, v ...interface{}) {
- fmt.Fprintf(os.Stderr, "WARNING: "+format+"\n", v...)
-}
-
-func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
+func NewRootCmd(out io.Writer, args []string, logSetup func(bool)) (*cobra.Command, error) {
actionConfig := new(action.Configuration)
- cmd, err := newRootCmdWithConfig(actionConfig, out, args)
+ cmd, err := newRootCmdWithConfig(actionConfig, out, args, logSetup)
if err != nil {
return nil, err
}
cobra.OnInitialize(func() {
helmDriver := os.Getenv("HELM_DRIVER")
- if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, Debug); err != nil {
+ if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
log.Fatal(err)
}
if helmDriver == "memory" {
@@ -125,7 +120,33 @@ func NewRootCmd(out io.Writer, args []string) (*cobra.Command, error) {
return cmd, nil
}
-func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, args []string) (*cobra.Command, error) {
+// SetupLogging sets up Helm logging used by the Helm client.
+// This function is passed to the NewRootCmd function to enable logging. Any other
+// application that uses the NewRootCmd function to setup all the Helm commands may
+// use this function to setup logging or their own. Using a custom logging setup function
+// enables applications using Helm commands to integrate with their existing logging
+// system.
+// The debug argument is the value if Helm is set for debugging (i.e. --debug flag)
+func SetupLogging(debug bool) {
+ logger := logging.NewLogger(func() bool { return debug })
+ slog.SetDefault(logger)
+}
+
+// configureColorOutput configures the color output based on the ColorMode setting
+func configureColorOutput(settings *cli.EnvSettings) {
+ switch settings.ColorMode {
+ case "never":
+ color.NoColor = true
+ case "always":
+ color.NoColor = false
+ case "auto":
+ // Let fatih/color handle automatic detection
+ // It will check if output is a terminal and NO_COLOR env var
+ // We don't need to do anything here
+ }
+}
+
+func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, args []string, logSetup func(bool)) (*cobra.Command, error) {
cmd := &cobra.Command{
Use: "helm",
Short: "The Helm package manager for Kubernetes.",
@@ -148,6 +169,36 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
settings.AddFlags(flags)
addKlogFlags(flags)
+ // We can safely ignore any errors that flags.Parse encounters since
+ // those errors will be caught later during the call to cmd.Execution.
+ // This call is required to gather configuration information prior to
+ // execution.
+ flags.ParseErrorsAllowlist.UnknownFlags = true
+ flags.Parse(args)
+
+ logSetup(settings.Debug)
+
+ // Validate color mode setting
+ switch settings.ColorMode {
+ case "never", "auto", "always":
+ // Valid color mode
+ default:
+ return nil, fmt.Errorf("invalid color mode %q: must be one of: never, auto, always", settings.ColorMode)
+ }
+
+ // Configure color output based on ColorMode setting
+ configureColorOutput(settings)
+
+ // Setup shell completion for the color flag
+ _ = cmd.RegisterFlagCompletionFunc("color", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"never", "auto", "always"}, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ // Setup shell completion for the colour flag
+ _ = cmd.RegisterFlagCompletionFunc("colour", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"never", "auto", "always"}, cobra.ShellCompDirectiveNoFileComp
+ })
+
// Setup shell completion for the namespace flag
err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
if client, err := actionConfig.KubernetesClientSet(); err == nil {
@@ -195,13 +246,6 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
log.Fatal(err)
}
- // We can safely ignore any errors that flags.Parse encounters since
- // those errors will be caught later during the call to cmd.Execution.
- // This call is required to gather configuration information prior to
- // execution.
- flags.ParseErrorsWhitelist.UnknownFlags = true
- flags.Parse(args)
-
registryClient, err := newDefaultRegistryClient(false, "", "")
if err != nil {
return nil, err
@@ -247,8 +291,8 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
newPushCmd(actionConfig, out),
)
- // Find and add plugins
- loadPlugins(cmd, out)
+ // Find and add CLI plugins
+ loadCLIPlugins(cmd, out)
// Check for expired repositories
checkForExpiredRepos(settings.RepositoryConfig)
@@ -416,3 +460,8 @@ func newRegistryClientWithTLS(
}
return registryClient, nil
}
+
+type CommandError struct {
+ error
+ ExitCode int
+}
diff --git a/pkg/cmd/root_test.go b/pkg/cmd/root_test.go
index 9521a5aa2..84e3d9ed2 100644
--- a/pkg/cmd/root_test.go
+++ b/pkg/cmd/root_test.go
@@ -80,7 +80,7 @@ func TestRootCmd(t *testing.T) {
ensure.HelmHome(t)
for k, v := range tt.envvars {
- os.Setenv(k, v)
+ t.Setenv(k, v)
}
if _, _, err := executeActionCommand(tt.args); err != nil {
diff --git a/pkg/cmd/search/search.go b/pkg/cmd/search/search.go
index f9e229154..1c7bb1d06 100644
--- a/pkg/cmd/search/search.go
+++ b/pkg/cmd/search/search.go
@@ -31,7 +31,7 @@ import (
"github.com/Masterminds/semver/v3"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// Result is a search result.
diff --git a/pkg/cmd/search/search_test.go b/pkg/cmd/search/search_test.go
index 7a4ba786b..a24eb1f64 100644
--- a/pkg/cmd/search/search_test.go
+++ b/pkg/cmd/search/search_test.go
@@ -21,7 +21,7 @@ import (
"testing"
chart "helm.sh/helm/v4/pkg/chart/v2"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
func TestSortScore(t *testing.T) {
diff --git a/pkg/cmd/search_hub.go b/pkg/cmd/search_hub.go
index b7f25444e..cfeeec59b 100644
--- a/pkg/cmd/search_hub.go
+++ b/pkg/cmd/search_hub.go
@@ -19,10 +19,10 @@ package cmd
import (
"fmt"
"io"
+ "log/slog"
"strings"
"github.com/gosuri/uitable"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/internal/monocular"
@@ -83,13 +83,13 @@ func newSearchHubCmd(out io.Writer) *cobra.Command {
func (o *searchHubOptions) run(out io.Writer, args []string) error {
c, err := monocular.New(o.searchEndpoint)
if err != nil {
- return errors.Wrap(err, fmt.Sprintf("unable to create connection to %q", o.searchEndpoint))
+ return fmt.Errorf("unable to create connection to %q: %w", o.searchEndpoint, err)
}
q := strings.Join(args, " ")
results, err := c.Search(q)
if err != nil {
- Debug("%s", err)
+ slog.Debug("search failed", slog.Any("error", err))
return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
}
diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go
index bc73e52b2..35608e22e 100644
--- a/pkg/cmd/search_repo.go
+++ b/pkg/cmd/search_repo.go
@@ -19,21 +19,22 @@ package cmd
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
+ "log/slog"
"os"
"path/filepath"
"strings"
"github.com/Masterminds/semver/v3"
"github.com/gosuri/uitable"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/search"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
const searchRepoDesc = `
@@ -130,17 +131,17 @@ func (o *searchRepoOptions) run(out io.Writer, args []string) error {
}
func (o *searchRepoOptions) setupSearchedVersion() {
- Debug("Original chart version: %q", o.version)
+ slog.Debug("original chart version", "version", o.version)
if o.version != "" {
return
}
if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
- Debug("setting version to >0.0.0-0")
+ slog.Debug("setting version to >0.0.0-0")
o.version = ">0.0.0-0"
} else { // search only for stable releases, prerelease versions will be skipped
- Debug("setting version to >0.0.0")
+ slog.Debug("setting version to >0.0.0")
o.version = ">0.0.0"
}
}
@@ -152,7 +153,7 @@ func (o *searchRepoOptions) applyConstraint(res []*search.Result) ([]*search.Res
constraint, err := semver.NewConstraint(o.version)
if err != nil {
- return res, errors.Wrap(err, "an invalid version/constraint format")
+ return res, fmt.Errorf("an invalid version/constraint format: %w", err)
}
data := res[:0]
@@ -189,8 +190,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
ind, err := repo.LoadIndexFile(f)
if err != nil {
- Warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n)
- Warning("%s", err)
+ slog.Warn("repo is corrupt or missing", "repo", n, slog.Any("error", err))
continue
}
diff --git a/pkg/cmd/show.go b/pkg/cmd/show.go
index a02af6f18..1c7e7be44 100644
--- a/pkg/cmd/show.go
+++ b/pkg/cmd/show.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log"
+ "log/slog"
"github.com/spf13/cobra"
@@ -211,13 +212,13 @@ func addShowFlags(subCmd *cobra.Command, client *action.Show) {
}
func runShow(args []string, client *action.Show) (string, error) {
- Debug("Original chart version: %q", client.Version)
+ slog.Debug("original chart version", "version", client.Version)
if client.Version == "" && client.Devel {
- Debug("setting version to >0.0.0-0")
+ slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
- cp, err := client.ChartPathOptions.LocateChart(args[0], settings)
+ cp, err := client.LocateChart(args[0], settings)
if err != nil {
return "", err
}
diff --git a/pkg/cmd/show_test.go b/pkg/cmd/show_test.go
index ab8cafc37..ff3671dbc 100644
--- a/pkg/cmd/show_test.go
+++ b/pkg/cmd/show_test.go
@@ -22,7 +22,7 @@ import (
"strings"
"testing"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestShowPreReleaseChart(t *testing.T) {
@@ -64,14 +64,17 @@ func TestShowPreReleaseChart(t *testing.T) {
},
}
+ contentTmp := t.TempDir()
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
outdir := srv.Root()
- cmd := fmt.Sprintf("show all '%s' %s --repository-config %s --repository-cache %s",
+ cmd := fmt.Sprintf("show all '%s' %s --repository-config %s --repository-cache %s --content-cache %s",
tt.args,
tt.flags,
filepath.Join(outdir, "repositories.yaml"),
outdir,
+ contentTmp,
)
//_, out, err := executeActionCommand(cmd)
_, _, err := executeActionCommand(cmd)
diff --git a/pkg/cmd/status.go b/pkg/cmd/status.go
index 2b1138786..3d1309c3e 100644
--- a/pkg/cmd/status.go
+++ b/pkg/cmd/status.go
@@ -28,8 +28,9 @@ import (
"k8s.io/kubectl/pkg/cmd/get"
+ coloroutput "helm.sh/helm/v4/internal/cli/output"
"helm.sh/helm/v4/pkg/action"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common/util"
"helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cmd/require"
release "helm.sh/helm/v4/pkg/release/v1"
@@ -84,6 +85,7 @@ func newStatusCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
debug: false,
showMetadata: false,
hideNotes: false,
+ noColor: settings.ShouldDisableColor(),
})
},
}
@@ -112,6 +114,7 @@ type statusPrinter struct {
debug bool
showMetadata bool
hideNotes bool
+ noColor bool
}
func (s statusPrinter) WriteJSON(out io.Writer) error {
@@ -130,8 +133,8 @@ func (s statusPrinter) WriteTable(out io.Writer) error {
if !s.release.Info.LastDeployed.IsZero() {
_, _ = fmt.Fprintf(out, "LAST DEPLOYED: %s\n", s.release.Info.LastDeployed.Format(time.ANSIC))
}
- _, _ = fmt.Fprintf(out, "NAMESPACE: %s\n", s.release.Namespace)
- _, _ = fmt.Fprintf(out, "STATUS: %s\n", s.release.Info.Status.String())
+ _, _ = fmt.Fprintf(out, "NAMESPACE: %s\n", coloroutput.ColorizeNamespace(s.release.Namespace, s.noColor))
+ _, _ = fmt.Fprintf(out, "STATUS: %s\n", coloroutput.ColorizeStatus(s.release.Info.Status, s.noColor))
_, _ = fmt.Fprintf(out, "REVISION: %d\n", s.release.Version)
if s.showMetadata {
_, _ = fmt.Fprintf(out, "CHART: %s\n", s.release.Chart.Metadata.Name)
@@ -194,7 +197,7 @@ func (s statusPrinter) WriteTable(out io.Writer) error {
// Print an extra newline
_, _ = fmt.Fprintln(out)
- cfg, err := chartutil.CoalesceValues(s.release.Chart, s.release.Config)
+ cfg, err := util.CoalesceValues(s.release.Chart, s.release.Config)
if err != nil {
return err
}
@@ -218,7 +221,7 @@ func (s statusPrinter) WriteTable(out io.Writer) error {
// Hide notes from output - option in install and upgrades
if !s.hideNotes && len(s.release.Info.Notes) > 0 {
- fmt.Fprintf(out, "NOTES:\n%s\n", strings.TrimSpace(s.release.Info.Notes))
+ _, _ = fmt.Fprintf(out, "NOTES:\n%s\n", strings.TrimSpace(s.release.Info.Notes))
}
return nil
}
diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go
index 25ff31ade..81c112d51 100644
--- a/pkg/cmd/template.go
+++ b/pkg/cmd/template.go
@@ -18,8 +18,10 @@ package cmd
import (
"bytes"
+ "errors"
"fmt"
"io"
+ "io/fs"
"os"
"path"
"path/filepath"
@@ -33,10 +35,10 @@ import (
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/action"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common"
"helm.sh/helm/v4/pkg/cli/values"
"helm.sh/helm/v4/pkg/cmd/require"
- releaseutil "helm.sh/helm/v4/pkg/release/util"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
)
const templateDesc = `
@@ -67,7 +69,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
},
RunE: func(_ *cobra.Command, args []string) error {
if kubeVersion != "" {
- parsedKubeVersion, err := chartutil.ParseKubeVersion(kubeVersion)
+ parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion)
if err != nil {
return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err)
}
@@ -91,7 +93,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
client.ReleaseName = "release-name"
client.Replace = true // Skip the name check
client.ClientOnly = !validate
- client.APIVersions = chartutil.VersionSet(extraAPIs)
+ client.APIVersions = common.VersionSet(extraAPIs)
client.IncludeCRDs = includeCrds
rel, err := runInstall(args, client, valueOpts, out)
@@ -199,9 +201,9 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&skipTests, "skip-tests", false, "skip tests from templated output")
f.BoolVar(&client.IsUpgrade, "is-upgrade", false, "set .Release.IsUpgrade instead of .Release.IsInstall")
f.StringVar(&kubeVersion, "kube-version", "", "Kubernetes version used for Capabilities.KubeVersion")
- f.StringSliceVarP(&extraAPIs, "api-versions", "a", []string{}, "Kubernetes api versions used for Capabilities.APIVersions")
+ f.StringSliceVarP(&extraAPIs, "api-versions", "a", []string{}, "Kubernetes api versions used for Capabilities.APIVersions (multiple can be specified)")
f.BoolVar(&client.UseReleaseName, "release-name", false, "use release name in the output-dir path.")
- bindPostRenderFlag(cmd, &client.PostRenderer)
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
return cmd
}
@@ -230,7 +232,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
defer f.Close()
- _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+ _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
if err != nil {
return err
@@ -250,7 +252,7 @@ func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
func ensureDirectoryForFile(file string) error {
baseDir := path.Dir(file)
_, err := os.Stat(baseDir)
- if err != nil && !os.IsNotExist(err) {
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
diff --git a/pkg/cmd/template_test.go b/pkg/cmd/template_test.go
index c478fced4..5bcccf5d0 100644
--- a/pkg/cmd/template_test.go
+++ b/pkg/cmd/template_test.go
@@ -22,18 +22,6 @@ import (
"testing"
)
-func TestTemplateCmdWithToml(t *testing.T) {
-
- tests := []cmdTestCase{
- {
- name: "check toToml function rendering",
- cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/issue-totoml"),
- golden: "output/issue-totoml.txt",
- },
- }
- runTestCmd(t, tests)
-}
-
var chartPath = "testdata/testcharts/subchart"
func TestTemplateCmd(t *testing.T) {
@@ -95,7 +83,12 @@ func TestTemplateCmd(t *testing.T) {
},
{
name: "check kube api versions",
- cmd: fmt.Sprintf("template --api-versions helm.k8s.io/test '%s'", chartPath),
+ cmd: fmt.Sprintf("template --api-versions helm.k8s.io/test,helm.k8s.io/test2 '%s'", chartPath),
+ golden: "output/template-with-api-version.txt",
+ },
+ {
+ name: "check kube api versions",
+ cmd: fmt.Sprintf("template --api-versions helm.k8s.io/test --api-versions helm.k8s.io/test2 '%s'", chartPath),
golden: "output/template-with-api-version.txt",
},
{
diff --git a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml
index 63f2f12db..a58544b03 100644
--- a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml
+++ b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml
@@ -1,4 +1,12 @@
+---
+apiVersion: v1
name: fullenv
-usage: "show env vars"
-description: "show all env vars"
-command: "$HELM_PLUGIN_DIR/fullenv.sh"
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "show env vars"
+ longHelp: "show all env vars"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/fullenv.sh"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml
index 21e28a7c2..4156e7f17 100644
--- a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml
@@ -1,4 +1,11 @@
name: args
-usage: "echo args"
-description: "This echos args"
-command: "$HELM_PLUGIN_DIR/args.sh"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo args"
+ longHelp: "This echos args"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/args.sh"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml
index 7b9362a08..a0a0b5255 100644
--- a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml
@@ -1,4 +1,11 @@
name: echo
-usage: "echo stuff"
-description: "This echos stuff"
-command: "echo hello"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo stuff"
+ longHelp: "This echos stuff"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "echo hello"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh
new file mode 100755
index 000000000..9e823ac13
--- /dev/null
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME}
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml
index 52cb7a848..78a0a23fb 100644
--- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml
@@ -1,4 +1,12 @@
+---
+apiVersion: v1
name: env
-usage: "env stuff"
-description: "show the env"
-command: "echo $HELM_PLUGIN_NAME"
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "env stuff"
+ longHelp: "show the env"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: ${HELM_PLUGIN_DIR}/plugin-name.sh
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml
index 5691d1712..ba9508255 100644
--- a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml
@@ -1,4 +1,12 @@
+---
+apiVersion: v1
name: exitwith
-usage: "exitwith code"
-description: "This exits with the specified exit code"
-command: "$HELM_PLUGIN_DIR/exitwith.sh"
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "exitwith code"
+ longHelp: "This exits with the specified exit code"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/exitwith.sh"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh
index 2efad9b3c..cc0c64a6a 100755
--- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh
@@ -1,7 +1,7 @@
#!/bin/sh
-echo $HELM_PLUGIN_NAME
-echo $HELM_PLUGIN_DIR
-echo $HELM_PLUGINS
-echo $HELM_REPOSITORY_CONFIG
-echo $HELM_REPOSITORY_CACHE
-echo $HELM_BIN
+echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME}
+echo HELM_PLUGIN_DIR=${HELM_PLUGIN_DIR}
+echo HELM_PLUGINS=${HELM_PLUGINS}
+echo HELM_REPOSITORY_CONFIG=${HELM_REPOSITORY_CONFIG}
+echo HELM_REPOSITORY_CACHE=${HELM_REPOSITORY_CACHE}
+echo HELM_BIN=${HELM_BIN}
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml
index 63f2f12db..a58544b03 100644
--- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml
@@ -1,4 +1,12 @@
+---
+apiVersion: v1
name: fullenv
-usage: "show env vars"
-description: "show all env vars"
-command: "$HELM_PLUGIN_DIR/fullenv.sh"
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "show env vars"
+ longHelp: "show all env vars"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/fullenv.sh"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..b6e8afa57
--- /dev/null
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/pkg/cmd/testdata/output/env-comp.txt b/pkg/cmd/testdata/output/env-comp.txt
index 8f9c53fc7..9d38ee464 100644
--- a/pkg/cmd/testdata/output/env-comp.txt
+++ b/pkg/cmd/testdata/output/env-comp.txt
@@ -2,6 +2,7 @@ HELM_BIN
HELM_BURST_LIMIT
HELM_CACHE_HOME
HELM_CONFIG_HOME
+HELM_CONTENT_CACHE
HELM_DATA_HOME
HELM_DEBUG
HELM_KUBEAPISERVER
diff --git a/pkg/cmd/testdata/output/get-metadata.json b/pkg/cmd/testdata/output/get-metadata.json
index 4c015b977..9166f87ac 100644
--- a/pkg/cmd/testdata/output/get-metadata.json
+++ b/pkg/cmd/testdata/output/get-metadata.json
@@ -1 +1 @@
-{"name":"thomas-guide","chart":"foo","version":"0.1.0-beta.1","appVersion":"1.0","annotations":{"category":"web-apps","supported":"true"},"dependencies":[{"name":"cool-plugin","version":"1.0.0","repository":"https://coolplugin.io/charts","condition":"coolPlugin.enabled","enabled":true},{"name":"crds","version":"2.7.1","repository":"","condition":"crds.enabled"}],"namespace":"default","revision":1,"status":"deployed","deployedAt":"1977-09-02T22:04:05Z"}
+{"name":"thomas-guide","chart":"foo","version":"0.1.0-beta.1","appVersion":"1.0","annotations":{"category":"web-apps","supported":"true"},"labels":{"key1":"value1"},"dependencies":[{"name":"cool-plugin","version":"1.0.0","repository":"https://coolplugin.io/charts","condition":"coolPlugin.enabled","enabled":true},{"name":"crds","version":"2.7.1","repository":"","condition":"crds.enabled"}],"namespace":"default","revision":1,"status":"deployed","deployedAt":"1977-09-02T22:04:05Z"}
diff --git a/pkg/cmd/testdata/output/get-metadata.txt b/pkg/cmd/testdata/output/get-metadata.txt
index 01083b333..b3cb73ee2 100644
--- a/pkg/cmd/testdata/output/get-metadata.txt
+++ b/pkg/cmd/testdata/output/get-metadata.txt
@@ -3,8 +3,10 @@ CHART: foo
VERSION: 0.1.0-beta.1
APP_VERSION: 1.0
ANNOTATIONS: category=web-apps,supported=true
+LABELS: key1=value1
DEPENDENCIES: cool-plugin,crds
NAMESPACE: default
REVISION: 1
STATUS: deployed
DEPLOYED_AT: 1977-09-02T22:04:05Z
+APPLY_METHOD: client-side apply (defaulted)
diff --git a/pkg/cmd/testdata/output/get-metadata.yaml b/pkg/cmd/testdata/output/get-metadata.yaml
index 6298436c9..98f567837 100644
--- a/pkg/cmd/testdata/output/get-metadata.yaml
+++ b/pkg/cmd/testdata/output/get-metadata.yaml
@@ -14,6 +14,8 @@ dependencies:
repository: ""
version: 2.7.1
deployedAt: "1977-09-02T22:04:05Z"
+labels:
+ key1: value1
name: thomas-guide
namespace: default
revision: 1
diff --git a/pkg/cmd/testdata/output/install-hide-secret.txt b/pkg/cmd/testdata/output/install-hide-secret.txt
index aaf73b478..165f14f73 100644
--- a/pkg/cmd/testdata/output/install-hide-secret.txt
+++ b/pkg/cmd/testdata/output/install-hide-secret.txt
@@ -1 +1 @@
-Error: INSTALLATION FAILED: Hiding Kubernetes secrets requires a dry-run mode
+Error: INSTALLATION FAILED: hiding Kubernetes secrets requires a dry-run mode
diff --git a/pkg/cmd/testdata/output/issue-totoml.txt b/pkg/cmd/testdata/output/issue-totoml.txt
deleted file mode 100644
index 06cf4bb8d..000000000
--- a/pkg/cmd/testdata/output/issue-totoml.txt
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# Source: issue-totoml/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: issue-totoml
-data: |
- key = 13
diff --git a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt
index 6e2efcecd..67ed58ec3 100644
--- a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt
+++ b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt
@@ -1,6 +1,6 @@
==> Linting testdata/testcharts/chart-with-bad-subcharts
[INFO] Chart.yaml: icon is recommended
-[ERROR] templates/: error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
+[WARNING] templates/: directory does not exist
[ERROR] : unable to load chart
error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
@@ -9,11 +9,13 @@
[ERROR] Chart.yaml: apiVersion is required. The value must be either "v1" or "v2"
[ERROR] Chart.yaml: version is required
[INFO] Chart.yaml: icon is recommended
-[ERROR] templates/: validation: chart.metadata.name is required
+[WARNING] Chart.yaml: version '' is not a valid SemVerV2
+[WARNING] templates/: directory does not exist
[ERROR] : unable to load chart
validation: chart.metadata.name is required
==> Linting testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart
[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/: directory does not exist
Error: 3 chart(s) linted, 2 chart(s) failed
diff --git a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt
index af533797b..5a1c388bb 100644
--- a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt
+++ b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt
@@ -1,6 +1,6 @@
==> Linting testdata/testcharts/chart-with-bad-subcharts
[INFO] Chart.yaml: icon is recommended
-[ERROR] templates/: error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
+[WARNING] templates/: directory does not exist
[ERROR] : unable to load chart
error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
diff --git a/pkg/cmd/testdata/output/lint-quiet-with-error.txt b/pkg/cmd/testdata/output/lint-quiet-with-error.txt
index e3d29a5a3..0731a07d1 100644
--- a/pkg/cmd/testdata/output/lint-quiet-with-error.txt
+++ b/pkg/cmd/testdata/output/lint-quiet-with-error.txt
@@ -1,7 +1,7 @@
==> Linting testdata/testcharts/chart-bad-requirements
[ERROR] Chart.yaml: unable to parse YAML
error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
-[ERROR] templates/: cannot load Chart.yaml: error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
+[WARNING] templates/: directory does not exist
[ERROR] : unable to load chart
cannot load Chart.yaml: error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
diff --git a/pkg/cmd/testdata/output/lint-quiet-with-warning.txt b/pkg/cmd/testdata/output/lint-quiet-with-warning.txt
index e69de29bb..ebf6c1989 100644
--- a/pkg/cmd/testdata/output/lint-quiet-with-warning.txt
+++ b/pkg/cmd/testdata/output/lint-quiet-with-warning.txt
@@ -0,0 +1,4 @@
+==> Linting testdata/testcharts/chart-with-only-crds
+[WARNING] templates/: directory does not exist
+
+1 chart(s) linted, 0 chart(s) failed
diff --git a/pkg/cmd/testdata/output/repo-list-empty.txt b/pkg/cmd/testdata/output/repo-list-empty.txt
new file mode 100644
index 000000000..c6edb659a
--- /dev/null
+++ b/pkg/cmd/testdata/output/repo-list-empty.txt
@@ -0,0 +1 @@
+no repositories to show
diff --git a/pkg/cmd/testdata/output/repo-list.txt b/pkg/cmd/testdata/output/repo-list.txt
new file mode 100644
index 000000000..edbd0ecc1
--- /dev/null
+++ b/pkg/cmd/testdata/output/repo-list.txt
@@ -0,0 +1,4 @@
+NAME URL
+charts https://charts.helm.sh/stable
+firstexample http://firstexample.com
+secondexample http://secondexample.com
diff --git a/pkg/cmd/testdata/output/schema-negative-cli.txt b/pkg/cmd/testdata/output/schema-negative-cli.txt
index c4a5cc516..12bcc5103 100644
--- a/pkg/cmd/testdata/output/schema-negative-cli.txt
+++ b/pkg/cmd/testdata/output/schema-negative-cli.txt
@@ -1,4 +1,4 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
empty:
-- age: Must be greater than or equal to 0
+- at '/age': minimum: got -5, want 0
diff --git a/pkg/cmd/testdata/output/schema-negative.txt b/pkg/cmd/testdata/output/schema-negative.txt
index 929af5518..daf132635 100644
--- a/pkg/cmd/testdata/output/schema-negative.txt
+++ b/pkg/cmd/testdata/output/schema-negative.txt
@@ -1,5 +1,5 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
empty:
-- (root): employmentInfo is required
-- age: Must be greater than or equal to 0
+- at '': missing property 'employmentInfo'
+- at '/age': minimum: got -5, want 0
diff --git a/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt b/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt
index 7396b4bfe..179550f69 100644
--- a/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt
+++ b/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt
@@ -1,4 +1,4 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
subchart-with-schema:
-- age: Must be greater than or equal to 0
+- at '/age': minimum: got -25, want 0
diff --git a/pkg/cmd/testdata/output/subchart-schema-negative.txt b/pkg/cmd/testdata/output/subchart-schema-negative.txt
index 7b1f654a2..7522ef3e4 100644
--- a/pkg/cmd/testdata/output/subchart-schema-negative.txt
+++ b/pkg/cmd/testdata/output/subchart-schema-negative.txt
@@ -1,6 +1,6 @@
Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
chart-without-schema:
-- (root): lastname is required
+- at '': missing property 'lastname'
subchart-with-schema:
-- (root): age is required
+- at '': missing property 'age'
diff --git a/pkg/cmd/testdata/output/template-with-api-version.txt b/pkg/cmd/testdata/output/template-with-api-version.txt
index 7e1c35001..8b6074cdb 100644
--- a/pkg/cmd/testdata/output/template-with-api-version.txt
+++ b/pkg/cmd/testdata/output/template-with-api-version.txt
@@ -75,6 +75,7 @@ metadata:
kube-version/minor: "20"
kube-version/version: "v1.20.0"
kube-api-version/test: v1
+ kube-api-version/test2: v2
spec:
type: ClusterIP
ports:
diff --git a/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt b/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt
index adf2ae899..b2c154a80 100644
--- a/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt
+++ b/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt
@@ -1 +1 @@
-Error: An error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: found in Chart.yaml, but missing in charts/ directory: reqsubchart2
+Error: an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: found in Chart.yaml, but missing in charts/ directory: reqsubchart2
diff --git a/pkg/cmd/testdata/testcharts/issue-totoml/Chart.yaml b/pkg/cmd/testdata/testcharts/issue-totoml/Chart.yaml
deleted file mode 100644
index f4be7a213..000000000
--- a/pkg/cmd/testdata/testcharts/issue-totoml/Chart.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-apiVersion: v2
-name: issue-totoml
-version: 0.1.0
diff --git a/pkg/cmd/testdata/testcharts/issue-totoml/templates/configmap.yaml b/pkg/cmd/testdata/testcharts/issue-totoml/templates/configmap.yaml
deleted file mode 100644
index 621e70d48..000000000
--- a/pkg/cmd/testdata/testcharts/issue-totoml/templates/configmap.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: issue-totoml
-data: |
- {{ .Values.global | toToml }}
diff --git a/pkg/cmd/testdata/testcharts/issue-totoml/values.yaml b/pkg/cmd/testdata/testcharts/issue-totoml/values.yaml
deleted file mode 100644
index dd0140449..000000000
--- a/pkg/cmd/testdata/testcharts/issue-totoml/values.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-global:
- key: 13
\ No newline at end of file
diff --git a/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml b/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml
index fee94dced..19c931cc3 100644
--- a/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml
+++ b/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml
@@ -11,6 +11,9 @@ metadata:
{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test" }}
kube-api-version/test: v1
{{- end }}
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test2" }}
+ kube-api-version/test2: v2
+{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
diff --git a/pkg/cmd/testdata/testplugin/plugin.yaml b/pkg/cmd/testdata/testplugin/plugin.yaml
index 890292cbf..3ee5d04f6 100644
--- a/pkg/cmd/testdata/testplugin/plugin.yaml
+++ b/pkg/cmd/testdata/testplugin/plugin.yaml
@@ -1,4 +1,12 @@
+---
+apiVersion: v1
name: testplugin
-usage: "echo test"
-description: "This echos test"
-command: "echo test"
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "echo test"
+ longHelp: "This echos test"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "echo test"
diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go
index c4e70cf75..4680c324a 100644
--- a/pkg/cmd/uninstall.go
+++ b/pkg/cmd/uninstall.go
@@ -76,10 +76,10 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
- f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout")
f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.StringVar(&client.Description, "description", "", "add a custom description")
+ AddWaitFlag(cmd, &client.WaitStrategy)
return cmd
}
diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go
index 3296e29ab..c8fbf8bd3 100644
--- a/pkg/cmd/upgrade.go
+++ b/pkg/cmd/upgrade.go
@@ -21,12 +21,12 @@ import (
"fmt"
"io"
"log"
+ "log/slog"
"os"
"os/signal"
"syscall"
"time"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v4/pkg/action"
@@ -130,17 +130,17 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
instClient := action.NewInstall(cfg)
instClient.CreateNamespace = createNamespace
instClient.ChartPathOptions = client.ChartPathOptions
- instClient.Force = client.Force
+ instClient.ForceReplace = client.ForceReplace
instClient.DryRun = client.DryRun
instClient.DryRunOption = client.DryRunOption
instClient.DisableHooks = client.DisableHooks
instClient.SkipCRDs = client.SkipCRDs
instClient.Timeout = client.Timeout
- instClient.Wait = client.Wait
+ instClient.WaitStrategy = client.WaitStrategy
instClient.WaitForJobs = client.WaitForJobs
instClient.Devel = client.Devel
instClient.Namespace = client.Namespace
- instClient.Atomic = client.Atomic
+ instClient.RollbackOnFailure = client.RollbackOnFailure
instClient.PostRenderer = client.PostRenderer
instClient.DisableOpenAPIValidation = client.DisableOpenAPIValidation
instClient.SubNotes = client.SubNotes
@@ -166,6 +166,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
debug: settings.Debug,
showMetadata: false,
hideNotes: instClient.HideNotes,
+ noColor: settings.ShouldDisableColor(),
})
} else if err != nil {
return err
@@ -173,11 +174,11 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if client.Version == "" && client.Devel {
- Debug("setting version to >0.0.0-0")
+ slog.Debug("setting version to >0.0.0-0")
client.Version = ">0.0.0-0"
}
- chartPath, err := client.ChartPathOptions.LocateChart(args[1], settings)
+ chartPath, err := client.LocateChart(args[1], settings)
if err != nil {
return err
}
@@ -199,16 +200,17 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if req := ch.Metadata.Dependencies; req != nil {
if err := action.CheckDependencies(ch, req); err != nil {
- err = errors.Wrap(err, "An error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies")
+ err = fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err)
if client.DependencyUpdate {
man := &downloader.Manager{
Out: out,
ChartPath: chartPath,
- Keyring: client.ChartPathOptions.Keyring,
+ Keyring: client.Keyring,
SkipUpdate: false,
Getters: p,
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
Debug: settings.Debug,
}
if err := man.Update(); err != nil {
@@ -216,7 +218,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
// Reload the chart with the updated Chart.lock file.
if ch, err = loader.Load(chartPath); err != nil {
- return errors.Wrap(err, "failed reloading chart after repo update")
+ return fmt.Errorf("failed reloading chart after repo update: %w", err)
}
} else {
return err
@@ -225,7 +227,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
}
if ch.Metadata.Deprecated {
- Warning("This chart is deprecated")
+ slog.Warn("this chart is deprecated")
}
// Create context and prepare the handle of SIGTERM
@@ -245,7 +247,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
rel, err := client.RunWithContext(ctx, args[0], ch, vals)
if err != nil {
- return errors.Wrap(err, "UPGRADE FAILED")
+ return fmt.Errorf("UPGRADE FAILED: %w", err)
}
if outfmt == output.Table {
@@ -257,6 +259,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
debug: settings.Debug,
showMetadata: false,
hideNotes: client.HideNotes,
+ noColor: settings.ShouldDisableColor(),
})
},
}
@@ -268,9 +271,11 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.StringVar(&client.DryRunOption, "dry-run", "", "simulate an install. If --dry-run is set with no option being specified or as '--dry-run=client', it will not attempt cluster connections. Setting '--dry-run=server' allows attempting cluster connections.")
f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag")
f.Lookup("dry-run").NoOptDefVal = "client"
- f.BoolVar(&client.Recreate, "recreate-pods", false, "performs pods restart for the resource if applicable")
- f.MarkDeprecated("recreate-pods", "functionality will no longer be updated. Consult the documentation for other methods to recreate pods")
- f.BoolVar(&client.Force, "force", false, "force resource updates through a replacement strategy")
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)")
f.BoolVar(&client.DisableHooks, "no-hooks", false, "disable pre/post upgrade hooks")
f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the upgrade process will not validate rendered templates against the Kubernetes OpenAPI Schema")
f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed when an upgrade is performed with install flag enabled. By default, CRDs are installed if not already present, when an upgrade is performed with install flag enabled")
@@ -278,9 +283,10 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored")
f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored")
- f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
- f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used")
+ f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback the upgrade to previous success release upon failure. The --wait flag will be defaulted to \"watcher\" if --rollback-on-failure is set")
+ f.BoolVar(&client.RollbackOnFailure, "atomic", false, "deprecated")
+ f.MarkDeprecated("atomic", "use --rollback-on-failure instead")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
@@ -294,7 +300,10 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
addChartPathOptionsFlags(f, &client.ChartPathOptions)
addValueOptionsFlags(f, valueOpts)
bindOutputFlag(cmd, &outfmt)
- bindPostRenderFlag(cmd, &client.PostRenderer)
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 2 {
diff --git a/pkg/cmd/upgrade_test.go b/pkg/cmd/upgrade_test.go
index 8a840f149..9b17f187d 100644
--- a/pkg/cmd/upgrade_test.go
+++ b/pkg/cmd/upgrade_test.go
@@ -24,6 +24,7 @@ import (
"strings"
"testing"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@@ -193,7 +194,7 @@ func TestUpgradeCmd(t *testing.T) {
func TestUpgradeWithValue(t *testing.T) {
releaseName := "funny-bunny-v2"
- relMock, ch, chartPath := prepareMockRelease(releaseName, t)
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -220,7 +221,7 @@ func TestUpgradeWithValue(t *testing.T) {
func TestUpgradeWithStringValue(t *testing.T) {
releaseName := "funny-bunny-v3"
- relMock, ch, chartPath := prepareMockRelease(releaseName, t)
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -248,7 +249,7 @@ func TestUpgradeWithStringValue(t *testing.T) {
func TestUpgradeInstallWithSubchartNotes(t *testing.T) {
releaseName := "wacky-bunny-v1"
- relMock, ch, _ := prepareMockRelease(releaseName, t)
+ relMock, ch, _ := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -280,7 +281,7 @@ func TestUpgradeInstallWithSubchartNotes(t *testing.T) {
func TestUpgradeWithValuesFile(t *testing.T) {
releaseName := "funny-bunny-v4"
- relMock, ch, chartPath := prepareMockRelease(releaseName, t)
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -308,7 +309,7 @@ func TestUpgradeWithValuesFile(t *testing.T) {
func TestUpgradeWithValuesFromStdin(t *testing.T) {
releaseName := "funny-bunny-v5"
- relMock, ch, chartPath := prepareMockRelease(releaseName, t)
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -340,7 +341,7 @@ func TestUpgradeWithValuesFromStdin(t *testing.T) {
func TestUpgradeInstallWithValuesFromStdin(t *testing.T) {
releaseName := "funny-bunny-v6"
- _, _, chartPath := prepareMockRelease(releaseName, t)
+ _, _, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -368,7 +369,8 @@ func TestUpgradeInstallWithValuesFromStdin(t *testing.T) {
}
-func prepareMockRelease(releaseName string, t *testing.T) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+func prepareMockRelease(t *testing.T, releaseName string) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+ t.Helper()
tmpChart := t.TempDir()
configmapData, err := os.ReadFile("testdata/testcharts/upgradetest/templates/configmap.yaml")
if err != nil {
@@ -381,7 +383,7 @@ func prepareMockRelease(releaseName string, t *testing.T) (func(n string, v int,
Description: "A Helm chart for Kubernetes",
Version: "0.1.0",
},
- Templates: []*chart.File{{Name: "templates/configmap.yaml", Data: configmapData}},
+ Templates: []*common.File{{Name: "templates/configmap.yaml", Data: configmapData}},
}
chartPath := filepath.Join(tmpChart, cfile.Metadata.Name)
if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
@@ -445,7 +447,7 @@ func TestUpgradeFileCompletion(t *testing.T) {
func TestUpgradeInstallWithLabels(t *testing.T) {
releaseName := "funny-bunny-labels"
- _, _, chartPath := prepareMockRelease(releaseName, t)
+ _, _, chartPath := prepareMockRelease(t, releaseName)
defer resetEnv()()
@@ -471,7 +473,8 @@ func TestUpgradeInstallWithLabels(t *testing.T) {
}
}
-func prepareMockReleaseWithSecret(releaseName string, t *testing.T) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+func prepareMockReleaseWithSecret(t *testing.T, releaseName string) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+ t.Helper()
tmpChart := t.TempDir()
configmapData, err := os.ReadFile("testdata/testcharts/chart-with-secret/templates/configmap.yaml")
if err != nil {
@@ -488,7 +491,7 @@ func prepareMockReleaseWithSecret(releaseName string, t *testing.T) (func(n stri
Description: "A Helm chart for Kubernetes",
Version: "0.1.0",
},
- Templates: []*chart.File{{Name: "templates/configmap.yaml", Data: configmapData}, {Name: "templates/secret.yaml", Data: secretData}},
+ Templates: []*common.File{{Name: "templates/configmap.yaml", Data: configmapData}, {Name: "templates/secret.yaml", Data: secretData}},
}
chartPath := filepath.Join(tmpChart, cfile.Metadata.Name)
if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
@@ -512,7 +515,7 @@ func prepareMockReleaseWithSecret(releaseName string, t *testing.T) (func(n stri
func TestUpgradeWithDryRun(t *testing.T) {
releaseName := "funny-bunny-labels"
- _, _, chartPath := prepareMockReleaseWithSecret(releaseName, t)
+ _, _, chartPath := prepareMockReleaseWithSecret(t, releaseName)
defer resetEnv()()
diff --git a/pkg/downloader/cache.go b/pkg/downloader/cache.go
new file mode 100644
index 000000000..cecfc8bd7
--- /dev/null
+++ b/pkg/downloader/cache.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/fileutil"
+)
+
+// Cache describes a cache that can get and put chart data.
+// The cache key is the sha256 has of the content. sha256 is used in Helm for
+// digests in index files providing a common key for checking content.
+type Cache interface {
+ // Get returns a reader for the given key.
+ Get(key [sha256.Size]byte, cacheType string) (string, error)
+ // Put stores the given reader for the given key.
+ Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error)
+}
+
+// CacheChart specifies the content is a chart
+var CacheChart = ".chart"
+
+// CacheProv specifies the content is a provenance file
+var CacheProv = ".prov"
+
+// TODO: The cache assumes files because much of Helm assumes files. Convert
+// Helm to pass content around instead of file locations.
+
+// DiskCache is a cache that stores data on disk.
+type DiskCache struct {
+ Root string
+}
+
+// Get returns a reader for the given key.
+func (c *DiskCache) Get(key [sha256.Size]byte, cacheType string) (string, error) {
+ p := c.fileName(key, cacheType)
+ fi, err := os.Stat(p)
+ if err != nil {
+ return "", err
+ }
+ // Empty files treated as not exist because there is no content.
+ if fi.Size() == 0 {
+ return p, os.ErrNotExist
+ }
+ // directories should never happen unless something outside helm is operating
+ // on this content.
+ if fi.IsDir() {
+ return p, errors.New("is a directory")
+ }
+ return p, nil
+}
+
+// Put stores the given reader for the given key.
+// It returns the path to the stored file.
+func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error) {
+ // TODO: verify the key and digest of the key are the same.
+ p := c.fileName(key, cacheType)
+ if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
+ slog.Error("failed to create cache directory")
+ return p, err
+ }
+ return p, fileutil.AtomicWriteFile(p, data, 0644)
+}
+
+// fileName generates the filename in a structured manner where the first part is the
+// directory and the full hash is the filename.
+func (c *DiskCache) fileName(id [sha256.Size]byte, cacheType string) string {
+ return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+cacheType)
+}
diff --git a/pkg/downloader/cache_test.go b/pkg/downloader/cache_test.go
new file mode 100644
index 000000000..340c77aba
--- /dev/null
+++ b/pkg/downloader/cache_test.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// compiler check to ensure DiskCache implements the Cache interface.
+var _ Cache = (*DiskCache)(nil)
+
+func TestDiskCache_PutAndGet(t *testing.T) {
+ // Setup a temporary directory for the cache
+ tmpDir := t.TempDir()
+ cache := &DiskCache{Root: tmpDir}
+
+ // Test data
+ content := []byte("hello world")
+ key := sha256.Sum256(content)
+
+ // --- Test case 1: Put and Get a regular file (prov=false) ---
+ t.Run("PutAndGetTgz", func(t *testing.T) {
+ // Put the data into the cache
+ path, err := cache.Put(key, bytes.NewReader(content), CacheChart)
+ require.NoError(t, err, "Put should not return an error")
+
+ // Verify the file exists at the returned path
+ _, err = os.Stat(path)
+ require.NoError(t, err, "File should exist after Put")
+
+ // Get the file from the cache
+ retrievedPath, err := cache.Get(key, CacheChart)
+ require.NoError(t, err, "Get should not return an error for existing file")
+ assert.Equal(t, path, retrievedPath, "Get should return the same path as Put")
+
+ // Verify content
+ data, err := os.ReadFile(retrievedPath)
+ require.NoError(t, err)
+ assert.Equal(t, content, data, "Content of retrieved file should match original content")
+ })
+
+ // --- Test case 2: Put and Get a provenance file (prov=true) ---
+ t.Run("PutAndGetProv", func(t *testing.T) {
+ provContent := []byte("provenance data")
+ provKey := sha256.Sum256(provContent)
+
+ path, err := cache.Put(provKey, bytes.NewReader(provContent), CacheProv)
+ require.NoError(t, err)
+
+ retrievedPath, err := cache.Get(provKey, CacheProv)
+ require.NoError(t, err)
+ assert.Equal(t, path, retrievedPath)
+
+ data, err := os.ReadFile(retrievedPath)
+ require.NoError(t, err)
+ assert.Equal(t, provContent, data)
+ })
+
+ // --- Test case 3: Get a non-existent file ---
+ t.Run("GetNonExistent", func(t *testing.T) {
+ nonExistentKey := sha256.Sum256([]byte("does not exist"))
+ _, err := cache.Get(nonExistentKey, CacheChart)
+ assert.ErrorIs(t, err, os.ErrNotExist, "Get for a non-existent key should return os.ErrNotExist")
+ })
+
+ // --- Test case 4: Put an empty file ---
+ t.Run("PutEmptyFile", func(t *testing.T) {
+ emptyContent := []byte{}
+ emptyKey := sha256.Sum256(emptyContent)
+
+ path, err := cache.Put(emptyKey, bytes.NewReader(emptyContent), CacheChart)
+ require.NoError(t, err)
+
+ // Get should return ErrNotExist for empty files
+ _, err = cache.Get(emptyKey, CacheChart)
+ assert.ErrorIs(t, err, os.ErrNotExist, "Get for an empty file should return os.ErrNotExist")
+
+ // But the file should exist
+ _, err = os.Stat(path)
+ require.NoError(t, err, "Empty file should still exist on disk")
+ })
+
+ // --- Test case 5: Get a directory ---
+ t.Run("GetDirectory", func(t *testing.T) {
+ dirKey := sha256.Sum256([]byte("i am a directory"))
+ dirPath := cache.fileName(dirKey, CacheChart)
+ err := os.MkdirAll(dirPath, 0755)
+ require.NoError(t, err)
+
+ _, err = cache.Get(dirKey, CacheChart)
+ assert.EqualError(t, err, "is a directory")
+ })
+}
+
+func TestDiskCache_fileName(t *testing.T) {
+ cache := &DiskCache{Root: "/tmp/cache"}
+ key := sha256.Sum256([]byte("some data"))
+
+ assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.chart"), cache.fileName(key, CacheChart))
+ assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.prov"), cache.fileName(key, CacheProv))
+}
diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go
index f5d1deac9..00c8c56e8 100644
--- a/pkg/downloader/chart_downloader.go
+++ b/pkg/downloader/chart_downloader.go
@@ -16,22 +16,27 @@ limitations under the License.
package downloader
import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
"fmt"
"io"
+ "io/fs"
+ "log/slog"
"net/url"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/fileutil"
+ ifs "helm.sh/helm/v4/internal/third_party/dep/fs"
"helm.sh/helm/v4/internal/urlutil"
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/provenance"
"helm.sh/helm/v4/pkg/registry"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// VerificationStrategy describes a strategy for determining whether to verify a chart.
@@ -72,6 +77,14 @@ type ChartDownloader struct {
RegistryClient *registry.Client
RepositoryConfig string
RepositoryCache string
+
+ // ContentCache is the location where Cache stores its files by default
+ // In previous versions of Helm the charts were put in the RepositoryCache. The
+ // repositories and charts are stored in 2 difference caches.
+ ContentCache string
+
+ // Cache specifies the cache implementation to use.
+ Cache Cache
}
// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file.
@@ -86,7 +99,14 @@ type ChartDownloader struct {
// Returns a string path to the location where the file was downloaded and a verification
// (if provenance was verified), or an error if something bad happened.
func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) {
- u, err := c.ResolveChartVersion(ref, version)
+ if c.Cache == nil {
+ if c.ContentCache == "" {
+ return "", nil, errors.New("content cache must be set")
+ }
+ c.Cache = &DiskCache{Root: c.ContentCache}
+ slog.Debug("setup up default downloader cache")
+ }
+ hash, u, err := c.ResolveChartVersion(ref, version)
if err != nil {
return "", nil, err
}
@@ -96,11 +116,37 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
return "", nil, err
}
- c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream"))
+ // Check the cache for the content. Otherwise download it.
+ // Note, this process will pull from the cache but does not automatically populate
+ // the cache with the file it downloads.
+ var data *bytes.Buffer
+ var found bool
+ var digest []byte
+ var digest32 [32]byte
+ if hash != "" {
+ // if there is a hash, populate the other formats
+ digest, err = hex.DecodeString(hash)
+ if err != nil {
+ return "", nil, err
+ }
+ copy(digest32[:], digest)
+ if pth, err := c.Cache.Get(digest32, CacheChart); err == nil {
+ fdata, err := os.ReadFile(pth)
+ if err == nil {
+ found = true
+ data = bytes.NewBuffer(fdata)
+ slog.Debug("found chart in cache", "id", hash)
+ }
+ }
+ }
- data, err := g.Get(u.String(), c.Options...)
- if err != nil {
- return "", nil, err
+ if !found {
+ c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream"))
+
+ data, err = g.Get(u.String(), c.Options...)
+ if err != nil {
+ return "", nil, err
+ }
}
name := filepath.Base(u.Path)
@@ -117,13 +163,27 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
// If provenance is requested, verify it.
ver := &provenance.Verification{}
if c.Verify > VerifyNever {
- body, err := g.Get(u.String() + ".prov")
- if err != nil {
- if c.Verify == VerifyAlways {
- return destfile, ver, errors.Errorf("failed to fetch provenance %q", u.String()+".prov")
+ found = false
+ var body *bytes.Buffer
+ if hash != "" {
+ if pth, err := c.Cache.Get(digest32, CacheProv); err == nil {
+ fdata, err := os.ReadFile(pth)
+ if err == nil {
+ found = true
+ body = bytes.NewBuffer(fdata)
+ slog.Debug("found provenance in cache", "id", hash)
+ }
+ }
+ }
+ if !found {
+ body, err = g.Get(u.String() + ".prov")
+ if err != nil {
+ if c.Verify == VerifyAlways {
+ return destfile, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov")
+ }
+ fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
+ return destfile, ver, nil
}
- fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
- return destfile, ver, nil
}
provfile := destfile + ".prov"
if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil {
@@ -131,7 +191,7 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
}
if c.Verify != VerifyLater {
- ver, err = VerifyChart(destfile, c.Keyring)
+ ver, err = VerifyChart(destfile, destfile+".prov", c.Keyring)
if err != nil {
// Fail always in this case, since it means the verification step
// failed.
@@ -142,10 +202,143 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
return destfile, ver, nil
}
+// DownloadToCache retrieves resources while using a content based cache.
+func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provenance.Verification, error) {
+ if c.Cache == nil {
+ if c.ContentCache == "" {
+ return "", nil, errors.New("content cache must be set")
+ }
+ c.Cache = &DiskCache{Root: c.ContentCache}
+ slog.Debug("setup up default downloader cache")
+ }
+
+ digestString, u, err := c.ResolveChartVersion(ref, version)
+ if err != nil {
+ return "", nil, err
+ }
+
+ g, err := c.Getters.ByScheme(u.Scheme)
+ if err != nil {
+ return "", nil, err
+ }
+
+ c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream"))
+
+ // Check the cache for the file
+ digest, err := hex.DecodeString(digestString)
+ if err != nil {
+ return "", nil, err
+ }
+ var digest32 [32]byte
+ copy(digest32[:], digest)
+ if err != nil {
+ return "", nil, fmt.Errorf("unable to decode digest: %w", err)
+ }
+
+ var pth string
+ // only fetch from the cache if we have a digest
+ if len(digest) > 0 {
+ pth, err = c.Cache.Get(digest32, CacheChart)
+ if err == nil {
+ slog.Debug("found chart in cache", "id", digestString)
+ }
+ }
+ if len(digest) == 0 || err != nil {
+ slog.Debug("attempting to download chart", "ref", ref, "version", version)
+ if err != nil && !os.IsNotExist(err) {
+ return "", nil, err
+ }
+
+ // Get file not in the cache
+ data, gerr := g.Get(u.String(), c.Options...)
+ if gerr != nil {
+ return "", nil, gerr
+ }
+
+ // Generate the digest
+ if len(digest) == 0 {
+ digest32 = sha256.Sum256(data.Bytes())
+ }
+
+ pth, err = c.Cache.Put(digest32, data, CacheChart)
+ if err != nil {
+ return "", nil, err
+ }
+ slog.Debug("put downloaded chart in cache", "id", hex.EncodeToString(digest32[:]))
+ }
+
+ // If provenance is requested, verify it.
+ ver := &provenance.Verification{}
+ if c.Verify > VerifyNever {
+
+ ppth, err := c.Cache.Get(digest32, CacheProv)
+ if err == nil {
+ slog.Debug("found provenance in cache", "id", digestString)
+ } else {
+ if !os.IsNotExist(err) {
+ return pth, ver, err
+ }
+
+ body, err := g.Get(u.String() + ".prov")
+ if err != nil {
+ if c.Verify == VerifyAlways {
+ return pth, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov")
+ }
+ fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
+ return pth, ver, nil
+ }
+
+ ppth, err = c.Cache.Put(digest32, body, CacheProv)
+ if err != nil {
+ return "", nil, err
+ }
+ slog.Debug("put downloaded provenance file in cache", "id", hex.EncodeToString(digest32[:]))
+ }
+
+ if c.Verify != VerifyLater {
+
+ // provenance files pin to a specific name so this needs to be accounted for
+ // when verifying.
+ // Note, this does make an assumption that the name/version is unique to a
+ // hash when a provenance file is used. If this isn't true, this section of code
+ // will need to be reworked.
+ name := filepath.Base(u.Path)
+ if u.Scheme == registry.OCIScheme {
+ idx := strings.LastIndexByte(name, ':')
+ name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:])
+ }
+
+ // Copy chart to a known location with the right name for verification and then
+ // clean it up.
+ tmpdir := filepath.Dir(filepath.Join(c.ContentCache, "tmp"))
+ if err := os.MkdirAll(tmpdir, 0755); err != nil {
+ return pth, ver, err
+ }
+ tmpfile := filepath.Join(tmpdir, name)
+ err = ifs.CopyFile(pth, tmpfile)
+ if err != nil {
+ return pth, ver, err
+ }
+ // Not removing the tmp dir itself because a concurrent process may be using it
+ defer os.RemoveAll(tmpfile)
+
+ ver, err = VerifyChart(tmpfile, ppth, c.Keyring)
+ if err != nil {
+ // Fail always in this case, since it means the verification step
+ // failed.
+ return pth, ver, err
+ }
+ }
+ }
+ return pth, ver, nil
+}
+
// ResolveChartVersion resolves a chart reference to a URL.
//
-// It returns the URL and sets the ChartDownloader's Options that can fetch
-// the URL using the appropriate Getter.
+// It returns:
+// - A hash of the content if available
+// - The URL and sets the ChartDownloader's Options that can fetch the URL using the appropriate Getter.
+// - An error if there is one
//
// A reference may be an HTTP URL, an oci reference URL, a 'reponame/chartname'
// reference, or a local path.
@@ -157,19 +350,26 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
// - If version is non-empty, this will return the URL for that version
// - If version is empty, this will return the URL for the latest version
// - If no version can be found, an error is returned
-func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) {
+//
+// TODO: support OCI hash
+func (c *ChartDownloader) ResolveChartVersion(ref, version string) (string, *url.URL, error) {
u, err := url.Parse(ref)
if err != nil {
- return nil, errors.Errorf("invalid chart URL format: %s", ref)
+ return "", nil, fmt.Errorf("invalid chart URL format: %s", ref)
}
if registry.IsOCI(u.String()) {
- return c.RegistryClient.ValidateReference(ref, version, u)
+ if c.RegistryClient == nil {
+ return "", nil, fmt.Errorf("unable to lookup ref %s at version '%s', missing registry client", ref, version)
+ }
+
+ digest, OCIref, err := c.RegistryClient.ValidateReference(ref, version, u)
+ return digest, OCIref, err
}
rf, err := loadRepoConfig(c.RepositoryConfig)
if err != nil {
- return u, err
+ return "", u, err
}
if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
@@ -186,9 +386,9 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er
if err == ErrNoOwnerRepo {
// Make sure to add the ref URL as the URL for the getter
c.Options = append(c.Options, getter.WithURL(ref))
- return u, nil
+ return "", u, nil
}
- return u, err
+ return "", u, err
}
// If we get here, we don't need to go through the next phase of looking
@@ -207,21 +407,20 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er
getter.WithPassCredentialsAll(rc.PassCredentialsAll),
)
}
- return u, nil
+ return "", u, nil
}
// See if it's of the form: repo/path_to_chart
p := strings.SplitN(u.Path, "/", 2)
if len(p) < 2 {
- return u, errors.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u)
+ return "", u, fmt.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u)
}
repoName := p[0]
chartName := p[1]
rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)
-
if err != nil {
- return u, err
+ return "", u, err
}
// Now that we have the chart repository information we can use that URL
@@ -230,7 +429,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er
r, err := repo.NewChartRepository(rc, c.Getters)
if err != nil {
- return u, err
+ return "", u, err
}
if r != nil && r.Config != nil {
@@ -249,33 +448,33 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er
idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
i, err := repo.LoadIndexFile(idxFile)
if err != nil {
- return u, errors.Wrap(err, "no cached repo found. (try 'helm repo update')")
+ return "", u, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err)
}
cv, err := i.Get(chartName, version)
if err != nil {
- return u, errors.Wrapf(err, "chart %q matching %s not found in %s index. (try 'helm repo update')", chartName, version, r.Config.Name)
+ return "", u, fmt.Errorf("chart %q matching %s not found in %s index. (try 'helm repo update'): %w", chartName, version, r.Config.Name, err)
}
if len(cv.URLs) == 0 {
- return u, errors.Errorf("chart %q has no downloadable URLs", ref)
+ return "", u, fmt.Errorf("chart %q has no downloadable URLs", ref)
}
// TODO: Seems that picking first URL is not fully correct
resolvedURL, err := repo.ResolveReferenceURL(rc.URL, cv.URLs[0])
-
if err != nil {
- return u, errors.Errorf("invalid chart URL format: %s", ref)
+ return cv.Digest, u, fmt.Errorf("invalid chart URL format: %s", ref)
}
- return url.Parse(resolvedURL)
+ loc, err := url.Parse(resolvedURL)
+ return cv.Digest, loc, err
}
// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart.
//
// It assumes that a chart archive file is accompanied by a provenance file whose
// name is the archive file name plus the ".prov" extension.
-func VerifyChart(path, keyring string) (*provenance.Verification, error) {
+func VerifyChart(path, provfile, keyring string) (*provenance.Verification, error) {
// For now, error out if it's not a tar file.
switch fi, err := os.Stat(path); {
case err != nil:
@@ -286,16 +485,26 @@ func VerifyChart(path, keyring string) (*provenance.Verification, error) {
return nil, errors.New("chart must be a tgz file")
}
- provfile := path + ".prov"
if _, err := os.Stat(provfile); err != nil {
- return nil, errors.Wrapf(err, "could not load provenance file %s", provfile)
+ return nil, fmt.Errorf("could not load provenance file %s: %w", provfile, err)
}
sig, err := provenance.NewFromKeyring(keyring, "")
if err != nil {
- return nil, errors.Wrap(err, "failed to load keyring")
+ return nil, fmt.Errorf("failed to load keyring: %w", err)
}
- return sig.Verify(path, provfile)
+
+ // Read archive and provenance files
+ archiveData, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read chart archive: %w", err)
+ }
+ provData, err := os.ReadFile(provfile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ return sig.Verify(archiveData, provData, filepath.Base(path))
}
// isTar tests whether the given file is a tar file.
@@ -310,12 +519,12 @@ func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Ent
for _, rc := range cfgs {
if rc.Name == name {
if rc.URL == "" {
- return nil, errors.Errorf("no URL found for repository %s", name)
+ return nil, fmt.Errorf("no URL found for repository %s", name)
}
return rc, nil
}
}
- return nil, errors.Errorf("repo %s not found", name)
+ return nil, fmt.Errorf("repo %s not found", name)
}
// scanReposForURL scans all repos to find which repo contains the given URL.
@@ -348,7 +557,7 @@ func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry,
idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
i, err := repo.LoadIndexFile(idxFile)
if err != nil {
- return nil, errors.Wrap(err, "no cached repo found. (try 'helm repo update')")
+ return nil, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err)
}
for _, entry := range i.Entries {
@@ -367,7 +576,7 @@ func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry,
func loadRepoConfig(file string) (*repo.File, error) {
r, err := repo.LoadFile(file)
- if err != nil && !os.IsNotExist(errors.Cause(err)) {
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
return nil, err
}
return r, nil
diff --git a/pkg/downloader/chart_downloader_test.go b/pkg/downloader/chart_downloader_test.go
index 26dcc58ff..4349ecef9 100644
--- a/pkg/downloader/chart_downloader_test.go
+++ b/pkg/downloader/chart_downloader_test.go
@@ -16,15 +16,20 @@ limitations under the License.
package downloader
import (
+ "crypto/sha256"
+ "encoding/hex"
"os"
"path/filepath"
"testing"
+ "github.com/stretchr/testify/require"
+
"helm.sh/helm/v4/internal/test/ensure"
"helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
const (
@@ -46,6 +51,7 @@ func TestResolveChartRef(t *testing.T) {
{name: "reference, querystring repo", ref: "testing-querystring/alpine", expect: "http://example.com/alpine-1.2.3.tgz?key=value"},
{name: "reference, testing-relative repo", ref: "testing-relative/foo", expect: "http://example.com/helm/charts/foo-1.2.3.tgz"},
{name: "reference, testing-relative repo", ref: "testing-relative/bar", expect: "http://example.com/helm/bar-1.2.3.tgz"},
+ {name: "reference, testing-relative repo", ref: "testing-relative/baz", expect: "http://example.com/path/to/baz-1.2.3.tgz"},
{name: "reference, testing-relative-trailing-slash repo", ref: "testing-relative-trailing-slash/foo", expect: "http://example.com/helm/charts/foo-1.2.3.tgz"},
{name: "reference, testing-relative-trailing-slash repo", ref: "testing-relative-trailing-slash/bar", expect: "http://example.com/helm/bar-1.2.3.tgz"},
{name: "encoded URL", ref: "encoded-url/foobar", expect: "http://example.com/with%2Fslash/charts/foobar-4.2.1.tgz"},
@@ -59,10 +65,17 @@ func TestResolveChartRef(t *testing.T) {
{name: "oci ref with sha256 and version mismatch", ref: "oci://example.com/install/by/sha:0.1.1@sha256:d234555386402a5867ef0169fefe5486858b6d8d209eaf32fd26d29b16807fd6", version: "0.1.2", fail: true},
}
+ // Create a mock registry client for OCI references
+ registryClient, err := registry.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
c := ChartDownloader{
Out: os.Stderr,
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ RegistryClient: registryClient,
Getters: getter.All(&cli.EnvSettings{
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
@@ -70,7 +83,7 @@ func TestResolveChartRef(t *testing.T) {
}
for _, tt := range tests {
- u, err := c.ResolveChartVersion(tt.ref, tt.version)
+ _, u, err := c.ResolveChartVersion(tt.ref, tt.version)
if err != nil {
if tt.fail {
continue
@@ -122,7 +135,7 @@ func TestResolveChartOpts(t *testing.T) {
continue
}
- u, err := c.ResolveChartVersion(tt.ref, tt.version)
+ _, u, err := c.ResolveChartVersion(tt.ref, tt.version)
if err != nil {
t.Errorf("%s: failed with error %s", tt.name, err)
continue
@@ -146,7 +159,7 @@ func TestResolveChartOpts(t *testing.T) {
}
func TestVerifyChart(t *testing.T) {
- v, err := VerifyChart("testdata/signtest-0.1.0.tgz", "testdata/helm-test-key.pub")
+ v, err := VerifyChart("testdata/signtest-0.1.0.tgz", "testdata/signtest-0.1.0.tgz.prov", "testdata/helm-test-key.pub")
if err != nil {
t.Fatal(err)
}
@@ -189,15 +202,19 @@ func TestDownloadTo(t *testing.T) {
t.Fatal(err)
}
+ contentCache := t.TempDir()
+
c := ChartDownloader{
Out: os.Stderr,
Verify: VerifyAlways,
Keyring: "testdata/helm-test-key.pub",
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
Getters: getter.All(&cli.EnvSettings{
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
}),
Options: []getter.Option{
getter.WithBasicAuth("username", "password"),
@@ -241,6 +258,7 @@ func TestDownloadTo_TLS(t *testing.T) {
repoConfig := filepath.Join(srv.Root(), "repositories.yaml")
repoCache := srv.Root()
+ contentCache := t.TempDir()
c := ChartDownloader{
Out: os.Stderr,
@@ -248,9 +266,11 @@ func TestDownloadTo_TLS(t *testing.T) {
Keyring: "testdata/helm-test-key.pub",
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
Getters: getter.All(&cli.EnvSettings{
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
}),
Options: []getter.Option{
getter.WithTLSClientConfig(
@@ -295,15 +315,18 @@ func TestDownloadTo_VerifyLater(t *testing.T) {
if err := srv.LinkIndices(); err != nil {
t.Fatal(err)
}
+ contentCache := t.TempDir()
c := ChartDownloader{
Out: os.Stderr,
Verify: VerifyLater,
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
Getters: getter.All(&cli.EnvSettings{
RepositoryConfig: repoConfig,
RepositoryCache: repoCache,
+ ContentCache: contentCache,
}),
}
cname := "/signtest-0.1.0.tgz"
@@ -357,3 +380,108 @@ func TestScanReposForURL(t *testing.T) {
t.Fatalf("expected ErrNoOwnerRepo, got %v", err)
}
}
+
+func TestDownloadToCache(t *testing.T) {
+ srv := repotest.NewTempServer(t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.CreateIndex(); err != nil {
+ t.Fatal(err)
+ }
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ // The repo file needs to point to our server.
+ repoFile := filepath.Join(srv.Root(), "repositories.yaml")
+ repoCache := srv.Root()
+ contentCache := t.TempDir()
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyNever,
+ RepositoryConfig: repoFile,
+ RepositoryCache: repoCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoFile,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ }),
+ Cache: &DiskCache{Root: contentCache},
+ }
+
+ // Case 1: Chart not in cache, download it.
+ t.Run("download and cache chart", func(t *testing.T) {
+ // Clear cache for this test
+ os.RemoveAll(contentCache)
+ os.MkdirAll(contentCache, 0755)
+ c.Cache = &DiskCache{Root: contentCache}
+
+ pth, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+
+ // Check that the file exists at the returned path
+ _, err = os.Stat(pth)
+ require.NoError(t, err, "chart should exist at returned path")
+
+ // Check that it's in the cache
+ digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ digestBytes, err := hex.DecodeString(digest)
+ require.NoError(t, err)
+ var digestArray [sha256.Size]byte
+ copy(digestArray[:], digestBytes)
+
+ cachePath, err := c.Cache.Get(digestArray, CacheChart)
+ require.NoError(t, err, "chart should now be in cache")
+ require.Equal(t, pth, cachePath)
+ })
+
+ // Case 2: Chart is in cache, get from cache.
+ t.Run("get chart from cache", func(t *testing.T) {
+ // The cache should be populated from the previous test.
+ // To prove it's coming from cache, we can stop the server.
+ // But repotest doesn't support restarting.
+ // Let's just call it again and assume it works if it's fast and doesn't error.
+ pth, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+
+ _, err = os.Stat(pth)
+ require.NoError(t, err, "chart should exist at returned path")
+ })
+
+ // Case 3: Download with verification
+ t.Run("download and verify", func(t *testing.T) {
+ // Clear cache
+ os.RemoveAll(contentCache)
+ os.MkdirAll(contentCache, 0755)
+ c.Cache = &DiskCache{Root: contentCache}
+ c.Verify = VerifyAlways
+ c.Keyring = "testdata/helm-test-key.pub"
+
+ _, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+ require.NotEmpty(t, v.FileHash, "verification should have a file hash")
+
+ // Check that both chart and prov are in cache
+ digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ digestBytes, err := hex.DecodeString(digest)
+ require.NoError(t, err)
+ var digestArray [sha256.Size]byte
+ copy(digestArray[:], digestBytes)
+
+ _, err = c.Cache.Get(digestArray, CacheChart)
+ require.NoError(t, err, "chart should be in cache")
+ _, err = c.Cache.Get(digestArray, CacheProv)
+ require.NoError(t, err, "provenance file should be in cache")
+
+ // Reset for other tests
+ c.Verify = VerifyNever
+ c.Keyring = ""
+ })
+}
diff --git a/pkg/downloader/manager.go b/pkg/downloader/manager.go
index d38509311..d41b8fdb4 100644
--- a/pkg/downloader/manager.go
+++ b/pkg/downloader/manager.go
@@ -18,19 +18,19 @@ package downloader
import (
"crypto"
"encoding/hex"
+ "errors"
"fmt"
"io"
+ stdfs "io/fs"
"log"
"net/url"
"os"
- "path"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/Masterminds/semver/v3"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
"helm.sh/helm/v4/internal/resolver"
@@ -42,7 +42,7 @@ import (
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/registry"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache.
@@ -75,6 +75,9 @@ type Manager struct {
RegistryClient *registry.Client
RepositoryConfig string
RepositoryCache string
+
+ // ContentCache is a location where a cache of charts can be stored
+ ContentCache string
}
// Build rebuilds a local charts directory from a lockfile.
@@ -220,7 +223,7 @@ func (m *Manager) Update() error {
func (m *Manager) loadChartDir() (*chart.Chart, error) {
if fi, err := os.Stat(m.ChartPath); err != nil {
- return nil, errors.Wrapf(err, "could not find %s", m.ChartPath)
+ return nil, fmt.Errorf("could not find %s: %w", m.ChartPath, err)
} else if !fi.IsDir() {
return nil, errors.New("only unpacked charts can be updated")
}
@@ -251,9 +254,9 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
// Check if 'charts' directory is not actually a directory. If it does not exist, create it.
if fi, err := os.Stat(destPath); err == nil {
if !fi.IsDir() {
- return errors.Errorf("%q is not a directory", destPath)
+ return fmt.Errorf("%q is not a directory", destPath)
}
- } else if os.IsNotExist(err) {
+ } else if errors.Is(err, stdfs.ErrNotExist) {
if err := os.MkdirAll(destPath, 0755); err != nil {
return err
}
@@ -314,7 +317,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
// https://github.com/helm/helm/issues/1439
churl, username, password, insecureskiptlsverify, passcredentialsall, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos)
if err != nil {
- saveError = errors.Wrapf(err, "could not find %s", churl)
+ saveError = fmt.Errorf("could not find %s: %w", churl, err)
break
}
@@ -331,6 +334,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
Keyring: m.Keyring,
RepositoryConfig: m.RepositoryConfig,
RepositoryCache: m.RepositoryCache,
+ ContentCache: m.ContentCache,
RegistryClient: m.RegistryClient,
Getters: m.Getters,
Options: []getter.Option{
@@ -345,7 +349,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
if registry.IsOCI(churl) {
churl, version, err = parseOCIRef(churl)
if err != nil {
- return errors.Wrapf(err, "could not parse OCI reference")
+ return fmt.Errorf("could not parse OCI reference: %w", err)
}
dl.Options = append(dl.Options,
getter.WithRegistryClient(m.RegistryClient),
@@ -353,7 +357,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
}
if _, _, err = dl.DownloadTo(churl, version, tmpPath); err != nil {
- saveError = errors.Wrapf(err, "could not download %s", churl)
+ saveError = fmt.Errorf("could not download %s: %w", churl, err)
break
}
@@ -377,7 +381,7 @@ func parseOCIRef(chartRef string) (string, string, error) {
refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
caps := refTagRegexp.FindStringSubmatch(chartRef)
if len(caps) != 4 {
- return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
+ return "", "", fmt.Errorf("improperly formatted oci chart reference: %s", chartRef)
}
chartRef = caps[1]
tag := caps[3]
@@ -385,7 +389,7 @@ func parseOCIRef(chartRef string) (string, string, error) {
return chartRef, tag, nil
}
-// safeMoveDep moves all dependencies in the source and moves them into dest.
+// safeMoveDeps moves all dependencies in the source and moves them into dest.
//
// It does this by first matching the file name to an expected pattern, then loading
// the file to verify that it is a chart.
@@ -559,7 +563,7 @@ func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.
func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) {
rf, err := loadRepoConfig(m.RepositoryConfig)
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, stdfs.ErrNotExist) {
return make(map[string]string), nil
}
return nil, err
@@ -727,7 +731,6 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*
}
for _, cr := range repos {
-
if urlutil.Equal(repoURL, cr.Config.URL) {
var entry repo.ChartVersions
entry, err = findEntryByName(name, cr)
@@ -744,7 +747,7 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*
//nolint:nakedret
return
}
- url, err = normalizeURL(repoURL, ve.URLs[0])
+ url, err = repo.ResolveReferenceURL(repoURL, ve.URLs[0])
if err != nil {
//nolint:nakedret
return
@@ -764,7 +767,7 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*
if err == nil {
return url, username, password, false, false, "", "", "", err
}
- err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err)
+ err = fmt.Errorf("chart %s not found in %s: %w", name, repoURL, err)
return url, username, password, false, false, "", "", "", err
}
@@ -810,24 +813,6 @@ func versionEquals(v1, v2 string) bool {
return sv1.Equal(sv2)
}
-func normalizeURL(baseURL, urlOrPath string) (string, error) {
- u, err := url.Parse(urlOrPath)
- if err != nil {
- return urlOrPath, err
- }
- if u.IsAbs() {
- return u.String(), nil
- }
- u2, err := url.Parse(baseURL)
- if err != nil {
- return urlOrPath, errors.Wrap(err, "base URL failed to parse")
- }
-
- u2.RawPath = path.Join(u2.RawPath, urlOrPath)
- u2.Path = path.Join(u2.Path, urlOrPath)
- return u2.String(), nil
-}
-
// loadChartRepositories reads the repositories.yaml, and then builds a map of
// ChartRepositories.
//
@@ -838,7 +823,7 @@ func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, err
// Load repositories.yaml file
rf, err := loadRepoConfig(m.RepositoryConfig)
if err != nil {
- return indices, errors.Wrapf(err, "failed to load %s", m.RepositoryConfig)
+ return indices, fmt.Errorf("failed to load %s: %w", m.RepositoryConfig, err)
}
for _, re := range rf.Repositories {
@@ -870,13 +855,27 @@ func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error {
lockfileName = "requirements.lock"
}
dest := filepath.Join(chartpath, lockfileName)
+
+ info, err := os.Lstat(dest)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("error getting info for %q: %w", dest, err)
+ } else if err == nil {
+ if info.Mode()&os.ModeSymlink != 0 {
+ link, err := os.Readlink(dest)
+ if err != nil {
+ return fmt.Errorf("error reading symlink for %q: %w", dest, err)
+ }
+ return fmt.Errorf("the %s file is a symlink to %q", lockfileName, link)
+ }
+ }
+
return os.WriteFile(dest, data, 0644)
}
// archive a dep chart from local directory and save it into destPath
func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, error) {
if !strings.HasPrefix(repo, "file://") {
- return "", errors.Errorf("wrong format: chart %s repository %s", name, repo)
+ return "", fmt.Errorf("wrong format: chart %s repository %s", name, repo)
}
origPath, err := resolver.GetLocalPath(repo, chartpath)
@@ -891,7 +890,7 @@ func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, e
constraint, err := semver.NewConstraint(version)
if err != nil {
- return "", errors.Wrapf(err, "dependency %s has an invalid version/constraint format", name)
+ return "", fmt.Errorf("dependency %s has an invalid version/constraint format: %w", name, err)
}
v, err := semver.NewVersion(ch.Metadata.Version)
@@ -904,7 +903,7 @@ func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, e
return ch.Metadata.Version, err
}
- return "", errors.Errorf("can't get a valid version for dependency %s", name)
+ return "", fmt.Errorf("can't get a valid version for dependency %s", name)
}
// The prefix to use for cache keys created by the manager for repo names
diff --git a/pkg/downloader/manager_test.go b/pkg/downloader/manager_test.go
index b8b009f1b..9e27f183f 100644
--- a/pkg/downloader/manager_test.go
+++ b/pkg/downloader/manager_test.go
@@ -17,19 +17,23 @@ package downloader
import (
"bytes"
+ "errors"
+ "io/fs"
"os"
"path/filepath"
"reflect"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ "sigs.k8s.io/yaml"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/getter"
- "helm.sh/helm/v4/pkg/repo"
- "helm.sh/helm/v4/pkg/repo/repotest"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
)
func TestVersionEquals(t *testing.T) {
@@ -51,26 +55,6 @@ func TestVersionEquals(t *testing.T) {
}
}
-func TestNormalizeURL(t *testing.T) {
- tests := []struct {
- name, base, path, expect string
- }{
- {name: "basic URL", base: "https://example.com", path: "http://helm.sh/foo", expect: "http://helm.sh/foo"},
- {name: "relative path", base: "https://helm.sh/charts", path: "foo", expect: "https://helm.sh/charts/foo"},
- {name: "Encoded path", base: "https://helm.sh/a%2Fb/charts", path: "foo", expect: "https://helm.sh/a%2Fb/charts/foo"},
- }
-
- for _, tt := range tests {
- got, err := normalizeURL(tt.base, tt.path)
- if err != nil {
- t.Errorf("%s: error %s", tt.name, err)
- continue
- } else if got != tt.expect {
- t.Errorf("%s: expected %q, got %q", tt.name, tt.expect, got)
- }
- }
-}
-
func TestFindChartURL(t *testing.T) {
var b bytes.Buffer
m := &Manager{
@@ -132,6 +116,31 @@ func TestFindChartURL(t *testing.T) {
if passcredentialsall != false {
t.Errorf("Unexpected passcredentialsall %t", passcredentialsall)
}
+
+ name = "foo"
+ version = "1.2.3"
+ repoURL = "http://example.com/helm"
+
+ churl, username, password, insecureSkipTLSVerify, passcredentialsall, _, _, _, err = m.findChartURL(name, version, repoURL, repos)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if churl != "http://example.com/helm/charts/foo-1.2.3.tgz" {
+ t.Errorf("Unexpected URL %q", churl)
+ }
+ if username != "" {
+ t.Errorf("Unexpected username %q", username)
+ }
+ if password != "" {
+ t.Errorf("Unexpected password %q", password)
+ }
+ if passcredentialsall != false {
+ t.Errorf("Unexpected passcredentialsall %t", passcredentialsall)
+ }
+ if insecureSkipTLSVerify {
+ t.Errorf("Unexpected insecureSkipTLSVerify %t", insecureSkipTLSVerify)
+ }
}
func TestGetRepoNames(t *testing.T) {
@@ -262,7 +271,7 @@ func TestDownloadAll(t *testing.T) {
t.Error(err)
}
- if _, err := os.Stat(filepath.Join(chartPath, "charts", "signtest-0.1.0.tgz")); os.IsNotExist(err) {
+ if _, err := os.Stat(filepath.Join(chartPath, "charts", "signtest-0.1.0.tgz")); errors.Is(err, fs.ErrNotExist) {
t.Error(err)
}
@@ -435,6 +444,7 @@ func TestUpdateWithNoRepo(t *testing.T) {
// Parent chart includes local-subchart 0.1.0 subchart from a fake repository, by default.
// If each of these main fields (name, version, repository) is not supplied by dep param, default value will be used.
func checkBuildWithOptionalFields(t *testing.T, chartName string, dep chart.Dependency) {
+ t.Helper()
// Set up a fake repo
srv := repotest.NewTempServer(
t,
@@ -478,12 +488,14 @@ func checkBuildWithOptionalFields(t *testing.T, chartName string, dep chart.Depe
Schemes: []string{"http", "https"},
New: getter.NewHTTPGetter,
}}
+ contentCache := t.TempDir()
m := &Manager{
ChartPath: dir(chartName),
Out: b,
Getters: g,
RepositoryConfig: dir("repositories.yaml"),
RepositoryCache: dir(),
+ ContentCache: contentCache,
}
// First build will update dependencies and create Chart.lock file.
@@ -664,3 +676,94 @@ func TestDedupeRepos(t *testing.T) {
})
}
}
+
+func TestWriteLock(t *testing.T) {
+ fixedTime, err := time.Parse(time.RFC3339, "2025-07-04T00:00:00Z")
+ assert.NoError(t, err)
+ lock := &chart.Lock{
+ Generated: fixedTime,
+ Digest: "sha256:12345",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "fantastic-chart",
+ Version: "1.2.3",
+ Repository: "https://example.com/charts",
+ },
+ },
+ }
+ expectedContent, err := yaml.Marshal(lock)
+ assert.NoError(t, err)
+
+ t.Run("v2 lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ err := writeLock(dir, lock, false)
+ assert.NoError(t, err)
+
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ _, err = os.Stat(lockfilePath)
+ assert.NoError(t, err, "Chart.lock should exist")
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+
+ // Check that requirements.lock does not exist
+ _, err = os.Stat(filepath.Join(dir, "requirements.lock"))
+ assert.Error(t, err)
+ assert.True(t, os.IsNotExist(err))
+ })
+
+ t.Run("v1 lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ err := writeLock(dir, lock, true)
+ assert.NoError(t, err)
+
+ lockfilePath := filepath.Join(dir, "requirements.lock")
+ _, err = os.Stat(lockfilePath)
+ assert.NoError(t, err, "requirements.lock should exist")
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+
+ // Check that Chart.lock does not exist
+ _, err = os.Stat(filepath.Join(dir, "Chart.lock"))
+ assert.Error(t, err)
+ assert.True(t, os.IsNotExist(err))
+ })
+
+ t.Run("overwrite existing lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ assert.NoError(t, os.WriteFile(lockfilePath, []byte("old content"), 0644))
+
+ err = writeLock(dir, lock, false)
+ assert.NoError(t, err)
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+ })
+
+ t.Run("lock file is a symlink", func(t *testing.T) {
+ dir := t.TempDir()
+ dummyFile := filepath.Join(dir, "dummy.txt")
+ assert.NoError(t, os.WriteFile(dummyFile, []byte("dummy"), 0644))
+
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ assert.NoError(t, os.Symlink(dummyFile, lockfilePath))
+
+ err = writeLock(dir, lock, false)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "the Chart.lock file is a symlink to")
+ })
+
+ t.Run("chart path is not a directory", func(t *testing.T) {
+ dir := t.TempDir()
+ filePath := filepath.Join(dir, "not-a-dir")
+ assert.NoError(t, os.WriteFile(filePath, []byte("file"), 0644))
+
+ err = writeLock(filePath, lock, false)
+ assert.Error(t, err)
+ })
+}
diff --git a/pkg/downloader/testdata/repository/testing-relative-index.yaml b/pkg/downloader/testdata/repository/testing-relative-index.yaml
index ba27ed257..9524daf6e 100644
--- a/pkg/downloader/testdata/repository/testing-relative-index.yaml
+++ b/pkg/downloader/testdata/repository/testing-relative-index.yaml
@@ -26,3 +26,16 @@ entries:
version: 1.2.3
checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
apiVersion: v2
+ baz:
+ - name: baz
+ description: Baz Chart With Absolute Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - /path/to/baz-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go
index 0d0a398be..7c858690f 100644
--- a/pkg/engine/engine.go
+++ b/pkg/engine/engine.go
@@ -17,8 +17,10 @@ limitations under the License.
package engine
import (
+ "errors"
"fmt"
- "log"
+ "log/slog"
+ "maps"
"path"
"path/filepath"
"regexp"
@@ -26,13 +28,24 @@ import (
"strings"
"text/template"
- "github.com/pkg/errors"
"k8s.io/client-go/rest"
- chart "helm.sh/helm/v4/pkg/chart/v2"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ ci "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
)
+// taken from https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=141
+// > "template: %s: executing %q at <%s>: %s"
+var execErrFmt = regexp.MustCompile(`^template: (?P(?U).+): executing (?P(?U).+) at (?P(?U).+): (?P(?U).+)(?P( template:.*)?)$`)
+
+// taken from https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=138
+// > "template: %s: %s"
+var execErrFmtWithoutTemplate = regexp.MustCompile(`^template: (?P(?U).+): (?P.*)(?P( template:.*)?)$`)
+
+// taken from https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=191
+// > "template: no template %q associated with template %q"
+var execErrNoTemplateAssociated = regexp.MustCompile(`^template: no template (?P.*) associated with template (?P(.*)?)$`)
+
// Engine is an implementation of the Helm rendering implementation for templates.
type Engine struct {
// If strict is enabled, template rendering will fail if a template references
@@ -44,6 +57,8 @@ type Engine struct {
clientProvider *ClientProvider
// EnableDNS tells the engine to allow DNS lookups when rendering templates
EnableDNS bool
+ // CustomTemplateFuncs is defined by users to provide custom template funcs
+ CustomTemplateFuncs template.FuncMap
}
// New creates a new instance of Engine using the passed in rest config.
@@ -73,21 +88,21 @@ func New(config *rest.Config) Engine {
// that section of the values will be passed into the "foo" chart. And if that
// section contains a value named "bar", that value will be passed on to the
// bar chart during render time.
-func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
+func (e Engine) Render(chrt ci.Charter, values common.Values) (map[string]string, error) {
tmap := allTemplates(chrt, values)
return e.render(tmap)
}
// Render takes a chart, optional values, and value overrides, and attempts to
// render the Go templates using the default options.
-func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
+func Render(chrt ci.Charter, values common.Values) (map[string]string, error) {
return new(Engine).Render(chrt, values)
}
// RenderWithClient takes a chart, optional values, and value overrides, and attempts to
// render the Go templates using the default options. This engine is client aware and so can have template
// functions that interact with the client.
-func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) {
+func RenderWithClient(chrt ci.Charter, values common.Values, config *rest.Config) (map[string]string, error) {
var clientProvider ClientProvider = clientProviderFromConfig{config}
return Engine{
clientProvider: &clientProvider,
@@ -98,7 +113,7 @@ func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.C
// render the Go templates using the default options. This engine is client aware and so can have template
// functions that interact with the client.
// This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed.
-func RenderWithClientProvider(chrt *chart.Chart, values chartutil.Values, clientProvider ClientProvider) (map[string]string, error) {
+func RenderWithClientProvider(chrt ci.Charter, values common.Values, clientProvider ClientProvider) (map[string]string, error) {
return Engine{
clientProvider: &clientProvider,
}.Render(chrt, values)
@@ -109,7 +124,7 @@ type renderable struct {
// tpl is the current template.
tpl string
// vals are the values to be supplied to the template.
- vals chartutil.Values
+ vals common.Values
// namespace prefix to the templates of the current chart
basePath string
}
@@ -131,7 +146,9 @@ func includeFun(t *template.Template, includedNames map[string]int) func(string,
var buf strings.Builder
if v, ok := includedNames[name]; ok {
if v > recursionMaxNums {
- return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name)
+ return "", fmt.Errorf(
+ "rendering template has a nested reference name: %s: %w",
+ name, errors.New("unable to execute template"))
}
includedNames[name]++
} else {
@@ -149,7 +166,7 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool
return func(tpl string, vals interface{}) (string, error) {
t, err := parent.Clone()
if err != nil {
- return "", errors.Wrapf(err, "cannot clone template")
+ return "", fmt.Errorf("cannot clone template: %w", err)
}
// Re-inject the missingkey option, see text/template issue https://github.com/golang/go/issues/43022
@@ -176,12 +193,12 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool
// text string. (Maybe we could use a hash appended to the name?)
t, err = t.New(parent.Name()).Parse(tpl)
if err != nil {
- return "", errors.Wrapf(err, "cannot parse template %q", tpl)
+ return "", fmt.Errorf("cannot parse template %q: %w", tpl, err)
}
var buf strings.Builder
if err := t.Execute(&buf, vals); err != nil {
- return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl)
+ return "", fmt.Errorf("error during tpl function execution for %q: %w", tpl, err)
}
// See comment in renderWithReferences explaining the hack.
@@ -203,7 +220,7 @@ func (e Engine) initFunMap(t *template.Template) {
if val == nil {
if e.LintMode {
// Don't fail on missing required values when linting
- log.Printf("[INFO] Missing required value: %s", warn)
+ slog.Warn("missing required value", "message", warn)
return "", nil
}
return val, errors.New(warnWrap(warn))
@@ -211,7 +228,7 @@ func (e Engine) initFunMap(t *template.Template) {
if val == "" {
if e.LintMode {
// Don't fail on missing required values when linting
- log.Printf("[INFO] Missing required value: %s", warn)
+ slog.Warn("missing required values", "message", warn)
return "", nil
}
return val, errors.New(warnWrap(warn))
@@ -224,7 +241,7 @@ func (e Engine) initFunMap(t *template.Template) {
funcMap["fail"] = func(msg string) (string, error) {
if e.LintMode {
// Don't fail when linting
- log.Printf("[INFO] Fail: %s", msg)
+ slog.Info("funcMap fail", "message", msg)
return "", nil
}
return "", errors.New(warnWrap(msg))
@@ -244,6 +261,9 @@ func (e Engine) initFunMap(t *template.Template) {
}
}
+ // Set custom template funcs
+ maps.Copy(funcMap, e.CustomTemplateFuncs)
+
t.Funcs(funcMap)
}
@@ -258,7 +278,7 @@ func (e Engine) render(tpls map[string]renderable) (rendered map[string]string,
// template engine.
defer func() {
if r := recover(); r != nil {
- err = errors.Errorf("rendering template failed: %v", r)
+ err = fmt.Errorf("rendering template failed: %v", r)
}
}()
t := template.New("gotpl")
@@ -292,10 +312,10 @@ func (e Engine) render(tpls map[string]renderable) (rendered map[string]string,
}
// At render time, add information about the template that is being rendered.
vals := tpls[filename].vals
- vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath}
+ vals["Template"] = common.Values{"Name": filename, "BasePath": tpls[filename].basePath}
var buf strings.Builder
if err := t.ExecuteTemplate(&buf, filename, vals); err != nil {
- return map[string]string{}, cleanupExecError(filename, err)
+ return map[string]string{}, reformatExecErrorMsg(filename, err)
}
// Work around the issue where Go will emit "" even if Options(missing=zero)
@@ -321,7 +341,33 @@ func cleanupParseError(filename string, err error) error {
return fmt.Errorf("parse error at (%s): %s", string(location), errMsg)
}
-func cleanupExecError(filename string, err error) error {
+type TraceableError struct {
+ location string
+ message string
+ executedFunction string
+}
+
+func (t TraceableError) String() string {
+ var errorString strings.Builder
+ if t.location != "" {
+ fmt.Fprintf(&errorString, "%s\n ", t.location)
+ }
+ if t.executedFunction != "" {
+ fmt.Fprintf(&errorString, "%s\n ", t.executedFunction)
+ }
+ if t.message != "" {
+ fmt.Fprintf(&errorString, "%s\n", t.message)
+ }
+ return errorString.String()
+}
+
+// reformatExecErrorMsg takes an error message for template rendering and formats it into a formatted
+// multi-line error string
+func reformatExecErrorMsg(filename string, err error) error {
+ // This function matches the error message against regex's for the text/template package.
+ // If the regex's can parse out details from that error message such as the line number, template it failed on,
+ // and error description, then it will construct a new error that displays these details in a structured way.
+ // If there are issues with parsing the error message, the err passed into the function should return instead.
if _, isExecError := err.(template.ExecError); !isExecError {
return err
}
@@ -340,8 +386,46 @@ func cleanupExecError(filename string, err error) error {
if len(parts) >= 2 {
return fmt.Errorf("execution error at (%s): %s", string(location), parts[1])
}
+ current := err
+ fileLocations := []TraceableError{}
+ for current != nil {
+ var traceable TraceableError
+ if matches := execErrFmt.FindStringSubmatch(current.Error()); matches != nil {
+ templateName := matches[execErrFmt.SubexpIndex("templateName")]
+ functionName := matches[execErrFmt.SubexpIndex("functionName")]
+ locationName := matches[execErrFmt.SubexpIndex("location")]
+ errMsg := matches[execErrFmt.SubexpIndex("errMsg")]
+ traceable = TraceableError{
+ location: templateName,
+ message: errMsg,
+ executedFunction: "executing " + functionName + " at " + locationName + ":",
+ }
+ } else if matches := execErrFmtWithoutTemplate.FindStringSubmatch(current.Error()); matches != nil {
+ templateName := matches[execErrFmt.SubexpIndex("templateName")]
+ errMsg := matches[execErrFmt.SubexpIndex("errMsg")]
+ traceable = TraceableError{
+ location: templateName,
+ message: errMsg,
+ }
+ } else if matches := execErrNoTemplateAssociated.FindStringSubmatch(current.Error()); matches != nil {
+ traceable = TraceableError{
+ message: current.Error(),
+ }
+ } else {
+ return err
+ }
+ if len(fileLocations) == 0 || fileLocations[len(fileLocations)-1] != traceable {
+ fileLocations = append(fileLocations, traceable)
+ }
+ current = errors.Unwrap(current)
+ }
- return err
+ var finalErrorString strings.Builder
+ for _, fileLocation := range fileLocations {
+ fmt.Fprintf(&finalErrorString, "%s", fileLocation.String())
+ }
+
+ return errors.New(strings.TrimSpace(finalErrorString.String()))
}
func sortTemplates(tpls map[string]renderable) []string {
@@ -371,7 +455,7 @@ func (p byPathLen) Less(i, j int) bool {
// allTemplates returns all templates for a chart and its dependencies.
//
// As it goes, it also prepares the values in a scope-sensitive manner.
-func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
+func allTemplates(c ci.Charter, vals common.Values) map[string]renderable {
templates := make(map[string]renderable)
recAllTpls(c, templates, vals)
return templates
@@ -381,40 +465,45 @@ func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
//
// As it recurses, it also sets the values to be appropriate for the template
// scope.
-func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} {
+func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Values) map[string]interface{} {
+ vals := values.AsMap()
subCharts := make(map[string]interface{})
- chartMetaData := struct {
- chart.Metadata
- IsRoot bool
- }{*c.Metadata, c.IsRoot()}
+ accessor, err := ci.NewAccessor(c)
+ if err != nil {
+ slog.Error("error accessing chart", "error", err)
+ }
+ chartMetaData := accessor.MetadataAsMap()
+ chartMetaData["IsRoot"] = accessor.IsRoot()
next := map[string]interface{}{
"Chart": chartMetaData,
- "Files": newFiles(c.Files),
+ "Files": newFiles(accessor.Files()),
"Release": vals["Release"],
"Capabilities": vals["Capabilities"],
- "Values": make(chartutil.Values),
+ "Values": make(common.Values),
"Subcharts": subCharts,
}
// If there is a {{.Values.ThisChart}} in the parent metadata,
// copy that into the {{.Values}} for this template.
- if c.IsRoot() {
+ if accessor.IsRoot() {
next["Values"] = vals["Values"]
- } else if vs, err := vals.Table("Values." + c.Name()); err == nil {
+ } else if vs, err := values.Table("Values." + accessor.Name()); err == nil {
next["Values"] = vs
}
- for _, child := range c.Dependencies() {
- subCharts[child.Name()] = recAllTpls(child, templates, next)
+ for _, child := range accessor.Dependencies() {
+ // TODO: Handle error
+ sub, _ := ci.NewAccessor(child)
+ subCharts[sub.Name()] = recAllTpls(child, templates, next)
}
- newParentID := c.ChartFullPath()
- for _, t := range c.Templates {
+ newParentID := accessor.ChartFullPath()
+ for _, t := range accessor.Templates() {
if t == nil {
continue
}
- if !isTemplateValid(c, t.Name) {
+ if !isTemplateValid(accessor, t.Name) {
continue
}
templates[path.Join(newParentID, t.Name)] = renderable{
@@ -428,14 +517,9 @@ func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.
}
// isTemplateValid returns true if the template is valid for the chart type
-func isTemplateValid(ch *chart.Chart, templateName string) bool {
- if isLibraryChart(ch) {
+func isTemplateValid(accessor ci.Accessor, templateName string) bool {
+ if accessor.IsLibraryChart() {
return strings.HasPrefix(filepath.Base(templateName), "_")
}
return true
}
-
-// isLibraryChart returns true if the chart is a library chart
-func isLibraryChart(c *chart.Chart) bool {
- return strings.EqualFold(c.Metadata.Type, "library")
-}
diff --git a/pkg/engine/engine_test.go b/pkg/engine/engine_test.go
index a54e99cad..7ac892cec 100644
--- a/pkg/engine/engine_test.go
+++ b/pkg/engine/engine_test.go
@@ -24,14 +24,17 @@ import (
"testing"
"text/template"
+ "github.com/stretchr/testify/assert"
+
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/fake"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
chart "helm.sh/helm/v4/pkg/chart/v2"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
)
func TestSortTemplates(t *testing.T) {
@@ -92,7 +95,7 @@ func TestRender(t *testing.T) {
Name: "moby",
Version: "1.2.3",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/test1", Data: []byte("{{.Values.outer | title }} {{.Values.inner | title}}")},
{Name: "templates/test2", Data: []byte("{{.Values.global.callme | lower }}")},
{Name: "templates/test3", Data: []byte("{{.noValue}}")},
@@ -112,7 +115,7 @@ func TestRender(t *testing.T) {
},
}
- v, err := chartutil.CoalesceValues(c, vals)
+ v, err := util.CoalesceValues(c, vals)
if err != nil {
t.Fatalf("Failed to coalesce values: %s", err)
}
@@ -142,7 +145,7 @@ func TestRenderRefsOrdering(t *testing.T) {
Name: "parent",
Version: "1.2.3",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/_helpers.tpl", Data: []byte(`{{- define "test" -}}parent value{{- end -}}`)},
{Name: "templates/test.yaml", Data: []byte(`{{ tpl "{{ include \"test\" . }}" . }}`)},
},
@@ -152,7 +155,7 @@ func TestRenderRefsOrdering(t *testing.T) {
Name: "child",
Version: "1.2.3",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/_helpers.tpl", Data: []byte(`{{- define "test" -}}child value{{- end -}}`)},
},
}
@@ -163,7 +166,7 @@ func TestRenderRefsOrdering(t *testing.T) {
}
for i := 0; i < 100; i++ {
- out, err := Render(parentChart, chartutil.Values{})
+ out, err := Render(parentChart, common.Values{})
if err != nil {
t.Fatalf("Failed to render templates: %s", err)
}
@@ -179,7 +182,7 @@ func TestRenderRefsOrdering(t *testing.T) {
func TestRenderInternals(t *testing.T) {
// Test the internals of the rendering tool.
- vals := chartutil.Values{"Name": "one", "Value": "two"}
+ vals := common.Values{"Name": "one", "Value": "two"}
tpls := map[string]renderable{
"one": {tpl: `Hello {{title .Name}}`, vals: vals},
"two": {tpl: `Goodbye {{upper .Value}}`, vals: vals},
@@ -216,7 +219,7 @@ func TestRenderWithDNS(t *testing.T) {
Name: "moby",
Version: "1.2.3",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/test1", Data: []byte("{{getHostByName \"helm.sh\"}}")},
},
Values: map[string]interface{}{},
@@ -226,7 +229,7 @@ func TestRenderWithDNS(t *testing.T) {
"Values": map[string]interface{}{},
}
- v, err := chartutil.CoalesceValues(c, vals)
+ v, err := util.CoalesceValues(c, vals)
if err != nil {
t.Fatalf("Failed to coalesce values: %s", err)
}
@@ -353,7 +356,7 @@ func TestRenderWithClientProvider(t *testing.T) {
}
for name, exp := range cases {
- c.Templates = append(c.Templates, &chart.File{
+ c.Templates = append(c.Templates, &common.File{
Name: path.Join("templates", name),
Data: []byte(exp.template),
})
@@ -363,7 +366,7 @@ func TestRenderWithClientProvider(t *testing.T) {
"Values": map[string]interface{}{},
}
- v, err := chartutil.CoalesceValues(c, vals)
+ v, err := util.CoalesceValues(c, vals)
if err != nil {
t.Fatalf("Failed to coalesce values: %s", err)
}
@@ -389,7 +392,7 @@ func TestRenderWithClientProvider_error(t *testing.T) {
Name: "moby",
Version: "1.2.3",
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/error", Data: []byte(`{{ lookup "v1" "Error" "" "" }}`)},
},
Values: map[string]interface{}{},
@@ -399,7 +402,7 @@ func TestRenderWithClientProvider_error(t *testing.T) {
"Values": map[string]interface{}{},
}
- v, err := chartutil.CoalesceValues(c, vals)
+ v, err := util.CoalesceValues(c, vals)
if err != nil {
t.Fatalf("Failed to coalesce values: %s", err)
}
@@ -446,7 +449,7 @@ func TestParallelRenderInternals(t *testing.T) {
}
func TestParseErrors(t *testing.T) {
- vals := chartutil.Values{"Values": map[string]interface{}{}}
+ vals := common.Values{"Values": map[string]interface{}{}}
tplsUndefinedFunction := map[string]renderable{
"undefined_function": {tpl: `{{foo}}`, vals: vals},
@@ -462,7 +465,7 @@ func TestParseErrors(t *testing.T) {
}
func TestExecErrors(t *testing.T) {
- vals := chartutil.Values{"Values": map[string]interface{}{}}
+ vals := common.Values{"Values": map[string]interface{}{}}
cases := []struct {
name string
tpls map[string]renderable
@@ -526,7 +529,7 @@ linebreak`,
}
func TestFailErrors(t *testing.T) {
- vals := chartutil.Values{"Values": map[string]interface{}{}}
+ vals := common.Values{"Values": map[string]interface{}{}}
failtpl := `All your base are belong to us{{ fail "This is an error" }}`
tplsFailed := map[string]renderable{
@@ -557,14 +560,14 @@ func TestFailErrors(t *testing.T) {
func TestAllTemplates(t *testing.T) {
ch1 := &chart.Chart{
Metadata: &chart.Metadata{Name: "ch1"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/foo", Data: []byte("foo")},
{Name: "templates/bar", Data: []byte("bar")},
},
}
dep1 := &chart.Chart{
Metadata: &chart.Metadata{Name: "laboratory mice"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/pinky", Data: []byte("pinky")},
{Name: "templates/brain", Data: []byte("brain")},
},
@@ -573,13 +576,13 @@ func TestAllTemplates(t *testing.T) {
dep2 := &chart.Chart{
Metadata: &chart.Metadata{Name: "same thing we do every night"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/innermost", Data: []byte("innermost")},
},
}
dep1.AddDependency(dep2)
- tpls := allTemplates(ch1, chartutil.Values{})
+ tpls := allTemplates(ch1, common.Values{})
if len(tpls) != 5 {
t.Errorf("Expected 5 charts, got %d", len(tpls))
}
@@ -588,19 +591,19 @@ func TestAllTemplates(t *testing.T) {
func TestChartValuesContainsIsRoot(t *testing.T) {
ch1 := &chart.Chart{
Metadata: &chart.Metadata{Name: "parent"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/isroot", Data: []byte("{{.Chart.IsRoot}}")},
},
}
dep1 := &chart.Chart{
Metadata: &chart.Metadata{Name: "child"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/isroot", Data: []byte("{{.Chart.IsRoot}}")},
},
}
ch1.AddDependency(dep1)
- out, err := Render(ch1, chartutil.Values{})
+ out, err := Render(ch1, common.Values{})
if err != nil {
t.Fatalf("failed to render templates: %s", err)
}
@@ -620,13 +623,13 @@ func TestRenderDependency(t *testing.T) {
toptpl := `Hello {{template "myblock"}}`
ch := &chart.Chart{
Metadata: &chart.Metadata{Name: "outerchart"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/outer", Data: []byte(toptpl)},
},
}
ch.AddDependency(&chart.Chart{
Metadata: &chart.Metadata{Name: "innerchart"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/inner", Data: []byte(deptpl)},
},
})
@@ -658,7 +661,7 @@ func TestRenderNestedValues(t *testing.T) {
deepest := &chart.Chart{
Metadata: &chart.Metadata{Name: "deepest"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: deepestpath, Data: []byte(`And this same {{.Values.what}} that smiles {{.Values.global.when}}`)},
{Name: checkrelease, Data: []byte(`Tomorrow will be {{default "happy" .Release.Name }}`)},
},
@@ -667,7 +670,7 @@ func TestRenderNestedValues(t *testing.T) {
inner := &chart.Chart{
Metadata: &chart.Metadata{Name: "herrick"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: innerpath, Data: []byte(`Old {{.Values.who}} is still a-flyin'`)},
},
Values: map[string]interface{}{"who": "Robert", "what": "glasses"},
@@ -676,7 +679,7 @@ func TestRenderNestedValues(t *testing.T) {
outer := &chart.Chart{
Metadata: &chart.Metadata{Name: "top"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: outerpath, Data: []byte(`Gather ye {{.Values.what}} while ye may`)},
{Name: subchartspath, Data: []byte(`The glorious Lamp of {{.Subcharts.herrick.Subcharts.deepest.Values.where}}, the {{.Subcharts.herrick.Values.what}}`)},
},
@@ -704,15 +707,15 @@ func TestRenderNestedValues(t *testing.T) {
},
}
- tmp, err := chartutil.CoalesceValues(outer, injValues)
+ tmp, err := util.CoalesceValues(outer, injValues)
if err != nil {
t.Fatalf("Failed to coalesce values: %s", err)
}
- inject := chartutil.Values{
+ inject := common.Values{
"Values": tmp,
"Chart": outer.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "dyin",
},
}
@@ -752,30 +755,30 @@ func TestRenderNestedValues(t *testing.T) {
func TestRenderBuiltinValues(t *testing.T) {
inner := &chart.Chart{
- Metadata: &chart.Metadata{Name: "Latium"},
- Templates: []*chart.File{
+ Metadata: &chart.Metadata{Name: "Latium", APIVersion: chart.APIVersionV2},
+ Templates: []*common.File{
{Name: "templates/Lavinia", Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)},
{Name: "templates/From", Data: []byte(`{{.Files.author | printf "%s"}} {{.Files.Get "book/title.txt"}}`)},
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "author", Data: []byte("Virgil")},
{Name: "book/title.txt", Data: []byte("Aeneid")},
},
}
outer := &chart.Chart{
- Metadata: &chart.Metadata{Name: "Troy"},
- Templates: []*chart.File{
+ Metadata: &chart.Metadata{Name: "Troy", APIVersion: chart.APIVersionV2},
+ Templates: []*common.File{
{Name: "templates/Aeneas", Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)},
{Name: "templates/Amata", Data: []byte(`{{.Subcharts.Latium.Chart.Name}} {{.Subcharts.Latium.Files.author | printf "%s"}}`)},
},
}
outer.AddDependency(inner)
- inject := chartutil.Values{
+ inject := common.Values{
"Values": "",
"Chart": outer.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "Aeneid",
},
}
@@ -804,7 +807,7 @@ func TestRenderBuiltinValues(t *testing.T) {
func TestAlterFuncMap_include(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "conrad"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/quote", Data: []byte(`{{include "conrad/templates/_partial" . | indent 2}} dead.`)},
{Name: "templates/_partial", Data: []byte(`{{.Release.Name}} - he`)},
},
@@ -813,16 +816,16 @@ func TestAlterFuncMap_include(t *testing.T) {
// Check nested reference in include FuncMap
d := &chart.Chart{
Metadata: &chart.Metadata{Name: "nested"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/quote", Data: []byte(`{{include "nested/templates/quote" . | indent 2}} dead.`)},
{Name: "templates/_partial", Data: []byte(`{{.Release.Name}} - he`)},
},
}
- v := chartutil.Values{
+ v := common.Values{
"Values": "",
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "Mistah Kurtz",
},
}
@@ -847,19 +850,19 @@ func TestAlterFuncMap_include(t *testing.T) {
func TestAlterFuncMap_require(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "conan"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/quote", Data: []byte(`All your base are belong to {{ required "A valid 'who' is required" .Values.who }}`)},
{Name: "templates/bases", Data: []byte(`All {{ required "A valid 'bases' is required" .Values.bases }} of them!`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"who": "us",
"bases": 2,
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "That 90s meme",
},
}
@@ -880,12 +883,12 @@ func TestAlterFuncMap_require(t *testing.T) {
// test required without passing in needed values with lint mode on
// verifies lint replaces required with an empty string (should not fail)
- lintValues := chartutil.Values{
- "Values": chartutil.Values{
+ lintValues := common.Values{
+ "Values": common.Values{
"who": "us",
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "That 90s meme",
},
}
@@ -909,17 +912,17 @@ func TestAlterFuncMap_require(t *testing.T) {
func TestAlterFuncMap_tpl(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplFunction"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/base", Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value}}" .}}`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"value": "myvalue",
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -938,17 +941,17 @@ func TestAlterFuncMap_tpl(t *testing.T) {
func TestAlterFuncMap_tplfunc(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplFunction"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/base", Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value | quote}}" .}}`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"value": "myvalue",
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -967,17 +970,17 @@ func TestAlterFuncMap_tplfunc(t *testing.T) {
func TestAlterFuncMap_tplinclude(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplFunction"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/base", Data: []byte(`{{ tpl "{{include ` + "`" + `TplFunction/templates/_partial` + "`" + ` . | quote }}" .}}`)},
{Name: "templates/_partial", Data: []byte(`{{.Template.Name}}`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"value": "myvalue",
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -998,15 +1001,15 @@ func TestRenderRecursionLimit(t *testing.T) {
// endless recursion should produce an error
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "bad"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/base", Data: []byte(`{{include "recursion" . }}`)},
{Name: "templates/recursion", Data: []byte(`{{define "recursion"}}{{include "recursion" . }}{{end}}`)},
},
}
- v := chartutil.Values{
+ v := common.Values{
"Values": "",
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1028,7 +1031,7 @@ func TestRenderRecursionLimit(t *testing.T) {
d := &chart.Chart{
Metadata: &chart.Metadata{Name: "overlook"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/quote", Data: []byte(repeatedIncl)},
{Name: "templates/_function", Data: []byte(printFunc)},
},
@@ -1052,23 +1055,23 @@ func TestRenderRecursionLimit(t *testing.T) {
func TestRenderLoadTemplateForTplFromFile(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplLoadFromFile"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/base", Data: []byte(`{{ tpl (.Files.Get .Values.filename) . }}`)},
{Name: "templates/_function", Data: []byte(`{{define "test-function"}}test-function{{end}}`)},
},
- Files: []*chart.File{
+ Files: []*common.File{
{Name: "test", Data: []byte(`{{ tpl (.Files.Get .Values.filename2) .}}`)},
{Name: "test2", Data: []byte(`{{include "test-function" .}}{{define "nested-define"}}nested-define-content{{end}} {{include "nested-define" .}}`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"filename": "test",
"filename2": "test2",
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1087,15 +1090,15 @@ func TestRenderLoadTemplateForTplFromFile(t *testing.T) {
func TestRenderTplEmpty(t *testing.T) {
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplEmpty"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/empty-string", Data: []byte(`{{tpl "" .}}`)},
{Name: "templates/empty-action", Data: []byte(`{{tpl "{{ \"\"}}" .}}`)},
{Name: "templates/only-defines", Data: []byte(`{{tpl "{{define \"not-invoked\"}}not-rendered{{end}}" .}}`)},
},
}
- v := chartutil.Values{
+ v := common.Values{
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1121,7 +1124,7 @@ func TestRenderTplTemplateNames(t *testing.T) {
// .Template.BasePath and .Name make it through
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplTemplateNames"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/default-basepath", Data: []byte(`{{tpl "{{ .Template.BasePath }}" .}}`)},
{Name: "templates/default-name", Data: []byte(`{{tpl "{{ .Template.Name }}" .}}`)},
{Name: "templates/modified-basepath", Data: []byte(`{{tpl "{{ .Template.BasePath }}" .Values.dot}}`)},
@@ -1129,10 +1132,10 @@ func TestRenderTplTemplateNames(t *testing.T) {
{Name: "templates/modified-field", Data: []byte(`{{tpl "{{ .Template.Field }}" .Values.dot}}`)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
- "dot": chartutil.Values{
- "Template": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
+ "dot": common.Values{
+ "Template": common.Values{
"BasePath": "path/to/template",
"Name": "name-of-template",
"Field": "extra-field",
@@ -1140,7 +1143,7 @@ func TestRenderTplTemplateNames(t *testing.T) {
},
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1168,7 +1171,7 @@ func TestRenderTplRedefines(t *testing.T) {
// Redefining a template inside 'tpl' does not affect the outer definition
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplRedefines"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/_partials", Data: []byte(`{{define "partial"}}original-in-partial{{end}}`)},
{Name: "templates/partial", Data: []byte(
`before: {{include "partial" .}}\n{{tpl .Values.partialText .}}\nafter: {{include "partial" .}}`,
@@ -1190,8 +1193,8 @@ func TestRenderTplRedefines(t *testing.T) {
)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{
+ v := common.Values{
+ "Values": common.Values{
"partialText": `{{define "partial"}}redefined-in-tpl{{end}}tpl: {{include "partial" .}}`,
"manifestText": `{{define "manifest"}}redefined-in-tpl{{end}}tpl: {{include "manifest" .}}`,
"manifestOnlyText": `tpl: {{include "manifest-only" .}}`,
@@ -1203,7 +1206,7 @@ func TestRenderTplRedefines(t *testing.T) {
"innerText": `{{define "nested"}}redefined-in-inner-tpl{{end}}inner-tpl: {{include "nested" .}} {{include "nested-outer" . }}`,
},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1234,16 +1237,16 @@ func TestRenderTplMissingKey(t *testing.T) {
// Rendering a missing key results in empty/zero output.
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplMissingKey"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/manifest", Data: []byte(
`missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`,
)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{},
+ v := common.Values{
+ "Values": common.Values{},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1267,16 +1270,16 @@ func TestRenderTplMissingKeyString(t *testing.T) {
// Rendering a missing key results in error
c := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplMissingKeyStrict"},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/manifest", Data: []byte(
`missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`,
)},
},
}
- v := chartutil.Values{
- "Values": chartutil.Values{},
+ v := common.Values{
+ "Values": common.Values{},
"Chart": c.Metadata,
- "Release": chartutil.Values{
+ "Release": common.Values{
"Name": "TestRelease",
},
}
@@ -1289,14 +1292,140 @@ func TestRenderTplMissingKeyString(t *testing.T) {
t.Errorf("Expected error, got %v", out)
return
}
- switch err.(type) {
- case (template.ExecError):
- errTxt := fmt.Sprint(err)
- if !strings.Contains(errTxt, "noSuchKey") {
- t.Errorf("Expected error to contain 'noSuchKey', got %s", errTxt)
- }
- default:
- // Some unexpected error.
+ errTxt := fmt.Sprint(err)
+ if !strings.Contains(errTxt, "noSuchKey") {
+ t.Errorf("Expected error to contain 'noSuchKey', got %s", errTxt)
+ }
+
+}
+
+func TestNestedHelpersProducesMultilineStacktrace(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "NestedHelperFunctions"},
+ Templates: []*common.File{
+ {Name: "templates/svc.yaml", Data: []byte(
+ `name: {{ include "nested_helper.name" . }}`,
+ )},
+ {Name: "templates/_helpers_1.tpl", Data: []byte(
+ `{{- define "nested_helper.name" -}}{{- include "common.names.get_name" . -}}{{- end -}}`,
+ )},
+ {Name: "charts/common/templates/_helpers_2.tpl", Data: []byte(
+ `{{- define "common.names.get_name" -}}{{- .Values.nonexistant.key | trunc 63 | trimSuffix "-" -}}{{- end -}}`,
+ )},
+ },
+ }
+
+ expectedErrorMessage := `NestedHelperFunctions/templates/svc.yaml:1:9
+ executing "NestedHelperFunctions/templates/svc.yaml" at :
+ error calling include:
+NestedHelperFunctions/templates/_helpers_1.tpl:1:39
+ executing "nested_helper.name" at :
+ error calling include:
+NestedHelperFunctions/charts/common/templates/_helpers_2.tpl:1:49
+ executing "common.names.get_name" at <.Values.nonexistant.key>:
+ nil pointer evaluating interface {}.key`
+
+ v := common.Values{}
+
+ val, _ := util.CoalesceValues(c, v)
+ vals := map[string]interface{}{
+ "Values": val.AsMap(),
+ }
+ _, err := Render(c, vals)
+
+ assert.NotNil(t, err)
+ assert.Equal(t, expectedErrorMessage, err.Error())
+}
+
+func TestMultilineNoTemplateAssociatedError(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "multiline"},
+ Templates: []*common.File{
+ {Name: "templates/svc.yaml", Data: []byte(
+ `name: {{ include "nested_helper.name" . }}`,
+ )},
+ {Name: "templates/test.yaml", Data: []byte(
+ `{{ toYaml .Values }}`,
+ )},
+ {Name: "charts/common/templates/_helpers_2.tpl", Data: []byte(
+ `{{ toYaml .Values }}`,
+ )},
+ },
+ }
+
+ expectedErrorMessage := `multiline/templates/svc.yaml:1:9
+ executing "multiline/templates/svc.yaml" at :
+ error calling include:
+template: no template "nested_helper.name" associated with template "gotpl"`
+
+ v := common.Values{}
+
+ val, _ := util.CoalesceValues(c, v)
+ vals := map[string]interface{}{
+ "Values": val.AsMap(),
+ }
+ _, err := Render(c, vals)
+
+ assert.NotNil(t, err)
+ assert.Equal(t, expectedErrorMessage, err.Error())
+}
+
+func TestRenderCustomTemplateFuncs(t *testing.T) {
+ // Create a chart with two templates that use custom functions
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "CustomFunc"},
+ Templates: []*common.File{
+ {
+ Name: "templates/manifest",
+ Data: []byte(`{{exclaim .Values.message}}`),
+ },
+ {
+ Name: "templates/override",
+ Data: []byte(`{{ upper .Values.message }}`),
+ },
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{
+ "message": "hello",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ // Define a custom template function "exclaim" that appends "!!!" to a string and override "upper" function
+ customFuncs := template.FuncMap{
+ "exclaim": func(input string) string {
+ return input + "!!!"
+ },
+ "upper": func(s string) string {
+ return "custom:" + s
+ },
+ }
+
+ // Create an engine instance and set the CustomTemplateFuncs.
+ e := new(Engine)
+ e.CustomTemplateFuncs = customFuncs
+
+ // Render the chart.
+ out, err := e.Render(c, v)
+ if err != nil {
t.Fatal(err)
}
+
+ // Expected output should be "hello!!!".
+ expected := "hello!!!"
+ key := "CustomFunc/templates/manifest"
+ if rendered, ok := out[key]; !ok || rendered != expected {
+ t.Errorf("Expected %q, got %q", expected, rendered)
+ }
+
+ // Verify that the rendered template used the custom "upper" function.
+ expected = "custom:hello"
+ key = "CustomFunc/templates/override"
+ if rendered, ok := out[key]; !ok || rendered != expected {
+ t.Errorf("Expected %q, got %q", expected, rendered)
+ }
}
diff --git a/pkg/engine/files.go b/pkg/engine/files.go
index 87166728c..f0a86988e 100644
--- a/pkg/engine/files.go
+++ b/pkg/engine/files.go
@@ -23,7 +23,7 @@ import (
"github.com/gobwas/glob"
- chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/common"
)
// files is a map of files in a chart that can be accessed from a template.
@@ -31,7 +31,7 @@ type files map[string][]byte
// NewFiles creates a new files from chart files.
// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files.
-func newFiles(from []*chart.File) files {
+func newFiles(from []*common.File) files {
files := make(map[string][]byte)
for _, f := range from {
files[f.Name] = f.Data
diff --git a/pkg/engine/funcs.go b/pkg/engine/funcs.go
index d03a818c2..a97f8f104 100644
--- a/pkg/engine/funcs.go
+++ b/pkg/engine/funcs.go
@@ -19,6 +19,7 @@ package engine
import (
"bytes"
"encoding/json"
+ "maps"
"strings"
"text/template"
@@ -51,10 +52,12 @@ func funcMap() template.FuncMap {
"toToml": toTOML,
"fromToml": fromTOML,
"toYaml": toYAML,
+ "mustToYaml": mustToYAML,
"toYamlPretty": toYAMLPretty,
"fromYaml": fromYAML,
"fromYamlArray": fromYAMLArray,
"toJson": toJSON,
+ "mustToJson": mustToJSON,
"fromJson": fromJSON,
"fromJsonArray": fromJSONArray,
@@ -71,9 +74,7 @@ func funcMap() template.FuncMap {
},
}
- for k, v := range extra {
- f[k] = v
- }
+ maps.Copy(f, extra)
return f
}
@@ -91,6 +92,19 @@ func toYAML(v interface{}) string {
return strings.TrimSuffix(string(data), "\n")
}
+// mustToYAML takes an interface, marshals it to yaml, and returns a string.
+// It will panic if there is an error.
+//
+// This is designed to be called from a template when need to ensure that the
+// output YAML is valid.
+func mustToYAML(v interface{}) string {
+ data, err := yaml.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ return strings.TrimSuffix(string(data), "\n")
+}
+
func toYAMLPretty(v interface{}) string {
var data bytes.Buffer
encoder := goYaml.NewEncoder(&data)
@@ -176,6 +190,19 @@ func toJSON(v interface{}) string {
return string(data)
}
+// mustToJSON takes an interface, marshals it to json, and returns a string.
+// It will panic if there is an error.
+//
+// This is designed to be called from a template when need to ensure that the
+// output JSON is valid.
+func mustToJSON(v interface{}) string {
+ data, err := json.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
// fromJSON converts a JSON document into a map[string]interface{}.
//
// This is not a general-purpose JSON parser, and will not parse all valid
diff --git a/pkg/engine/funcs_test.go b/pkg/engine/funcs_test.go
index a4f4d604f..71a72e2e4 100644
--- a/pkg/engine/funcs_test.go
+++ b/pkg/engine/funcs_test.go
@@ -63,7 +63,7 @@ keyInElement0 = "valueInElement0"
keyInElement1 = "valueInElement1"`,
}, {
tpl: `{{ fromToml . }}`,
- expect: "map[Error:toml: line 0: unexpected EOF; expected key separator '=']",
+ expect: "map[Error:toml: line 1: unexpected EOF; expected key separator '=']",
vars: "one",
}, {
tpl: `{{ toJson . }}`,
@@ -135,6 +135,43 @@ keyInElement1 = "valueInElement1"`,
assert.NoError(t, err)
assert.Equal(t, tt.expect, b.String(), tt.tpl)
}
+
+ loopMap := map[string]interface{}{
+ "foo": "bar",
+ }
+ loopMap["loop"] = []interface{}{loopMap}
+
+ mustFuncsTests := []struct {
+ tpl string
+ expect interface{}
+ vars interface{}
+ }{{
+ tpl: `{{ mustToYaml . }}`,
+ vars: loopMap,
+ }, {
+ tpl: `{{ mustToJson . }}`,
+ vars: loopMap,
+ }, {
+ tpl: `{{ toYaml . }}`,
+ expect: "", // should return empty string and swallow error
+ vars: loopMap,
+ }, {
+ tpl: `{{ toJson . }}`,
+ expect: "", // should return empty string and swallow error
+ vars: loopMap,
+ },
+ }
+
+ for _, tt := range mustFuncsTests {
+ var b strings.Builder
+ err := template.Must(template.New("test").Funcs(funcMap()).Parse(tt.tpl)).Execute(&b, tt.vars)
+ if tt.expect != nil {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expect, b.String(), tt.tpl)
+ } else {
+ assert.Error(t, err)
+ }
+ }
}
// This test to check a function provided by sprig is due to a change in a
diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go
index 75e85098d..18ed2b63b 100644
--- a/pkg/engine/lookup_func.go
+++ b/pkg/engine/lookup_func.go
@@ -18,10 +18,10 @@ package engine
import (
"context"
- "log"
+ "fmt"
+ "log/slog"
"strings"
- "github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -35,10 +35,7 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam
// NewLookupFunction returns a function for looking up objects in the cluster.
//
// If the resource does not exist, no error is raised.
-//
-// This function is considered deprecated, and will be renamed in Helm 4. It will no
-// longer be a public function.
-func NewLookupFunction(config *rest.Config) lookupFunc {
+func NewLookupFunction(config *rest.Config) lookupFunc { //nolint:revive
return newLookupFunction(clientProviderFromConfig{config: config})
}
@@ -101,8 +98,8 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
gvk := schema.FromAPIVersionAndKind(apiversion, kind)
apiRes, err := getAPIResourceForGVK(gvk, config)
if err != nil {
- log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err)
- return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
+ slog.Error("unable to get apiresource", "groupVersionKind", gvk.String(), slog.Any("error", err))
+ return nil, false, fmt.Errorf("unable to get apiresource from unstructured: %s: %w", gvk.String(), err)
}
gvr := schema.GroupVersionResource{
Group: apiRes.Group,
@@ -111,7 +108,7 @@ func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config)
}
intf, err := dynamic.NewForConfig(config)
if err != nil {
- log.Printf("[ERROR] unable to get dynamic client %s", err)
+ slog.Error("unable to get dynamic client", slog.Any("error", err))
return nil, false, err
}
res := intf.Resource(gvr)
@@ -122,12 +119,12 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met
res := metav1.APIResource{}
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
- log.Printf("[ERROR] unable to create discovery client %s", err)
+ slog.Error("unable to create discovery client", slog.Any("error", err))
return res, err
}
resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
if err != nil {
- log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err)
+ slog.Error("unable to retrieve resource list", "GroupVersion", gvk.GroupVersion().String(), slog.Any("error", err))
return res, err
}
for _, resource := range resList.APIResources {
diff --git a/pkg/gates/gates_test.go b/pkg/gates/gates_test.go
index 6bdd17ed6..4d77199e6 100644
--- a/pkg/gates/gates_test.go
+++ b/pkg/gates/gates_test.go
@@ -23,14 +23,13 @@ import (
const name string = "HELM_EXPERIMENTAL_FEATURE"
func TestIsEnabled(t *testing.T) {
- os.Unsetenv(name)
g := Gate(name)
if g.IsEnabled() {
t.Errorf("feature gate shows as available, but the environment variable %s was not set", name)
}
- os.Setenv(name, "1")
+ t.Setenv(name, "1")
if !g.IsEnabled() {
t.Errorf("feature gate shows as disabled, but the environment variable %s was set", name)
diff --git a/pkg/getter/getter.go b/pkg/getter/getter.go
index 5014784bc..a2d0f0ee2 100644
--- a/pkg/getter/getter.go
+++ b/pkg/getter/getter.go
@@ -18,19 +18,20 @@ package getter
import (
"bytes"
+ "fmt"
"net/http"
+ "slices"
"time"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/registry"
)
-// options are generic parameters to be provided to the getter during instantiation.
+// getterOptions are generic parameters to be provided to the getter during instantiation.
//
// Getters may or may not ignore these parameters as they are passed in.
-type options struct {
+// TODO what is the difference between this and schema.GetterOptionsV1?
+type getterOptions struct {
url string
certFile string
keyFile string
@@ -47,58 +48,59 @@ type options struct {
registryClient *registry.Client
timeout time.Duration
transport *http.Transport
+ artifactType string
}
// Option allows specifying various settings configurable by the user for overriding the defaults
// used when performing Get operations with the Getter.
-type Option func(*options)
+type Option func(*getterOptions)
// WithURL informs the getter the server name that will be used when fetching objects. Used in conjunction with
// WithTLSClientConfig to set the TLSClientConfig's server name.
func WithURL(url string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.url = url
}
}
// WithAcceptHeader sets the request's Accept header as some REST APIs serve multiple content types
func WithAcceptHeader(header string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.acceptHeader = header
}
}
// WithBasicAuth sets the request's Authorization header to use the provided credentials
func WithBasicAuth(username, password string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.username = username
opts.password = password
}
}
func WithPassCredentialsAll(pass bool) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.passCredentialsAll = pass
}
}
// WithUserAgent sets the request's User-Agent header to use the provided agent name.
func WithUserAgent(userAgent string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.userAgent = userAgent
}
}
// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked
func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS
}
}
// WithTLSClientConfig sets the client auth with the provided credentials.
func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.certFile = certFile
opts.keyFile = keyFile
opts.caFile = caFile
@@ -106,43 +108,50 @@ func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
}
func WithPlainHTTP(plainHTTP bool) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.plainHTTP = plainHTTP
}
}
// WithTimeout sets the timeout for requests
func WithTimeout(timeout time.Duration) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.timeout = timeout
}
}
func WithTagName(tagname string) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.version = tagname
}
}
func WithRegistryClient(client *registry.Client) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.registryClient = client
}
}
func WithUntar() Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.unTar = true
}
}
// WithTransport sets the http.Transport to allow overwriting the HTTPGetter default.
func WithTransport(transport *http.Transport) Option {
- return func(opts *options) {
+ return func(opts *getterOptions) {
opts.transport = transport
}
}
+// WithArtifactType sets the type of OCI artifact ("chart" or "plugin")
+func WithArtifactType(artifactType string) Option {
+ return func(opts *getterOptions) {
+ opts.artifactType = artifactType
+ }
+}
+
// Getter is an interface to support GET to the specified URL.
type Getter interface {
// Get file content by url string
@@ -164,12 +173,7 @@ type Provider struct {
// Provides returns true if the given scheme is supported by this Provider.
func (p Provider) Provides(scheme string) bool {
- for _, i := range p.Schemes {
- if i == scheme {
- return true
- }
- }
- return false
+ return slices.Contains(p.Schemes, scheme)
}
// Providers is a collection of Provider objects.
@@ -184,7 +188,7 @@ func (p Providers) ByScheme(scheme string) (Getter, error) {
return pp.New()
}
}
- return nil, errors.Errorf("scheme %q not supported", scheme)
+ return nil, fmt.Errorf("scheme %q not supported", scheme)
}
const (
@@ -196,25 +200,33 @@ const (
var defaultOptions = []Option{WithTimeout(time.Second * DefaultHTTPTimeout)}
-var httpProvider = Provider{
- Schemes: []string{"http", "https"},
- New: func(options ...Option) (Getter, error) {
- options = append(options, defaultOptions...)
- return NewHTTPGetter(options...)
- },
-}
-
-var ociProvider = Provider{
- Schemes: []string{registry.OCIScheme},
- New: NewOCIGetter,
+func Getters(extraOpts ...Option) Providers {
+ return Providers{
+ Provider{
+ Schemes: []string{"http", "https"},
+ New: func(options ...Option) (Getter, error) {
+ options = append(options, defaultOptions...)
+ options = append(options, extraOpts...)
+ return NewHTTPGetter(options...)
+ },
+ },
+ Provider{
+ Schemes: []string{registry.OCIScheme},
+ New: func(options ...Option) (Getter, error) {
+ options = append(options, defaultOptions...)
+ options = append(options, extraOpts...)
+ return NewOCIGetter(options...)
+ },
+ },
+ }
}
// All finds all of the registered getters as a list of Provider instances.
// Currently, the built-in getters and the discovered plugins with downloader
// notations are collected.
-func All(settings *cli.EnvSettings) Providers {
- result := Providers{httpProvider, ociProvider}
- pluginDownloaders, _ := collectPlugins(settings)
+func All(settings *cli.EnvSettings, opts ...Option) Providers {
+ result := Getters(opts...)
+ pluginDownloaders, _ := collectGetterPlugins(settings)
result = append(result, pluginDownloaders...)
return result
}
diff --git a/pkg/getter/getter_test.go b/pkg/getter/getter_test.go
index a14301900..83920e809 100644
--- a/pkg/getter/getter_test.go
+++ b/pkg/getter/getter_test.go
@@ -17,6 +17,7 @@ package getter
import (
"testing"
+ "time"
"helm.sh/helm/v4/pkg/cli"
)
@@ -52,6 +53,23 @@ func TestProviders(t *testing.T) {
}
}
+func TestProvidersWithTimeout(t *testing.T) {
+ want := time.Hour
+ getters := Getters(WithTimeout(want))
+ getter, err := getters.ByScheme("http")
+ if err != nil {
+ t.Error(err)
+ }
+ client, err := getter.(*HTTPGetter).httpClient()
+ if err != nil {
+ t.Error(err)
+ }
+ got := client.Timeout
+ if got != want {
+ t.Errorf("Expected %q, got %q", want, got)
+ }
+}
+
func TestAll(t *testing.T) {
env := cli.New()
env.PluginsDirectory = pluginDir
diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go
index 37d80cda7..110f45c54 100644
--- a/pkg/getter/httpgetter.go
+++ b/pkg/getter/httpgetter.go
@@ -18,21 +18,19 @@ package getter
import (
"bytes"
"crypto/tls"
+ "fmt"
"io"
"net/http"
"net/url"
"sync"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/tlsutil"
- "helm.sh/helm/v4/internal/urlutil"
"helm.sh/helm/v4/internal/version"
)
// HTTPGetter is the default HTTP(/S) backend handler
type HTTPGetter struct {
- opts options
+ opts getterOptions
transport *http.Transport
once sync.Once
}
@@ -66,11 +64,11 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) {
// with the basic auth is the one being fetched.
u1, err := url.Parse(g.opts.url)
if err != nil {
- return nil, errors.Wrap(err, "Unable to parse getter URL")
+ return nil, fmt.Errorf("unable to parse getter URL: %w", err)
}
u2, err := url.Parse(href)
if err != nil {
- return nil, errors.Wrap(err, "Unable to parse URL getting from")
+ return nil, fmt.Errorf("unable to parse URL getting from: %w", err)
}
// Host on URL (returned from url.Parse) contains the port if present.
@@ -93,7 +91,7 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) {
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return nil, errors.Errorf("failed to fetch %s : %s", href, resp.Status)
+ return nil, fmt.Errorf("failed to fetch %s : %s", href, resp.Status)
}
buf := bytes.NewBuffer(nil)
@@ -124,6 +122,9 @@ func (g *HTTPGetter) httpClient() (*http.Client, error) {
g.transport = &http.Transport{
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
+ // Being nil would cause the tls.Config default to be used
+ // "NewTLSConfig" modifies an empty TLS config, not the default one
+ TLSClientConfig: &tls.Config{},
}
})
@@ -134,14 +135,8 @@ func (g *HTTPGetter) httpClient() (*http.Client, error) {
tlsutil.WithCAFile(g.opts.caFile),
)
if err != nil {
- return nil, errors.Wrap(err, "can't create TLS config for client")
- }
-
- sni, err := urlutil.ExtractHostname(g.opts.url)
- if err != nil {
- return nil, err
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
}
- tlsConf.ServerName = sni
g.transport.TLSClientConfig = tlsConf
}
diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go
index 24e670f6e..f87d71877 100644
--- a/pkg/getter/httpgetter_test.go
+++ b/pkg/getter/httpgetter_test.go
@@ -28,8 +28,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/internal/version"
"helm.sh/helm/v4/pkg/cli"
@@ -52,7 +50,7 @@ func TestHTTPGetter(t *testing.T) {
timeout := time.Second * 5
transport := &http.Transport{}
- // Test with options
+ // Test with getterOptions
g, err = NewHTTPGetter(
WithBasicAuth("I", "Am"),
WithPassCredentialsAll(false),
@@ -317,7 +315,7 @@ func TestDownloadTLS(t *testing.T) {
tlsutil.WithCAFile(ca),
)
if err != nil {
- t.Fatal(errors.Wrap(err, "can't create TLS config for client"))
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
}
tlsConf.ServerName = "helm.sh"
tlsSrv.TLS = tlsConf
@@ -358,6 +356,131 @@ func TestDownloadTLS(t *testing.T) {
}
}
+func TestDownloadTLSWithRedirect(t *testing.T) {
+ cd := "../../testdata"
+ srv2Resp := "hello"
+ insecureSkipTLSverify := false
+
+ // Server 2 that will actually fulfil the request.
+ ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "localhost-crt.pem"), filepath.Join(cd, "key.pem")
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithCAFile(ca),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify),
+ )
+
+ if err != nil {
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
+ }
+
+ tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
+ rw.Header().Set("Content-Type", "text/plain")
+ rw.Write([]byte(srv2Resp))
+ }))
+
+ tlsSrv2.TLS = tlsConf
+ tlsSrv2.StartTLS()
+ defer tlsSrv2.Close()
+
+ // Server 1 responds with a redirect to Server 2.
+ ca, pub, priv = filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem")
+ tlsConf, err = tlsutil.NewTLSConfig(
+ tlsutil.WithCAFile(ca),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSverify),
+ )
+
+ if err != nil {
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
+ }
+
+ tlsSrv1 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ u, _ := url.ParseRequestURI(tlsSrv2.URL)
+
+ // Make the request using the hostname 'localhost' (to which 'localhost-crt.pem' is issued)
+ // to verify that a successful TLS connection is made even if the client doesn't specify
+ // the hostname (SNI) in `tls.Config.ServerName`. By default the hostname is derived from the
+ // request URL for every request (including redirects). Setting `tls.Config.ServerName` on the
+ // client just overrides the remote endpoint's hostname.
+ // See https://github.com/golang/go/blob/3979fb9/src/net/http/transport.go#L1505-L1513.
+ u.Host = fmt.Sprintf("localhost:%s", u.Port())
+
+ http.Redirect(rw, r, u.String(), http.StatusTemporaryRedirect)
+ }))
+
+ tlsSrv1.TLS = tlsConf
+ tlsSrv1.StartTLS()
+ defer tlsSrv1.Close()
+
+ u, _ := url.ParseRequestURI(tlsSrv1.URL)
+
+ t.Run("Test with TLS", func(t *testing.T) {
+ g, err := NewHTTPGetter(
+ WithURL(u.String()),
+ WithTLSClientConfig(pub, priv, ca),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String())
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+
+ t.Run("Test with TLS config being passed along in .Get (see #6635)", func(t *testing.T) {
+ g, err := NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig(pub, priv, ca))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+
+ t.Run("Test with only the CA file (see also #6635)", func(t *testing.T) {
+ g, err := NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig("", "", ca))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+}
+
func TestDownloadInsecureSkipTLSVerify(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
defer ts.Close()
@@ -450,12 +573,10 @@ func TestHttpClientInsecureSkipVerify(t *testing.T) {
if len(transport.TLSClientConfig.Certificates) <= 0 {
t.Fatal("transport.TLSClientConfig.Certificates is not present")
}
- if transport.TLSClientConfig.ServerName == "" {
- t.Fatal("TLSClientConfig.ServerName is blank")
- }
}
func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expectedValue bool) *http.Transport {
+ t.Helper()
returnVal, err := g.httpClient()
if err != nil {
diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go
index 2a611e13a..24fc60c56 100644
--- a/pkg/getter/ocigetter.go
+++ b/pkg/getter/ocigetter.go
@@ -17,6 +17,7 @@ package getter
import (
"bytes"
+ "crypto/tls"
"fmt"
"net"
"net/http"
@@ -32,7 +33,7 @@ import (
// OCIGetter is the default HTTP(/S) backend handler
type OCIGetter struct {
- opts options
+ opts getterOptions
transport *http.Transport
once sync.Once
}
@@ -62,6 +63,12 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) {
if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") {
ref = fmt.Sprintf("%s:%s", ref, version)
}
+ // Check if this is a plugin request
+ if g.opts.artifactType == "plugin" {
+ return g.getPlugin(client, ref)
+ }
+
+ // Default to chart behavior for backward compatibility
var pullOpts []registry.PullOption
requestingProv := strings.HasSuffix(ref, ".prov")
if requestingProv {
@@ -124,6 +131,9 @@ func (g *OCIGetter) newRegistryClient() (*registry.Client, error) {
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
Proxy: http.ProxyFromEnvironment,
+ // Being nil would cause the tls.Config default to be used
+ // "NewTLSConfig" modifies an empty TLS config, not the default one
+ TLSClientConfig: &tls.Config{},
}
})
@@ -162,3 +172,42 @@ func (g *OCIGetter) newRegistryClient() (*registry.Client, error) {
return client, nil
}
+
+// getPlugin handles plugin-specific OCI pulls
+func (g *OCIGetter) getPlugin(client *registry.Client, ref string) (*bytes.Buffer, error) {
+ // Check if this is a provenance file request
+ requestingProv := strings.HasSuffix(ref, ".prov")
+ if requestingProv {
+ ref = strings.TrimSuffix(ref, ".prov")
+ }
+
+ // Extract plugin name from the reference
+ // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name"
+ parts := strings.Split(ref, "/")
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid OCI reference: %s", ref)
+ }
+ lastPart := parts[len(parts)-1]
+ pluginName := lastPart
+ if idx := strings.LastIndex(lastPart, ":"); idx > 0 {
+ pluginName = lastPart[:idx]
+ }
+ if idx := strings.LastIndex(lastPart, "@"); idx > 0 {
+ pluginName = lastPart[:idx]
+ }
+
+ var pullOpts []registry.PluginPullOption
+ if requestingProv {
+ pullOpts = append(pullOpts, registry.PullPluginOptWithProv(true))
+ }
+
+ result, err := client.PullPlugin(ref, pluginName, pullOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if requestingProv {
+ return bytes.NewBuffer(result.Prov.Data), nil
+ }
+ return bytes.NewBuffer(result.PluginData), nil
+}
diff --git a/pkg/getter/ocigetter_test.go b/pkg/getter/ocigetter_test.go
index e3d9278a5..ef196afcc 100644
--- a/pkg/getter/ocigetter_test.go
+++ b/pkg/getter/ocigetter_test.go
@@ -42,7 +42,7 @@ func TestOCIGetter(t *testing.T) {
insecureSkipVerifyTLS := false
plainHTTP := false
- // Test with options
+ // Test with getterOptions
g, err = NewOCIGetter(
WithBasicAuth("I", "Am"),
WithTLSClientConfig(pub, priv, ca),
diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go
index 2b734fdf0..32dbc70c9 100644
--- a/pkg/getter/plugingetter.go
+++ b/pkg/getter/plugingetter.go
@@ -17,94 +17,109 @@ package getter
import (
"bytes"
+ "context"
"fmt"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
- "github.com/pkg/errors"
+ "net/url"
+ "helm.sh/helm/v4/internal/plugin"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
"helm.sh/helm/v4/pkg/cli"
- "helm.sh/helm/v4/pkg/plugin"
)
-// collectPlugins scans for getter plugins.
+// collectGetterPlugins scans for getter plugins.
// This will load plugins according to the cli.
-func collectPlugins(settings *cli.EnvSettings) (Providers, error) {
- plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) {
+ d := plugin.Descriptor{
+ Type: "getter/v1",
+ }
+ plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d)
if err != nil {
return nil, err
}
- var result Providers
- for _, plugin := range plugins {
- for _, downloader := range plugin.Metadata.Downloaders {
- result = append(result, Provider{
- Schemes: downloader.Protocols,
- New: NewPluginGetter(
- downloader.Command,
- settings,
- plugin.Metadata.Name,
- plugin.Dir,
- ),
+ pluginConstructorBuilder := func(plg plugin.Plugin) Constructor {
+ return func(option ...Option) (Getter, error) {
+
+ return &getterPlugin{
+ options: append([]Option{}, option...),
+ plg: plg,
+ }, nil
+ }
+ }
+ results := make([]Provider, 0, len(plgs))
+ for _, plg := range plgs {
+ if c, ok := plg.Metadata().Config.(*schema.ConfigGetterV1); ok {
+ results = append(results, Provider{
+ Schemes: c.Protocols,
+ New: pluginConstructorBuilder(plg),
})
}
}
- return result, nil
+ return results, nil
}
-// pluginGetter is a generic type to invoke custom downloaders,
-// implemented in plugins.
-type pluginGetter struct {
- command string
- settings *cli.EnvSettings
- name string
- base string
- opts options
+func convertOptions(globalOptions, options []Option) schema.GetterOptionsV1 {
+ opts := getterOptions{}
+ for _, opt := range globalOptions {
+ opt(&opts)
+ }
+ for _, opt := range options {
+ opt(&opts)
+ }
+
+ result := schema.GetterOptionsV1{
+ URL: opts.url,
+ CertFile: opts.certFile,
+ KeyFile: opts.keyFile,
+ CAFile: opts.caFile,
+ UNTar: opts.unTar,
+ InsecureSkipVerifyTLS: opts.insecureSkipVerifyTLS,
+ PlainHTTP: opts.plainHTTP,
+ AcceptHeader: opts.acceptHeader,
+ Username: opts.username,
+ Password: opts.password,
+ PassCredentialsAll: opts.passCredentialsAll,
+ UserAgent: opts.userAgent,
+ Version: opts.version,
+ Timeout: opts.timeout,
+ }
+
+ return result
}
-func (p *pluginGetter) setupOptionsEnv(env []string) []string {
- env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username))
- env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password))
- env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll))
- return env
+type getterPlugin struct {
+ options []Option
+ plg plugin.Plugin
}
-// Get runs downloader plugin command
-func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
- for _, opt := range options {
- opt(&p.opts)
- }
- commands := strings.Split(p.command, " ")
- argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href)
- prog := exec.Command(filepath.Join(p.base, commands[0]), argv...)
- plugin.SetupPluginEnv(p.settings, p.name, p.base)
- prog.Env = p.setupOptionsEnv(os.Environ())
- buf := bytes.NewBuffer(nil)
- prog.Stdout = buf
- prog.Stderr = os.Stderr
- if err := prog.Run(); err != nil {
- if eerr, ok := err.(*exec.ExitError); ok {
- os.Stderr.Write(eerr.Stderr)
- return nil, errors.Errorf("plugin %q exited with error", p.command)
- }
+func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error) {
+ opts := convertOptions(g.options, options)
+
+ // TODO optimization: pass this along to Get() instead of re-parsing here
+ u, err := url.Parse(href)
+ if err != nil {
return nil, err
}
- return buf, nil
-}
-// NewPluginGetter constructs a valid plugin getter
-func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor {
- return func(options ...Option) (Getter, error) {
- result := &pluginGetter{
- command: command,
- settings: settings,
- name: name,
- base: base,
- }
- for _, opt := range options {
- opt(&result.opts)
- }
- return result, nil
+ input := &plugin.Input{
+ Message: schema.InputMessageGetterV1{
+ Href: href,
+ Options: opts,
+ Protocol: u.Scheme,
+ },
+ // TODO should we pass Stdin, Stdout, and Stderr through Input here to getter plugins?
+ //Stdout: os.Stdout,
+ }
+ output, err := g.plg.Invoke(context.Background(), input)
+ if err != nil {
+ return nil, fmt.Errorf("plugin %q failed to invoke: %w", g.plg, err)
}
+
+ outputMessage, ok := output.Message.(schema.OutputMessageGetterV1)
+ if !ok {
+ return nil, fmt.Errorf("invalid output message type from plugin %q", g.plg.Metadata().Name)
+ }
+
+ return bytes.NewBuffer(outputMessage.Data), nil
}
diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go
index 310ab9e07..8faaf7329 100644
--- a/pkg/getter/plugingetter_test.go
+++ b/pkg/getter/plugingetter_test.go
@@ -16,9 +16,16 @@ limitations under the License.
package getter
import (
- "runtime"
- "strings"
+ "context"
+
"testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/schema"
"helm.sh/helm/v4/pkg/cli"
)
@@ -27,7 +34,7 @@ func TestCollectPlugins(t *testing.T) {
env := cli.New()
env.PluginsDirectory = pluginDir
- p, err := collectPlugins(env)
+ p, err := collectGetterPlugins(env)
if err != nil {
t.Fatal(err)
}
@@ -49,53 +56,91 @@ func TestCollectPlugins(t *testing.T) {
}
}
-func TestPluginGetter(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("TODO: refactor this test to work on windows")
+func TestConvertOptions(t *testing.T) {
+ opts := convertOptions(
+ []Option{
+ WithURL("example://foo"),
+ WithAcceptHeader("Accept-Header"),
+ WithBasicAuth("username", "password"),
+ WithPassCredentialsAll(true),
+ WithUserAgent("User-agent"),
+ WithInsecureSkipVerifyTLS(true),
+ WithTLSClientConfig("certFile.pem", "keyFile.pem", "caFile.pem"),
+ WithPlainHTTP(true),
+ WithTimeout(10),
+ WithTagName("1.2.3"),
+ WithUntar(),
+ },
+ []Option{
+ WithTimeout(20),
+ },
+ )
+
+ expected := schema.GetterOptionsV1{
+ URL: "example://foo",
+ CertFile: "certFile.pem",
+ KeyFile: "keyFile.pem",
+ CAFile: "caFile.pem",
+ UNTar: true,
+ Timeout: 20,
+ InsecureSkipVerifyTLS: true,
+ PlainHTTP: true,
+ AcceptHeader: "Accept-Header",
+ Username: "username",
+ Password: "password",
+ PassCredentialsAll: true,
+ UserAgent: "User-agent",
+ Version: "1.2.3",
}
+ assert.Equal(t, expected, opts)
+}
- env := cli.New()
- env.PluginsDirectory = pluginDir
- pg := NewPluginGetter("echo", env, "test", ".")
- g, err := pg()
- if err != nil {
- t.Fatal(err)
- }
+type testPlugin struct {
+ t *testing.T
+ dir string
+}
- data, err := g.Get("test://foo/bar")
- if err != nil {
- t.Fatal(err)
- }
+func (t *testPlugin) Dir() string {
+ return t.dir
+}
- expect := "test://foo/bar"
- got := strings.TrimSpace(data.String())
- if got != expect {
- t.Errorf("Expected %q, got %q", expect, got)
+func (t *testPlugin) Metadata() plugin.Metadata {
+ return plugin.Metadata{
+ Name: "fake-plugin",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{},
+ RuntimeConfig: &plugin.RuntimeConfigSubprocess{
+ PlatformCommand: []plugin.PlatformCommand{
+ {
+ Command: "echo fake-plugin",
+ },
+ },
+ },
}
}
-func TestPluginSubCommands(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("TODO: refactor this test to work on windows")
+func (t *testPlugin) Invoke(_ context.Context, _ *plugin.Input) (*plugin.Output, error) {
+ // Simulate a plugin invocation
+ output := &plugin.Output{
+ Message: schema.OutputMessageGetterV1{
+ Data: []byte("fake-plugin output"),
+ },
}
+ return output, nil
+}
- env := cli.New()
- env.PluginsDirectory = pluginDir
+var _ plugin.Plugin = (*testPlugin)(nil)
- pg := NewPluginGetter("echo -n", env, "test", ".")
- g, err := pg()
- if err != nil {
- t.Fatal(err)
+func TestGetterPlugin(t *testing.T) {
+ gp := getterPlugin{
+ options: []Option{},
+ plg: &testPlugin{t: t, dir: "fake/dir"},
}
- data, err := g.Get("test://foo/bar")
- if err != nil {
- t.Fatal(err)
- }
+ buf, err := gp.Get("test://example.com", WithTimeout(5*time.Second))
+ require.NoError(t, err)
- expect := " test://foo/bar"
- got := data.String()
- if got != expect {
- t.Errorf("Expected %q, got %q", expect, got)
- }
+ assert.Equal(t, "fake-plugin output", buf.String())
}
diff --git a/pkg/getter/testdata/plugins/testgetter/get.sh b/pkg/getter/testdata/plugins/testgetter/get.sh
deleted file mode 100755
index cdd992369..000000000
--- a/pkg/getter/testdata/plugins/testgetter/get.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-echo ENVIRONMENT
-env
-
-echo ""
-echo ARGUMENTS
-echo $@
diff --git a/pkg/getter/testdata/plugins/testgetter/plugin.yaml b/pkg/getter/testdata/plugins/testgetter/plugin.yaml
index d1b929e3f..ca11b95ea 100644
--- a/pkg/getter/testdata/plugins/testgetter/plugin.yaml
+++ b/pkg/getter/testdata/plugins/testgetter/plugin.yaml
@@ -1,15 +1,13 @@
name: "testgetter"
version: "0.1.0"
-usage: "Fetch a package from a test:// source"
-description: |-
- Print the environment that the plugin was given, then exit.
-
- This registers the test:// protocol.
-
-command: "$HELM_PLUGIN_DIR/get.sh"
-ignoreFlags: true
-downloaders:
-#- command: "$HELM_PLUGIN_DIR/get.sh"
-- command: "echo"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
protocols:
- "test"
+runtimeConfig:
+ protocolCommands:
+ - command: "echo"
+ protocols:
+ - "test"
diff --git a/pkg/getter/testdata/plugins/testgetter2/get.sh b/pkg/getter/testdata/plugins/testgetter2/get.sh
deleted file mode 100755
index cdd992369..000000000
--- a/pkg/getter/testdata/plugins/testgetter2/get.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-echo ENVIRONMENT
-env
-
-echo ""
-echo ARGUMENTS
-echo $@
diff --git a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml
index f1a527ef9..1c944a7c7 100644
--- a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml
+++ b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml
@@ -1,10 +1,13 @@
name: "testgetter2"
version: "0.1.0"
-usage: "Fetch a different package from a test2:// source"
-description: "Handle test2 scheme"
-command: "$HELM_PLUGIN_DIR/get.sh"
-ignoreFlags: true
-downloaders:
-- command: "echo"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
protocols:
- "test2"
+runtimeConfig:
+ protocolCommands:
+ - command: "echo"
+ protocols:
+ - "test2"
diff --git a/pkg/helmpath/home_unix_test.go b/pkg/helmpath/home_unix_test.go
index 6e4189bc9..a64c9bcd6 100644
--- a/pkg/helmpath/home_unix_test.go
+++ b/pkg/helmpath/home_unix_test.go
@@ -16,7 +16,6 @@
package helmpath
import (
- "os"
"runtime"
"testing"
@@ -24,9 +23,9 @@ import (
)
func TestHelmHome(t *testing.T) {
- os.Setenv(xdg.CacheHomeEnvVar, "/cache")
- os.Setenv(xdg.ConfigHomeEnvVar, "/config")
- os.Setenv(xdg.DataHomeEnvVar, "/data")
+ t.Setenv(xdg.CacheHomeEnvVar, "/cache")
+ t.Setenv(xdg.ConfigHomeEnvVar, "/config")
+ t.Setenv(xdg.DataHomeEnvVar, "/data")
isEq := func(t *testing.T, got, expected string) {
t.Helper()
if expected != got {
@@ -40,7 +39,7 @@ func TestHelmHome(t *testing.T) {
isEq(t, DataPath(), "/data/helm")
// test to see if lazy-loading environment variables at runtime works
- os.Setenv(xdg.CacheHomeEnvVar, "/cache2")
+ t.Setenv(xdg.CacheHomeEnvVar, "/cache2")
isEq(t, CachePath(), "/cache2/helm")
}
diff --git a/pkg/helmpath/lazypath_darwin_test.go b/pkg/helmpath/lazypath_darwin_test.go
index e04e20756..e3006d0d5 100644
--- a/pkg/helmpath/lazypath_darwin_test.go
+++ b/pkg/helmpath/lazypath_darwin_test.go
@@ -40,7 +40,7 @@ func TestDataPath(t *testing.T) {
t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
}
- os.Setenv(xdg.DataHomeEnvVar, "/tmp")
+ t.Setenv(xdg.DataHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
@@ -58,7 +58,7 @@ func TestConfigPath(t *testing.T) {
t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
}
- os.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
+ t.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
@@ -76,7 +76,7 @@ func TestCachePath(t *testing.T) {
t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
}
- os.Setenv(xdg.CacheHomeEnvVar, "/tmp")
+ t.Setenv(xdg.CacheHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
diff --git a/pkg/helmpath/lazypath_unix_test.go b/pkg/helmpath/lazypath_unix_test.go
index 534735d10..4b0f2429b 100644
--- a/pkg/helmpath/lazypath_unix_test.go
+++ b/pkg/helmpath/lazypath_unix_test.go
@@ -16,7 +16,6 @@
package helmpath
import (
- "os"
"path/filepath"
"testing"
@@ -32,15 +31,13 @@ const (
)
func TestDataPath(t *testing.T) {
- os.Unsetenv(xdg.DataHomeEnvVar)
-
expected := filepath.Join(homedir.HomeDir(), ".local", "share", appName, testFile)
if lazy.dataPath(testFile) != expected {
t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
}
- os.Setenv(xdg.DataHomeEnvVar, "/tmp")
+ t.Setenv(xdg.DataHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
@@ -50,15 +47,13 @@ func TestDataPath(t *testing.T) {
}
func TestConfigPath(t *testing.T) {
- os.Unsetenv(xdg.ConfigHomeEnvVar)
-
expected := filepath.Join(homedir.HomeDir(), ".config", appName, testFile)
if lazy.configPath(testFile) != expected {
t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
}
- os.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
+ t.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
@@ -68,15 +63,13 @@ func TestConfigPath(t *testing.T) {
}
func TestCachePath(t *testing.T) {
- os.Unsetenv(xdg.CacheHomeEnvVar)
-
expected := filepath.Join(homedir.HomeDir(), ".cache", appName, testFile)
if lazy.cachePath(testFile) != expected {
t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
}
- os.Setenv(xdg.CacheHomeEnvVar, "/tmp")
+ t.Setenv(xdg.CacheHomeEnvVar, "/tmp")
expected = filepath.Join("/tmp", appName, testFile)
diff --git a/pkg/ignore/rules.go b/pkg/ignore/rules.go
index 88de407ad..3511c2d40 100644
--- a/pkg/ignore/rules.go
+++ b/pkg/ignore/rules.go
@@ -19,13 +19,12 @@ package ignore
import (
"bufio"
"bytes"
+ "errors"
"io"
- "log"
+ "log/slog"
"os"
"path/filepath"
"strings"
-
- "github.com/pkg/errors"
)
// HelmIgnore default name of an ignorefile.
@@ -102,7 +101,7 @@ func (r *Rules) Ignore(path string, fi os.FileInfo) bool {
}
for _, p := range r.patterns {
if p.match == nil {
- log.Printf("ignore: no matcher supplied for %q", p.raw)
+ slog.Info("this will be ignored no matcher supplied", "patterns", p.raw)
return false
}
@@ -171,13 +170,13 @@ func (r *Rules) parseRule(rule string) error {
rule = strings.TrimSuffix(rule, "/")
}
- if strings.HasPrefix(rule, "/") {
+ if after, ok := strings.CutPrefix(rule, "/"); ok {
// Require path matches the root path.
p.match = func(n string, _ os.FileInfo) bool {
- rule = strings.TrimPrefix(rule, "/")
+ rule = after
ok, err := filepath.Match(rule, n)
if err != nil {
- log.Printf("Failed to compile %q: %s", rule, err)
+ slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@@ -187,7 +186,7 @@ func (r *Rules) parseRule(rule string) error {
p.match = func(n string, _ os.FileInfo) bool {
ok, err := filepath.Match(rule, n)
if err != nil {
- log.Printf("Failed to compile %q: %s", rule, err)
+ slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
@@ -199,7 +198,7 @@ func (r *Rules) parseRule(rule string) error {
n = filepath.Base(n)
ok, err := filepath.Match(rule, n)
if err != nil {
- log.Printf("Failed to compile %q: %s", rule, err)
+ slog.Error("failed to compile", "rule", rule, slog.Any("error", err))
return false
}
return ok
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index fd111c647..26ba7abfc 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -20,41 +20,41 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
+ "log/slog"
+ "net/http"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
- "time"
- jsonpatch "github.com/evanphx/json-patch"
- "github.com/pkg/errors"
- batch "k8s.io/api/batch/v1"
+ jsonpatch "github.com/evanphx/json-patch/v5"
v1 "k8s.io/api/core/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
- multierror "github.com/hashicorp/go-multierror"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
- "k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/jsonmergepatch"
+ "k8s.io/apimachinery/pkg/util/mergepatch"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
- "k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
- cachetools "k8s.io/client-go/tools/cache"
- watchtools "k8s.io/client-go/tools/watch"
+ "k8s.io/client-go/util/csaupgrade"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
@@ -79,13 +79,29 @@ type Client struct {
// needs. The smaller surface area of the interface means there is a lower
// chance of it changing.
Factory Factory
- Log func(string, ...interface{})
// Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string
+ Waiter
kubeClient kubernetes.Interface
}
+type WaitStrategy string
+
+const (
+ StatusWatcherStrategy WaitStrategy = "watcher"
+ LegacyStrategy WaitStrategy = "legacy"
+ HookOnlyStrategy WaitStrategy = "hookOnly"
+)
+
+type FieldValidationDirective string
+
+const (
+ FieldValidationDirectiveIgnore FieldValidationDirective = "Ignore"
+ FieldValidationDirectiveWarn FieldValidationDirective = "Warn"
+ FieldValidationDirectiveStrict FieldValidationDirective = "Strict"
+)
+
func init() {
// Add CRDs to the scheme. They are missing by default.
if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
@@ -97,19 +113,71 @@ func init() {
}
}
+func (c *Client) newStatusWatcher() (*statusWaiter, error) {
+ cfg, err := c.Factory.ToRESTConfig()
+ if err != nil {
+ return nil, err
+ }
+ dynamicClient, err := c.Factory.DynamicClient()
+ if err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(cfg)
+ if err != nil {
+ return nil, err
+ }
+ restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &statusWaiter{
+ restMapper: restMapper,
+ client: dynamicClient,
+ }, nil
+}
+
+func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
+ switch strategy {
+ case LegacyStrategy:
+ kc, err := c.Factory.KubernetesClientSet()
+ if err != nil {
+ return nil, err
+ }
+ return &legacyWaiter{kubeClient: kc}, nil
+ case StatusWatcherStrategy:
+ return c.newStatusWatcher()
+ case HookOnlyStrategy:
+ sw, err := c.newStatusWatcher()
+ if err != nil {
+ return nil, err
+ }
+ return &hookOnlyWaiter{sw: sw}, nil
+ default:
+ return nil, errors.New("unknown wait strategy")
+ }
+}
+
+func (c *Client) SetWaiter(ws WaitStrategy) error {
+ var err error
+ c.Waiter, err = c.GetWaiter(ws)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
// New creates a new Client.
func New(getter genericclioptions.RESTClientGetter) *Client {
if getter == nil {
getter = genericclioptions.NewConfigFlags(true)
}
- return &Client{
- Factory: cmdutil.NewFactory(getter),
- Log: nopLogger,
+ factory := cmdutil.NewFactory(getter)
+ c := &Client{
+ Factory: factory,
}
+ return c
}
-var nopLogger = func(_ string, _ ...interface{}) {}
-
// getKubeClient get or create a new KubernetesClientSet
func (c *Client) getKubeClient() (kubernetes.Interface, error) {
var err error
@@ -126,21 +194,113 @@ func (c *Client) IsReachable() error {
if err == genericclioptions.ErrEmptyConfig {
// re-replace kubernetes ErrEmptyConfig error with a friendly error
// moar workarounds for Kubernetes API breaking.
- return errors.New("Kubernetes cluster unreachable")
+ return errors.New("kubernetes cluster unreachable")
}
if err != nil {
- return errors.Wrap(err, "Kubernetes cluster unreachable")
+ return fmt.Errorf("kubernetes cluster unreachable: %w", err)
}
if _, err := client.Discovery().ServerVersion(); err != nil {
- return errors.Wrap(err, "Kubernetes cluster unreachable")
+ return fmt.Errorf("kubernetes cluster unreachable: %w", err)
}
return nil
}
+type clientCreateOptions struct {
+ serverSideApply bool
+ forceConflicts bool
+ dryRun bool
+ fieldValidationDirective FieldValidationDirective
+}
+
+type ClientCreateOption func(*clientCreateOptions) error
+
+// ClientCreateOptionServerSideApply enables performing object apply server-side
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+//
+// `forceConflicts` forces conflicts to be resolved (may be when serverSideApply enabled only)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+func ClientCreateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ if !serverSideApply && forceConflicts {
+ return fmt.Errorf("forceConflicts enabled when serverSideApply disabled")
+ }
+
+ o.serverSideApply = serverSideApply
+ o.forceConflicts = forceConflicts
+
+ return nil
+ }
+}
+
+// ClientCreateOptionDryRun requests the server to perform non-mutating operations only
+func ClientCreateOptionDryRun(dryRun bool) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ o.dryRun = dryRun
+
+ return nil
+ }
+}
+
+// ClientCreateOptionFieldValidationDirective specifies show API operations validate object's schema
+// - For client-side apply: this is ignored
+// - For server-side apply: the directive is sent to the server to perform the validation
+//
+// Defaults to `FieldValidationDirectiveStrict`
+func ClientCreateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ o.fieldValidationDirective = fieldValidationDirective
+
+ return nil
+ }
+}
+
// Create creates Kubernetes resources specified in the resource list.
-func (c *Client) Create(resources ResourceList) (*Result, error) {
- c.Log("creating %d resource(s)", len(resources))
- if err := perform(resources, createResource); err != nil {
+func (c *Client) Create(resources ResourceList, options ...ClientCreateOption) (*Result, error) {
+ slog.Debug("creating resource(s)", "resources", len(resources))
+
+ createOptions := clientCreateOptions{
+ serverSideApply: true, // Default to server-side apply
+ fieldValidationDirective: FieldValidationDirectiveStrict,
+ }
+
+ errs := make([]error, 0, len(options))
+ for _, o := range options {
+ errs = append(errs, o(&createOptions))
+ }
+ if err := errors.Join(errs...); err != nil {
+ return nil, fmt.Errorf("invalid client create option(s): %w", err)
+ }
+
+ if createOptions.forceConflicts && !createOptions.serverSideApply {
+ return nil, fmt.Errorf("invalid operation: force conflicts can only be used with server-side apply")
+ }
+
+ makeCreateApplyFunc := func() func(target *resource.Info) error {
+ if createOptions.serverSideApply {
+ slog.Debug("using server-side apply for resource creation", slog.Bool("forceConflicts", createOptions.forceConflicts), slog.Bool("dryRun", createOptions.dryRun), slog.String("fieldValidationDirective", string(createOptions.fieldValidationDirective)))
+ return func(target *resource.Info) error {
+ err := patchResourceServerSide(target, createOptions.dryRun, createOptions.forceConflicts, createOptions.fieldValidationDirective)
+
+ logger := slog.With(
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("gvk", target.Mapping.GroupVersionKind.String()))
+ if err != nil {
+ logger.Debug("Error patching resource", slog.Any("error", err))
+ return err
+ }
+
+ logger.Debug("Patched resource")
+
+ return nil
+ }
+ }
+
+ slog.Debug("using client-side apply for resource creation")
+ return createResource
+ }
+
+ if err := perform(resources, makeCreateApplyFunc()); err != nil {
return nil, err
}
return &Result{Created: resources}, nil
@@ -191,7 +351,7 @@ func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime
objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
if err != nil {
- c.Log("Warning: get the relation pod is failed, err:%s", err.Error())
+ slog.Warn("get the relation pod is failed", slog.Any("error", err))
}
}
}
@@ -209,7 +369,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]run
if info == nil {
return objs, nil
}
- c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)
+ slog.Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
selector, ok, _ := getSelectorFromObject(info.Object)
if !ok {
return objs, nil
@@ -281,45 +441,6 @@ func getResource(info *resource.Info) (runtime.Object, error) {
return obj, nil
}
-// Wait waits up to the given timeout for the specified resources to be ready.
-func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
- cs, err := c.getKubeClient()
- if err != nil {
- return err
- }
- checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
- w := waiter{
- c: checker,
- log: c.Log,
- timeout: timeout,
- }
- return w.waitForResources(resources)
-}
-
-// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
-func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
- cs, err := c.getKubeClient()
- if err != nil {
- return err
- }
- checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
- w := waiter{
- c: checker,
- log: c.Log,
- timeout: timeout,
- }
- return w.waitForResources(resources)
-}
-
-// WaitForDelete wait up to the given timeout for the specified resources to be deleted.
-func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error {
- w := waiter{
- log: c.Log,
- timeout: timeout,
- }
- return w.waitForDeletedResources(resources)
-}
-
func (c *Client) namespace() string {
if c.Namespace != "" {
return c.Namespace
@@ -330,103 +451,98 @@ func (c *Client) namespace() string {
return v1.NamespaceDefault
}
-// newBuilder returns a new resource builder for structured api objects.
-func (c *Client) newBuilder() *resource.Builder {
- return c.Factory.NewBuilder().
- ContinueOnError().
- NamespaceParam(c.namespace()).
- DefaultNamespace().
- Flatten()
-}
-
-// Build validates for Kubernetes objects and returns unstructured infos.
-func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
- validationDirective := metav1.FieldValidationIgnore
+func determineFieldValidationDirective(validate bool) FieldValidationDirective {
if validate {
- validationDirective = metav1.FieldValidationStrict
+ return FieldValidationDirectiveStrict
}
- schema, err := c.Factory.Validator(validationDirective)
+ return FieldValidationDirectiveIgnore
+}
+
+func buildResourceList(f Factory, namespace string, validationDirective FieldValidationDirective, reader io.Reader, transformRequest resource.RequestTransform) (ResourceList, error) {
+
+ schema, err := f.Validator(string(validationDirective))
if err != nil {
return nil, err
}
- result, err := c.newBuilder().
+
+ builder := f.NewBuilder().
+ ContinueOnError().
+ NamespaceParam(namespace).
+ DefaultNamespace().
+ Flatten().
Unstructured().
Schema(schema).
- Stream(reader, "").
- Do().Infos()
+ Stream(reader, "")
+ if transformRequest != nil {
+ builder.TransformRequests(transformRequest)
+ }
+ result, err := builder.Do().Infos()
return result, scrubValidationError(err)
}
+// Build validates for Kubernetes objects and returns unstructured infos.
+func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
+ return buildResourceList(
+ c.Factory,
+ c.namespace(),
+ determineFieldValidationDirective(validate),
+ reader,
+ nil)
+}
+
// BuildTable validates for Kubernetes objects and returns unstructured infos.
// The returned kind is a Table.
func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) {
- validationDirective := metav1.FieldValidationIgnore
- if validate {
- validationDirective = metav1.FieldValidationStrict
- }
-
- schema, err := c.Factory.Validator(validationDirective)
- if err != nil {
- return nil, err
- }
- result, err := c.newBuilder().
- Unstructured().
- Schema(schema).
- Stream(reader, "").
- TransformRequests(transformRequests).
- Do().Infos()
- return result, scrubValidationError(err)
+ return buildResourceList(
+ c.Factory,
+ c.namespace(),
+ determineFieldValidationDirective(validate),
+ reader,
+ transformRequests)
}
-// Update takes the current list of objects and target list of objects and
-// creates resources that don't already exist, updates resources that have been
-// modified in the target configuration, and deletes resources from the current
-// configuration that are not present in the target configuration. If an error
-// occurs, a Result will still be returned with the error, containing all
-// resource updates, creations, and deletions that were attempted. These can be
-// used for cleanup or other logging purposes.
-func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) {
- updateErrors := []string{}
+func (c *Client) update(originals, targets ResourceList, updateApplyFunc UpdateApplyFunc) (*Result, error) {
+ updateErrors := []error{}
res := &Result{}
- c.Log("checking %d resources for changes", len(target))
- err := target.Visit(func(info *resource.Info, err error) error {
+ slog.Debug("checking resources for changes", "resources", len(targets))
+ err := targets.Visit(func(target *resource.Info, err error) error {
if err != nil {
return err
}
- helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager())
- if _, err := helper.Get(info.Namespace, info.Name); err != nil {
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ if _, err := helper.Get(target.Namespace, target.Name); err != nil {
if !apierrors.IsNotFound(err) {
- return errors.Wrap(err, "could not get information about the resource")
+ return fmt.Errorf("could not get information about the resource: %w", err)
}
// Append the created resource to the results, even if something fails
- res.Created = append(res.Created, info)
+ res.Created = append(res.Created, target)
// Since the resource does not exist, create it.
- if err := createResource(info); err != nil {
- return errors.Wrap(err, "failed to create resource")
+ if err := createResource(target); err != nil {
+ return fmt.Errorf("failed to create resource: %w", err)
}
- kind := info.Mapping.GroupVersionKind.Kind
- c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
+ kind := target.Mapping.GroupVersionKind.Kind
+ slog.Debug("created a new resource", "namespace", target.Namespace, "name", target.Name, "kind", kind)
return nil
}
- originalInfo := original.Get(info)
- if originalInfo == nil {
- kind := info.Mapping.GroupVersionKind.Kind
- return errors.Errorf("no %s with the name %q found", kind, info.Name)
+ original := originals.Get(target)
+ if original == nil {
+ kind := target.Mapping.GroupVersionKind.Kind
+ return fmt.Errorf("original object %s with the name %q not found", kind, target.Name)
}
- if err := updateResource(c, info, originalInfo.Object, force); err != nil {
- c.Log("error updating the resource %q:\n\t %v", info.Name, err)
- updateErrors = append(updateErrors, err.Error())
+ if err := updateApplyFunc(original, target); err != nil {
+ updateErrors = append(updateErrors, err)
}
+
// Because we check for errors later, append the info regardless
- res.Updated = append(res.Updated, info)
+ res.Updated = append(res.Updated, target)
return nil
})
@@ -435,26 +551,26 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
case err != nil:
return res, err
case len(updateErrors) != 0:
- return res, errors.New(strings.Join(updateErrors, " && "))
+ return res, joinErrors(updateErrors, " && ")
}
- for _, info := range original.Difference(target) {
- c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace)
+ for _, info := range originals.Difference(targets) {
+ slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
if err := info.Get(); err != nil {
- c.Log("Unable to get obj %q, err: %s", info.Name, err)
+ slog.Debug("unable to get object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil {
- c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
+ slog.Debug("unable to get annotations", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
- c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
+ slog.Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
continue
}
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
- c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
+ slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
continue
}
res.Deleted = append(res.Deleted, info)
@@ -462,12 +578,203 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err
return res, nil
}
+type clientUpdateOptions struct {
+ threeWayMergeForUnstructured bool
+ serverSideApply bool
+ forceReplace bool
+ forceConflicts bool
+ dryRun bool
+ fieldValidationDirective FieldValidationDirective
+ upgradeClientSideFieldManager bool
+}
+
+type ClientUpdateOption func(*clientUpdateOptions) error
+
+// ClientUpdateOptionThreeWayMergeForUnstructured enables performing three-way merge for unstructured objects
+// Must not be enabled when ClientUpdateOptionServerSideApply is enabled
+func ClientUpdateOptionThreeWayMergeForUnstructured(threeWayMergeForUnstructured bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.threeWayMergeForUnstructured = threeWayMergeForUnstructured
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionServerSideApply enables performing object apply server-side (default)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+// Must not be enabled when ClientUpdateOptionThreeWayMerge is enabled
+//
+// `forceConflicts` forces conflicts to be resolved (may be enabled when serverSideApply enabled only)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+func ClientUpdateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ if !serverSideApply && forceConflicts {
+ return fmt.Errorf("forceConflicts enabled when serverSideApply disabled")
+ }
+
+ o.serverSideApply = serverSideApply
+ o.forceConflicts = forceConflicts
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionForceReplace forces objects to be replaced rather than updated via patch
+// Must not be enabled when ClientUpdateOptionForceConflicts is enabled
+func ClientUpdateOptionForceReplace(forceReplace bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.forceReplace = forceReplace
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionDryRun requests the server to perform non-mutating operations only
+func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.dryRun = dryRun
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionFieldValidationDirective specifies show API operations validate object's schema
+// - For client-side apply: this is ignored
+// - For server-side apply: the directive is sent to the server to perform the validation
+//
+// Defaults to `FieldValidationDirectiveStrict`
+func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.fieldValidationDirective = fieldValidationDirective
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionUpgradeClientSideFieldManager specifies that resources client-side field manager should be upgraded to server-side apply
+// (before applying the object server-side)
+// This is required when upgrading a chart from client-side to server-side apply, otherwise the client-side field management remains. Conflicting with server-side applied updates.
+//
+// Note:
+// if this option is specified, but the object is not managed by client-side field manager, it will be a no-op. However, the cost of fetching the objects will be incurred.
+//
+// see:
+// - https://github.com/kubernetes/kubernetes/pull/112905
+// - `UpgradeManagedFields` / https://github.com/kubernetes/kubernetes/blob/f47e9696d7237f1011d23c9b55f6947e60526179/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go#L81
+func ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.upgradeClientSideFieldManager = upgradeClientSideFieldManager
+
+ return nil
+ }
+}
+
+type UpdateApplyFunc func(original, target *resource.Info) error
+
+// Update takes the current list of objects and target list of objects and
+// creates resources that don't already exist, updates resources that have been
+// modified in the target configuration, and deletes resources from the current
+// configuration that are not present in the target configuration. If an error
+// occurs, a Result will still be returned with the error, containing all
+// resource updates, creations, and deletions that were attempted. These can be
+// used for cleanup or other logging purposes.
+//
+// The default is to use server-side apply, equivalent to: `ClientUpdateOptionServerSideApply(true)`
+func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdateOption) (*Result, error) {
+ updateOptions := clientUpdateOptions{
+ serverSideApply: true, // Default to server-side apply
+ fieldValidationDirective: FieldValidationDirectiveStrict,
+ }
+
+ errs := make([]error, 0, len(options))
+ for _, o := range options {
+ errs = append(errs, o(&updateOptions))
+ }
+ if err := errors.Join(errs...); err != nil {
+ return nil, fmt.Errorf("invalid client update option(s): %w", err)
+ }
+
+ if updateOptions.threeWayMergeForUnstructured && updateOptions.serverSideApply {
+ return nil, fmt.Errorf("invalid operation: cannot use three-way merge for unstructured and server-side apply together")
+ }
+
+ if updateOptions.forceConflicts && updateOptions.forceReplace {
+ return nil, fmt.Errorf("invalid operation: cannot use force conflicts and force replace together")
+ }
+
+ if updateOptions.serverSideApply && updateOptions.forceReplace {
+ return nil, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together")
+ }
+
+ makeUpdateApplyFunc := func() UpdateApplyFunc {
+ if updateOptions.forceReplace {
+ slog.Debug(
+ "using resource replace update strategy",
+ slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective)))
+ return func(original, target *resource.Info) error {
+ if err := replaceResource(target, updateOptions.fieldValidationDirective); err != nil {
+ slog.Debug("error replacing the resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
+ return err
+ }
+
+ originalObject := original.Object
+ kind := target.Mapping.GroupVersionKind.Kind
+ slog.Debug("replace succeeded", "name", original.Name, "initialKind", originalObject.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
+
+ return nil
+ }
+ } else if updateOptions.serverSideApply {
+ slog.Debug(
+ "using server-side apply for resource update",
+ slog.Bool("forceConflicts", updateOptions.forceConflicts),
+ slog.Bool("dryRun", updateOptions.dryRun),
+ slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective)),
+ slog.Bool("upgradeClientSideFieldManager", updateOptions.upgradeClientSideFieldManager))
+ return func(original, target *resource.Info) error {
+
+ logger := slog.With(
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("gvk", target.Mapping.GroupVersionKind.String()))
+
+ if updateOptions.upgradeClientSideFieldManager {
+ patched, err := upgradeClientSideFieldManager(original, updateOptions.dryRun, updateOptions.fieldValidationDirective)
+ if err != nil {
+ slog.Debug("Error patching resource to replace CSA field management", slog.Any("error", err))
+ return err
+ }
+
+ if patched {
+ logger.Debug("Upgraded object client-side field management with server-side apply field management")
+ }
+ }
+
+ if err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective); err != nil {
+ logger.Debug("Error patching resource", slog.Any("error", err))
+ return err
+ }
+
+ logger.Debug("Patched resource")
+
+ return nil
+ }
+ }
+
+ slog.Debug("using client-side apply for resource update", slog.Bool("threeWayMergeForUnstructured", updateOptions.threeWayMergeForUnstructured))
+ return func(original, target *resource.Info) error {
+ return patchResourceClientSide(original.Object, target, updateOptions.threeWayMergeForUnstructured)
+ }
+ }
+
+ return c.update(originals, targets, makeUpdateApplyFunc())
+}
+
// Delete deletes Kubernetes resources specified in the resources list with
// background cascade deletion. It will attempt to delete all resources even
// if one or more fail and collect any errors. All successfully deleted items
// will be returned in the `Deleted` ResourceList that is part of the result.
func (c *Client) Delete(resources ResourceList) (*Result, []error) {
- return rdelete(c, resources, metav1.DeletePropagationBackground)
+ return deleteResources(resources, metav1.DeletePropagationBackground)
}
// Delete deletes Kubernetes resources specified in the resources list with
@@ -475,23 +782,23 @@ func (c *Client) Delete(resources ResourceList) (*Result, []error) {
// if one or more fail and collect any errors. All successfully deleted items
// will be returned in the `Deleted` ResourceList that is part of the result.
func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) {
- return rdelete(c, resources, policy)
+ return deleteResources(resources, policy)
}
-func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
+func deleteResources(resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error
res := &Result{}
mtx := sync.Mutex{}
- err := perform(resources, func(info *resource.Info) error {
- c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
- err := deleteResource(info, propagation)
+ err := perform(resources, func(target *resource.Info) error {
+ slog.Debug("starting delete resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind)
+ err := deleteResource(target, propagation)
if err == nil || apierrors.IsNotFound(err) {
if err != nil {
- c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
+ slog.Debug("ignoring delete failure", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err))
}
mtx.Lock()
defer mtx.Unlock()
- res.Deleted = append(res.Deleted, info)
+ res.Deleted = append(res.Deleted, target)
return nil
}
mtx.Lock()
@@ -512,30 +819,35 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa
return res, nil
}
-func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error {
- return func(info *resource.Info) error {
- return c.watchUntilReady(t, info)
+// https://github.com/kubernetes/kubectl/blob/197123726db24c61aa0f78d1f0ba6e91a2ec2f35/pkg/cmd/apply/apply.go#L439
+func isIncompatibleServerError(err error) bool {
+ // 415: Unsupported media type means we're talking to a server which doesn't
+ // support server-side apply.
+ if _, ok := err.(*apierrors.StatusError); !ok {
+ // Non-StatusError means the error isn't because the server is incompatible.
+ return false
}
+ return err.(*apierrors.StatusError).Status().Code == http.StatusUnsupportedMediaType
}
-// WatchUntilReady watches the resources given and waits until it is ready.
-//
-// This method is mainly for hook implementations. It watches for a resource to
-// hit a particular milestone. The milestone depends on the Kind.
-//
-// For most kinds, it checks to see if the resource is marked as Added or Modified
-// by the Kubernetes event stream. For some kinds, it does more:
-//
-// - Jobs: A job is marked "Ready" when it has successfully completed. This is
-// ascertained by watching the Status fields in a job's output.
-// - Pods: A pod is marked "Ready" when it has successfully completed. This is
-// ascertained by watching the status.phase field in a pod's output.
-//
-// Handling for other kinds will be added as necessary.
-func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
- // For jobs, there's also the option to do poll c.Jobs(namespace).Get():
- // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
- return perform(resources, c.watchTimeout(timeout))
+// getManagedFieldsManager returns the manager string. If one was set it will be returned.
+// Otherwise, one is calculated based on the name of the binary.
+func getManagedFieldsManager() string {
+
+ // When a manager is explicitly set use it
+ if ManagedFieldsManager != "" {
+ return ManagedFieldsManager
+ }
+
+ // When no manager is set and no calling application can be found it is unknown
+ if len(os.Args[0]) == 0 {
+ return "unknown"
+ }
+
+ // When there is an application that can be determined and no set manager
+ // use the base name. This is one of the ways Kubernetes libs handle figuring
+ // names out.
+ return filepath.Base(os.Args[0])
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
@@ -551,58 +863,46 @@ func perform(infos ResourceList, fn func(*resource.Info) error) error {
for range infos {
err := <-errs
if err != nil {
- result = multierror.Append(result, err)
+ result = errors.Join(result, err)
}
}
return result
}
-// getManagedFieldsManager returns the manager string. If one was set it will be returned.
-// Otherwise, one is calculated based on the name of the binary.
-func getManagedFieldsManager() string {
-
- // When a manager is explicitly set use it
- if ManagedFieldsManager != "" {
- return ManagedFieldsManager
- }
-
- // When no manager is set and no calling application can be found it is unknown
- if len(os.Args[0]) == 0 {
- return "unknown"
- }
-
- // When there is an application that can be determined and no set manager
- // use the base name. This is one of the ways Kubernetes libs handle figuring
- // names out.
- return filepath.Base(os.Args[0])
-}
-
func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) {
var kind string
var wg sync.WaitGroup
+ defer wg.Wait()
+
for _, info := range infos {
currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind
if kind != currentKind {
wg.Wait()
kind = currentKind
}
+
wg.Add(1)
- go func(i *resource.Info) {
- errs <- fn(i)
+ go func(info *resource.Info) {
+ errs <- fn(info)
wg.Done()
}(info)
}
}
+var createMutex sync.Mutex
+
func createResource(info *resource.Info) error {
return retry.RetryOnConflict(
retry.DefaultRetry,
func() error {
+ createMutex.Lock()
+ defer createMutex.Unlock()
obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object)
if err != nil {
return err
}
+
return info.Refresh(obj, true)
})
}
@@ -617,27 +917,27 @@ func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) erro
})
}
-func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) {
- oldData, err := json.Marshal(current)
+func createPatch(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) {
+ oldData, err := json.Marshal(original)
if err != nil {
- return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing current configuration")
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %w", err)
}
newData, err := json.Marshal(target.Object)
if err != nil {
- return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing target configuration")
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing target configuration: %w", err)
}
// Fetch the current object for the three way merge
helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
currentObj, err := helper.Get(target.Namespace, target.Name)
if err != nil && !apierrors.IsNotFound(err) {
- return nil, types.StrategicMergePatchType, errors.Wrapf(err, "unable to get data for current object %s/%s", target.Namespace, target.Name)
+ return nil, types.StrategicMergePatchType, fmt.Errorf("unable to get data for current object %s/%s: %w", target.Namespace, target.Name, err)
}
// Even if currentObj is nil (because it was not found), it will marshal just fine
currentData, err := json.Marshal(currentObj)
if err != nil {
- return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing live configuration")
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing live configuration: %w", err)
}
// Get a versioned object
@@ -645,7 +945,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
// Unstructured objects, such as CRDs, may not have a not registered error
// returned from ConvertToVersion. Anything that's unstructured should
- // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported
+ // use generic JSON merge patch. Strategic Merge Patch is not supported
// on objects like CRDs.
_, isUnstructured := versionedObject.(runtime.Unstructured)
@@ -653,6 +953,19 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
_, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition)
if isUnstructured || isCRD {
+ if threeWayMergeForUnstructured {
+ // from https://github.com/kubernetes/kubectl/blob/b83b2ec7d15f286720bccf7872b5c72372cb8e80/pkg/cmd/apply/patcher.go#L129
+ preconditions := []mergepatch.PreconditionFunc{
+ mergepatch.RequireKeyUnchanged("apiVersion"),
+ mergepatch.RequireKeyUnchanged("kind"),
+ mergepatch.RequireMetadataKeyUnchanged("name"),
+ }
+ patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(oldData, newData, currentData, preconditions...)
+ if err != nil && mergepatch.IsPreconditionFailed(err) {
+ err = fmt.Errorf("%w: at least one field was changed: apiVersion, kind or name", err)
+ }
+ return patch, types.MergePatchType, err
+ }
// fall back to generic JSON merge patch
patch, err := jsonpatch.CreateMergePatch(oldData, newData)
return patch, types.MergePatchType, err
@@ -660,156 +973,157 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
if err != nil {
- return nil, types.StrategicMergePatchType, errors.Wrap(err, "unable to create patch metadata from object")
+ return nil, types.StrategicMergePatchType, fmt.Errorf("unable to create patch metadata from object: %w", err)
}
patch, err := strategicpatch.CreateThreeWayMergePatch(oldData, newData, currentData, patchMeta, true)
return patch, types.StrategicMergePatchType, err
}
-func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
- var (
- obj runtime.Object
- helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
- kind = target.Mapping.GroupVersionKind.Kind
- )
+func replaceResource(target *resource.Info, fieldValidationDirective FieldValidationDirective) error {
- // if --force is applied, attempt to replace the existing resource with the new object.
- if force {
- var err error
- obj, err = helper.Replace(target.Namespace, target.Name, true, target.Object)
- if err != nil {
- return errors.Wrap(err, "failed to replace object")
- }
- c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
- } else {
- patch, patchType, err := createPatch(target, currentObj)
- if err != nil {
- return errors.Wrap(err, "failed to create patch")
- }
+ helper := resource.NewHelper(target.Client, target.Mapping).
+ WithFieldValidation(string(fieldValidationDirective)).
+ WithFieldManager(getManagedFieldsManager())
- if patch == nil || string(patch) == "{}" {
- c.Log("Looks like there are no changes for %s %q", kind, target.Name)
- // This needs to happen to make sure that Helm has the latest info from the API
- // Otherwise there will be no labels and other functions that use labels will panic
- if err := target.Get(); err != nil {
- return errors.Wrap(err, "failed to refresh resource information")
- }
- return nil
- }
- // send patch to server
- c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace)
- obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
- if err != nil {
- return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
- }
+ obj, err := helper.Replace(target.Namespace, target.Name, true, target.Object)
+ if err != nil {
+ return fmt.Errorf("failed to replace object: %w", err)
+ }
+
+ if err := target.Refresh(obj, true); err != nil {
+ return fmt.Errorf("failed to refresh object after replace: %w", err)
}
- target.Refresh(obj, true)
return nil
+
}
-func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
- kind := info.Mapping.GroupVersionKind.Kind
- switch kind {
- case "Job", "Pod":
- default:
- return nil
+func patchResourceClientSide(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) error {
+
+ patch, patchType, err := createPatch(original, target, threeWayMergeForUnstructured)
+ if err != nil {
+ return fmt.Errorf("failed to create patch: %w", err)
}
- c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
+ kind := target.Mapping.GroupVersionKind.Kind
+ if patch == nil || string(patch) == "{}" {
+ slog.Debug("no changes detected", "kind", kind, "name", target.Name)
+ // This needs to happen to make sure that Helm has the latest info from the API
+ // Otherwise there will be no labels and other functions that use labels will panic
+ if err := target.Get(); err != nil {
+ return fmt.Errorf("failed to refresh resource information: %w", err)
+ }
+ return nil
+ }
- // Use a selector on the name of the resource. This should be unique for the
- // given version and kind
- selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+ // send patch to server
+ slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
if err != nil {
- return err
+ return fmt.Errorf("cannot patch %q with kind %s: %w", target.Name, kind, err)
}
- lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
-
- // What we watch for depends on the Kind.
- // - For a Job, we watch for completion.
- // - For all else, we watch until Ready.
- // In the future, we might want to add some special logic for types
- // like Ingress, Volume, etc.
-
- ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
- defer cancel()
- _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
- // Make sure the incoming object is versioned as we use unstructured
- // objects when we build manifests
- obj := convertWithMapper(e.Object, info.Mapping)
- switch e.Type {
- case watch.Added, watch.Modified:
- // For things like a secret or a config map, this is the best indicator
- // we get. We care mostly about jobs, where what we want to see is
- // the status go into a good state. For other types, like ReplicaSet
- // we don't really do anything to support these as hooks.
- c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
- switch kind {
- case "Job":
- return c.waitForJob(obj, info.Name)
- case "Pod":
- return c.waitForPodSuccess(obj, info.Name)
- }
- return true, nil
- case watch.Deleted:
- c.Log("Deleted event for %s", info.Name)
- return true, nil
- case watch.Error:
- // Handle error and return with an error.
- c.Log("Error event for %s", info.Name)
- return true, errors.Errorf("failed to deploy %s", info.Name)
- default:
- return false, nil
- }
- })
- return err
+
+ target.Refresh(obj, true)
+
+ return nil
}
-// waitForJob is a helper that waits for a job to complete.
-//
-// This operates on an event returned from a watcher.
-func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
- o, ok := obj.(*batch.Job)
- if !ok {
- return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
- }
+// upgradeClientSideFieldManager is simply a wrapper around csaupgrade.UpgradeManagedFields
+// that ugrade CSA managed fields to SSA apply
+// see: https://github.com/kubernetes/kubernetes/pull/112905
+func upgradeClientSideFieldManager(info *resource.Info, dryRun bool, fieldValidationDirective FieldValidationDirective) (bool, error) {
- for _, c := range o.Status.Conditions {
- if c.Type == batch.JobComplete && c.Status == "True" {
- return true, nil
- } else if c.Type == batch.JobFailed && c.Status == "True" {
- return true, errors.Errorf("job %s failed: %s", name, c.Reason)
- }
- }
+ fieldManagerName := getManagedFieldsManager()
+
+ patched := false
+ err := retry.RetryOnConflict(
+ retry.DefaultRetry,
+ func() error {
+
+ if err := info.Get(); err != nil {
+ return fmt.Errorf("failed to get object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ helper := resource.NewHelper(
+ info.Client,
+ info.Mapping).
+ DryRun(dryRun).
+ WithFieldManager(fieldManagerName).
+ WithFieldValidation(string(fieldValidationDirective))
+
+ patchData, err := csaupgrade.UpgradeManagedFieldsPatch(
+ info.Object,
+ sets.New(fieldManagerName),
+ fieldManagerName)
+ if err != nil {
+ return fmt.Errorf("failed to upgrade managed fields for object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ if len(patchData) == 0 {
+ return nil
+ }
- c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
- return false, nil
+ obj, err := helper.Patch(
+ info.Namespace,
+ info.Name,
+ types.JSONPatchType,
+ patchData,
+ nil)
+
+ if err == nil {
+ patched = true
+ return info.Refresh(obj, true)
+ }
+
+ if !apierrors.IsConflict(err) {
+ return fmt.Errorf("failed to patch object to upgrade CSA field manager %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ return err
+ })
+
+ return patched, err
}
-// waitForPodSuccess is a helper that waits for a pod to complete.
-//
-// This operates on an event returned from a watcher.
-func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
- o, ok := obj.(*v1.Pod)
- if !ok {
- return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
+// Patch reource using server-side apply
+func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error {
+ helper := resource.NewHelper(
+ target.Client,
+ target.Mapping).
+ DryRun(dryRun).
+ WithFieldManager(getManagedFieldsManager()).
+ WithFieldValidation(string(fieldValidationDirective))
+
+ // Send the full object to be applied on the server side.
+ data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, target.Object)
+ if err != nil {
+ return fmt.Errorf("failed to encode object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err)
+ }
+ options := metav1.PatchOptions{
+ Force: &forceConflicts,
}
+ obj, err := helper.Patch(
+ target.Namespace,
+ target.Name,
+ types.ApplyPatchType,
+ data,
+ &options,
+ )
+ if err != nil {
+ if isIncompatibleServerError(err) {
+ return fmt.Errorf("server-side apply not available on the server: %v", err)
+ }
- switch o.Status.Phase {
- case v1.PodSucceeded:
- c.Log("Pod %s succeeded", o.Name)
- return true, nil
- case v1.PodFailed:
- return true, errors.Errorf("pod %s failed", o.Name)
- case v1.PodPending:
- c.Log("Pod %s pending", o.Name)
- case v1.PodRunning:
- c.Log("Pod %s running", o.Name)
+ if apierrors.IsConflict(err) {
+ return fmt.Errorf("conflict occurred while applying object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err)
+ }
+
+ return err
}
- return false, nil
+ return target.Refresh(obj, true)
}
// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions
@@ -841,15 +1155,12 @@ func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace st
func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error {
readCloser, err := request.Stream(context.Background())
if err != nil {
- return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName)
+ return fmt.Errorf("failed to stream pod logs for pod: %s, container: %s", podName, containerName)
}
defer readCloser.Close()
_, err = io.Copy(writer, readCloser)
if err != nil {
- return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
- }
- if err != nil {
- return errors.Errorf("Failed to close reader for pod: %s, container: %s", podName, containerName)
+ return fmt.Errorf("failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
}
return nil
}
@@ -866,3 +1177,27 @@ func scrubValidationError(err error) error {
}
return err
}
+
+type joinedErrors struct {
+ errs []error
+ sep string
+}
+
+func joinErrors(errs []error, sep string) error {
+ return &joinedErrors{
+ errs: errs,
+ sep: sep,
+ }
+}
+
+func (e *joinedErrors) Error() string {
+ errs := make([]string, 0, len(e.errs))
+ for _, err := range e.errs {
+ errs = append(errs, err.Error())
+ }
+ return strings.Join(errs, e.sep)
+}
+
+func (e *joinedErrors) Unwrap() []error {
+ return e.errs
+}
diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go
index ff1335f0f..a8a8668c7 100644
--- a/pkg/kube/client_test.go
+++ b/pkg/kube/client_test.go
@@ -18,26 +18,40 @@ package kube
import (
"bytes"
+ "errors"
+ "fmt"
"io"
"net/http"
"strings"
+ "sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest/fake"
cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
)
-var unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer
-var codec = scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
+var (
+ unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer
+ codec = scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
+)
func objBody(obj runtime.Object) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
@@ -102,209 +116,214 @@ func newResponseJSON(code int, json []byte) (*http.Response, error) {
}
func newTestClient(t *testing.T) *Client {
+ t.Helper()
testFactory := cmdtesting.NewTestFactory()
t.Cleanup(testFactory.Cleanup)
return &Client{
- Factory: testFactory.WithNamespace("default"),
- Log: nopLogger,
+ Factory: testFactory.WithNamespace(v1.NamespaceDefault),
}
}
-func TestCreate(t *testing.T) {
- // Note: c.Create with the fake client can currently only test creation of a single pod in the same list. When testing
- // with more than one pod, c.Create will run into a data race as it calls perform->batchPerform which performs creation
- // in batches. The first data race is on accessing var actions and can be fixed easily with a mutex lock in the Client
- // function. The second data race though is something in the fake client itself in func (c *RESTClient) do(...)
- // when it stores the req: c.Req = req and cannot (?) be fixed easily.
- listA := newPodList("starfish")
- listB := newPodList("dolphin")
+type RequestResponseAction struct {
+ Request http.Request
+ Response http.Response
+ Error error
+}
- var actions []string
- var iterationCounter int
+type RoundTripperTestFunc func(previous []RequestResponseAction, req *http.Request) (*http.Response, error)
- c := newTestClient(t)
- c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
- NegotiatedSerializer: unstructuredSerializer,
- Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
- path, method := req.URL.Path, req.Method
- bodyReader := new(strings.Builder)
- _, _ = io.Copy(bodyReader, req.Body)
- body := bodyReader.String()
- actions = append(actions, path+":"+method)
- t.Logf("got request %s %s", path, method)
- switch {
- case path == "/namespaces/default/pods" && method == "POST":
- if strings.Contains(body, "starfish") {
- if iterationCounter < 2 {
- iterationCounter++
- return newResponseJSON(409, resourceQuotaConflict)
- }
- return newResponse(200, &listA.Items[0])
- }
- return newResponseJSON(409, resourceQuotaConflict)
- default:
- t.Fatalf("unexpected request: %s %s", method, path)
- return nil, nil
- }
- }),
+func NewRequestResponseLogClient(t *testing.T, cb RoundTripperTestFunc) RequestResponseLogClient {
+ t.Helper()
+ return RequestResponseLogClient{
+ t: t,
+ cb: cb,
}
+}
- t.Run("Create success", func(t *testing.T) {
- list, err := c.Build(objBody(&listA), false)
- if err != nil {
- t.Fatal(err)
- }
+// RequestResponseLogClient is a test client that logs requests and responses
+// Satifying http.RoundTripper interface, it can be used to mock HTTP requests in tests.
+// Forwarding requests to a callback function (cb) that can be used to simulate server responses.
+type RequestResponseLogClient struct {
+ t *testing.T
+ cb RoundTripperTestFunc
+ actionsLock sync.Mutex
+ Actions []RequestResponseAction
+}
- result, err := c.Create(list)
- if err != nil {
- t.Fatal(err)
- }
+func (r *RequestResponseLogClient) Do(req *http.Request) (*http.Response, error) {
+ t := r.t
+ t.Helper()
- if len(result.Created) != 1 {
- t.Errorf("expected 1 resource created, got %d", len(result.Created))
+ readBodyBytes := func(body io.ReadCloser) []byte {
+ if body == nil {
+ return []byte{}
}
- expectedActions := []string{
- "/namespaces/default/pods:POST",
- "/namespaces/default/pods:POST",
- "/namespaces/default/pods:POST",
- }
- if len(expectedActions) != len(actions) {
- t.Fatalf("unexpected number of requests, expected %d, got %d", len(expectedActions), len(actions))
- }
- for k, v := range expectedActions {
- if actions[k] != v {
- t.Errorf("expected %s request got %s", v, actions[k])
- }
- }
- })
+ defer body.Close()
+ bodyBytes, err := io.ReadAll(body)
+ require.NoError(t, err)
- t.Run("Create failure", func(t *testing.T) {
- list, err := c.Build(objBody(&listB), false)
- if err != nil {
- t.Fatal(err)
- }
+ return bodyBytes
+ }
- _, err = c.Create(list)
- if err == nil {
- t.Errorf("expected error")
- }
+ reqBytes := readBodyBytes(req.Body)
- expectedString := "Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; " +
- "please apply your changes to the latest version and try again"
- if !strings.Contains(err.Error(), expectedString) {
- t.Errorf("Unexpected error message: %q", err)
- }
+ t.Logf("Request: %s %s %s", req.Method, req.URL.String(), reqBytes)
+ if req.Body != nil {
+ req.Body = io.NopCloser(bytes.NewReader(reqBytes))
+ }
- expectedActions := []string{
- "/namespaces/default/pods:POST",
- }
- for k, v := range actions {
- if expectedActions[0] != v {
- t.Errorf("expected %s request got %s", v, actions[k])
- }
- }
+ resp, err := r.cb(r.Actions, req)
+
+ respBytes := readBodyBytes(resp.Body)
+ t.Logf("Response: %d %s", resp.StatusCode, string(respBytes))
+ if resp.Body != nil {
+ resp.Body = io.NopCloser(bytes.NewReader(respBytes))
+ }
+
+ r.actionsLock.Lock()
+ defer r.actionsLock.Unlock()
+ r.Actions = append(r.Actions, RequestResponseAction{
+ Request: *req,
+ Response: *resp,
+ Error: err,
})
+
+ return resp, err
}
-func TestUpdate(t *testing.T) {
- listA := newPodList("starfish", "otter", "squid")
- listB := newPodList("starfish", "otter", "dolphin")
- listC := newPodList("starfish", "otter", "dolphin")
- listB.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
- listC.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+func TestCreate(t *testing.T) {
+ // Note: c.Create with the fake client can currently only test creation of a single pod/object in the same list. When testing
+ // with more than one pod, c.Create will run into a data race as it calls perform->batchPerform which performs creation
+ // in batches. The race is something in the fake client itself in `func (c *RESTClient) do(...)`
+ // when it stores the req: c.Req = req and cannot (?) be fixed easily.
- var actions []string
- var iterationCounter int
+ type testCase struct {
+ Name string
+ Pods v1.PodList
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ServerSideApply bool
+ ExpectedActions []string
+ ExpectedErrorContains string
+ }
- c := newTestClient(t)
- c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
- NegotiatedSerializer: unstructuredSerializer,
- Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
- p, m := req.URL.Path, req.Method
- actions = append(actions, p+":"+m)
- t.Logf("got request %s %s", p, m)
- switch {
- case p == "/namespaces/default/pods/starfish" && m == "GET":
- return newResponse(200, &listA.Items[0])
- case p == "/namespaces/default/pods/otter" && m == "GET":
- return newResponse(200, &listA.Items[1])
- case p == "/namespaces/default/pods/otter" && m == "PATCH":
- data, err := io.ReadAll(req.Body)
- if err != nil {
- t.Fatalf("could not dump request: %s", err)
- }
- req.Body.Close()
- expected := `{}`
- if string(data) != expected {
- t.Errorf("expected patch\n%s\ngot\n%s", expected, string(data))
- }
- return newResponse(200, &listB.Items[0])
- case p == "/namespaces/default/pods/dolphin" && m == "GET":
- return newResponse(404, notFoundBody())
- case p == "/namespaces/default/pods/starfish" && m == "PATCH":
- data, err := io.ReadAll(req.Body)
- if err != nil {
- t.Fatalf("could not dump request: %s", err)
- }
- req.Body.Close()
- expected := `{"spec":{"$setElementOrder/containers":[{"name":"app:v4"}],"containers":[{"$setElementOrder/ports":[{"containerPort":443}],"name":"app:v4","ports":[{"containerPort":443,"name":"https"},{"$patch":"delete","containerPort":80}]}]}}`
- if string(data) != expected {
- t.Errorf("expected patch\n%s\ngot\n%s", expected, string(data))
- }
- return newResponse(200, &listB.Items[0])
- case p == "/namespaces/default/pods" && m == "POST":
- if iterationCounter < 2 {
- iterationCounter++
- return newResponseJSON(409, resourceQuotaConflict)
+ testCases := map[string]testCase{
+ "Create success (client-side apply)": {
+ Pods: newPodList("starfish"),
+ ServerSideApply: false,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ if len(previous) < 2 { // simulate a conflict
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
}
- return newResponse(200, &listB.Items[1])
- case p == "/namespaces/default/pods/squid" && m == "DELETE":
- return newResponse(200, &listB.Items[1])
- case p == "/namespaces/default/pods/squid" && m == "GET":
- return newResponse(200, &listB.Items[2])
- default:
- t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
- return nil, nil
- }
- }),
- }
- first, err := c.Build(objBody(&listA), false)
- if err != nil {
- t.Fatal(err)
- }
- second, err := c.Build(objBody(&listB), false)
- if err != nil {
- t.Fatal(err)
- }
- result, err := c.Update(first, second, false)
- if err != nil {
- t.Fatal(err)
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ ExpectedActions: []string{
+ "/namespaces/default/pods:POST",
+ "/namespaces/default/pods:POST",
+ "/namespaces/default/pods:POST",
+ },
+ },
+ "Create success (server-side apply)": {
+ Pods: newPodList("whale"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ ExpectedActions: []string{
+ "/namespaces/default/pods/whale:PATCH",
+ },
+ },
+ "Create fail: incompatible server (server-side apply)": {
+ Pods: newPodList("lobster"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusUnsupportedMediaType,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "server-side apply not available on the server:",
+ ExpectedActions: []string{
+ "/namespaces/default/pods/lobster:PATCH",
+ },
+ },
+ "Create fail: quota (server-side apply)": {
+ Pods: newPodList("dolphin"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
+ },
+ ExpectedErrorContains: "Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; " +
+ "please apply your changes to the latest version and try again",
+ ExpectedActions: []string{
+ "/namespaces/default/pods/dolphin:PATCH",
+ },
+ },
}
- if len(result.Created) != 1 {
- t.Errorf("expected 1 resource created, got %d", len(result.Created))
- }
- if len(result.Updated) != 2 {
- t.Errorf("expected 2 resource updated, got %d", len(result.Updated))
+ c := newTestClient(t)
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ list, err := c.Build(objBody(&tc.Pods), false)
+ require.NoError(t, err)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result, err := c.Create(
+ list,
+ ClientCreateOptionServerSideApply(tc.ServerSideApply, false))
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+
+ // See note above about limitations in supporting more than a single object
+ assert.Len(t, result.Created, 1, "expected 1 object created, got %d", len(result.Created))
+ }
+
+ actions := []string{}
+ for _, action := range client.Actions {
+ path, method := action.Request.URL.Path, action.Request.Method
+ actions = append(actions, path+":"+method)
+ }
+
+ assert.Equal(t, tc.ExpectedActions, actions)
+
+ })
}
- if len(result.Deleted) != 1 {
- t.Errorf("expected 1 resource deleted, got %d", len(result.Deleted))
+}
+
+func TestUpdate(t *testing.T) {
+ type testCase struct {
+ OriginalPods v1.PodList
+ TargetPods v1.PodList
+ ThreeWayMergeForUnstructured bool
+ ServerSideApply bool
+ ExpectedActions []string
}
- // TODO: Find a way to test methods that use Client Set
- // Test with a wait
- // if err := c.Update("test", objBody(codec, &listB), objBody(codec, &listC), false, 300, true); err != nil {
- // t.Fatal(err)
- // }
- // Test with a wait should fail
- // TODO: A way to make this not based off of an extremely short timeout?
- // if err := c.Update("test", objBody(codec, &listC), objBody(codec, &listA), false, 2, true); err != nil {
- // t.Fatal(err)
- // }
- expectedActions := []string{
+ expectedActionsClientSideApply := []string{
"/namespaces/default/pods/starfish:GET",
"/namespaces/default/pods/starfish:GET",
"/namespaces/default/pods/starfish:PATCH",
@@ -318,13 +337,154 @@ func TestUpdate(t *testing.T) {
"/namespaces/default/pods/squid:GET",
"/namespaces/default/pods/squid:DELETE",
}
- if len(expectedActions) != len(actions) {
- t.Fatalf("unexpected number of requests, expected %d, got %d", len(expectedActions), len(actions))
+
+ expectedActionsServerSideApply := []string{
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:PATCH",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:PATCH",
+ "/namespaces/default/pods/dolphin:GET",
+ "/namespaces/default/pods:POST", // create dolphin
+ "/namespaces/default/pods:POST", // retry due to 409
+ "/namespaces/default/pods:POST", // retry due to 409
+ "/namespaces/default/pods/squid:GET",
+ "/namespaces/default/pods/squid:DELETE",
+ }
+
+ testCases := map[string]testCase{
+ "client-side apply": {
+ OriginalPods: newPodList("starfish", "otter", "squid"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: false,
+ ExpectedActions: expectedActionsClientSideApply,
+ },
+ "client-side apply (three-way merge for unstructured)": {
+ OriginalPods: newPodList("starfish", "otter", "squid"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: true,
+ ServerSideApply: false,
+ ExpectedActions: expectedActionsClientSideApply,
+ },
+ "serverSideApply": {
+ OriginalPods: newPodList("starfish", "otter", "squid"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: true,
+ ExpectedActions: expectedActionsServerSideApply,
+ },
}
- for k, v := range expectedActions {
- if actions[k] != v {
- t.Errorf("expected %s request got %s", v, actions[k])
- }
+
+ c := newTestClient(t)
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ listOriginal := tc.OriginalPods
+ listTarget := tc.TargetPods
+
+ iterationCounter := 0
+ cb := func(_ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+
+ switch {
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listOriginal.Items[0])
+ case p == "/namespaces/default/pods/otter" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listOriginal.Items[1])
+ case p == "/namespaces/default/pods/otter" && m == http.MethodPatch:
+ if !tc.ServerSideApply {
+ defer req.Body.Close()
+ data, err := io.ReadAll(req.Body)
+ require.NoError(t, err)
+
+ assert.Equal(t, `{}`, string(data))
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods/dolphin" && m == http.MethodGet:
+ return newResponse(http.StatusNotFound, notFoundBody())
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodPatch:
+ if !tc.ServerSideApply {
+ // Ensure client-side apply specifies correct patch
+ defer req.Body.Close()
+ data, err := io.ReadAll(req.Body)
+ require.NoError(t, err)
+
+ expected := `{"spec":{"$setElementOrder/containers":[{"name":"app:v4"}],"containers":[{"$setElementOrder/ports":[{"containerPort":443}],"name":"app:v4","ports":[{"containerPort":443,"name":"https"},{"$patch":"delete","containerPort":80}]}]}}`
+ assert.Equal(t, expected, string(data))
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ if iterationCounter < 2 {
+ iterationCounter++
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[1])
+ case p == "/namespaces/default/pods/squid" && m == http.MethodDelete:
+ return newResponse(http.StatusOK, &listTarget.Items[1])
+ case p == "/namespaces/default/pods/squid" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listTarget.Items[2])
+ default:
+ }
+
+ t.Fail()
+ return nil, nil
+ }
+
+ client := NewRequestResponseLogClient(t, cb)
+
+ c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ first, err := c.Build(objBody(&listOriginal), false)
+ require.NoError(t, err)
+
+ second, err := c.Build(objBody(&listTarget), false)
+ require.NoError(t, err)
+
+ result, err := c.Update(
+ first,
+ second,
+ ClientUpdateOptionThreeWayMergeForUnstructured(tc.ThreeWayMergeForUnstructured),
+ ClientUpdateOptionForceReplace(false),
+ ClientUpdateOptionServerSideApply(tc.ServerSideApply, false),
+ ClientUpdateOptionUpgradeClientSideFieldManager(true))
+ require.NoError(t, err)
+
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+ assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated))
+ assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted))
+
+ actions := []string{}
+ for _, action := range client.Actions {
+ path, method := action.Request.URL.Path, action.Request.Method
+ actions = append(actions, path+":"+method)
+ }
+
+ assert.Equal(t, tc.ExpectedActions, actions)
+ })
}
}
@@ -468,7 +628,7 @@ func TestWait(t *testing.T) {
p, m := req.URL.Path, req.Method
t.Logf("got request %s %s", p, m)
switch {
- case p == "/api/v1/namespaces/default/pods/starfish" && m == "GET":
+ case p == "/api/v1/namespaces/default/pods/starfish" && m == http.MethodGet:
pod := &podList.Items[0]
if created != nil && time.Since(*created) >= time.Second*5 {
pod.Status.Conditions = []v1.PodCondition{
@@ -478,8 +638,8 @@ func TestWait(t *testing.T) {
},
}
}
- return newResponse(200, pod)
- case p == "/api/v1/namespaces/default/pods/otter" && m == "GET":
+ return newResponse(http.StatusOK, pod)
+ case p == "/api/v1/namespaces/default/pods/otter" && m == http.MethodGet:
pod := &podList.Items[1]
if created != nil && time.Since(*created) >= time.Second*5 {
pod.Status.Conditions = []v1.PodCondition{
@@ -489,8 +649,8 @@ func TestWait(t *testing.T) {
},
}
}
- return newResponse(200, pod)
- case p == "/api/v1/namespaces/default/pods/squid" && m == "GET":
+ return newResponse(http.StatusOK, pod)
+ case p == "/api/v1/namespaces/default/pods/squid" && m == http.MethodGet:
pod := &podList.Items[2]
if created != nil && time.Since(*created) >= time.Second*5 {
pod.Status.Conditions = []v1.PodCondition{
@@ -500,26 +660,35 @@ func TestWait(t *testing.T) {
},
}
}
- return newResponse(200, pod)
- case p == "/namespaces/default/pods" && m == "POST":
+ return newResponse(http.StatusOK, pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
resources, err := c.Build(req.Body, false)
if err != nil {
t.Fatal(err)
}
now := time.Now()
created = &now
- return newResponse(200, resources[0].Object)
+ return newResponse(http.StatusOK, resources[0].Object)
default:
t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
return nil, nil
}
}),
}
+ var err error
+ c.Waiter, err = c.GetWaiter(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
resources, err := c.Build(objBody(&podList), false)
if err != nil {
t.Fatal(err)
}
- result, err := c.Create(resources)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+
if err != nil {
t.Fatal(err)
}
@@ -548,30 +717,38 @@ func TestWaitJob(t *testing.T) {
p, m := req.URL.Path, req.Method
t.Logf("got request %s %s", p, m)
switch {
- case p == "/apis/batch/v1/namespaces/default/jobs/starfish" && m == "GET":
+ case p == "/apis/batch/v1/namespaces/default/jobs/starfish" && m == http.MethodGet:
if created != nil && time.Since(*created) >= time.Second*5 {
job.Status.Succeeded = 1
}
- return newResponse(200, job)
- case p == "/namespaces/default/jobs" && m == "POST":
+ return newResponse(http.StatusOK, job)
+ case p == "/namespaces/default/jobs" && m == http.MethodPost:
resources, err := c.Build(req.Body, false)
if err != nil {
t.Fatal(err)
}
now := time.Now()
created = &now
- return newResponse(200, resources[0].Object)
+ return newResponse(http.StatusOK, resources[0].Object)
default:
t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
return nil, nil
}
}),
}
+ var err error
+ c.Waiter, err = c.GetWaiter(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
resources, err := c.Build(objBody(job), false)
if err != nil {
t.Fatal(err)
}
- result, err := c.Create(resources)
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+
if err != nil {
t.Fatal(err)
}
@@ -600,32 +777,39 @@ func TestWaitDelete(t *testing.T) {
p, m := req.URL.Path, req.Method
t.Logf("got request %s %s", p, m)
switch {
- case p == "/namespaces/default/pods/starfish" && m == "GET":
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodGet:
if deleted != nil && time.Since(*deleted) >= time.Second*5 {
- return newResponse(404, notFoundBody())
+ return newResponse(http.StatusNotFound, notFoundBody())
}
- return newResponse(200, &pod)
- case p == "/namespaces/default/pods/starfish" && m == "DELETE":
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodDelete:
now := time.Now()
deleted = &now
- return newResponse(200, &pod)
- case p == "/namespaces/default/pods" && m == "POST":
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
resources, err := c.Build(req.Body, false)
if err != nil {
t.Fatal(err)
}
- return newResponse(200, resources[0].Object)
+ return newResponse(http.StatusOK, resources[0].Object)
default:
t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
return nil, nil
}
}),
}
+ var err error
+ c.Waiter, err = c.GetWaiter(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
resources, err := c.Build(objBody(&pod), false)
if err != nil {
t.Fatal(err)
}
- result, err := c.Create(resources)
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
if err != nil {
t.Fatal(err)
}
@@ -686,7 +870,6 @@ func TestReal(t *testing.T) {
}
func TestGetPodList(t *testing.T) {
-
namespace := "some-namespace"
names := []string{"dave", "jimmy"}
var responsePodList v1.PodList
@@ -701,7 +884,6 @@ func TestGetPodList(t *testing.T) {
clientAssertions := assert.New(t)
clientAssertions.NoError(err)
clientAssertions.Equal(&responsePodList, podList)
-
}
func TestOutputContainerLogsForPodList(t *testing.T) {
@@ -788,11 +970,11 @@ spec:
apiVersion: v1
kind: Service
metadata:
- name: redis-slave
+ name: redis-replica
labels:
app: redis
tier: backend
- role: slave
+ role: replica
spec:
ports:
# the port that this service should serve on
@@ -800,24 +982,24 @@ spec:
selector:
app: redis
tier: backend
- role: slave
+ role: replica
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: redis-slave
+ name: redis-replica
spec:
replicas: 2
template:
metadata:
labels:
app: redis
- role: slave
+ role: replica
tier: backend
spec:
containers:
- - name: slave
- image: gcr.io/google_samples/gb-redisslave:v1
+ - name: replica
+ image: gcr.io/google_samples/gb-redisreplica:v1
resources:
requests:
cpu: 100m
@@ -898,3 +1080,675 @@ spec:
var resourceQuotaConflict = []byte(`
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; please apply your changes to the latest version and try again","reason":"Conflict","details":{"name":"quota","kind":"resourcequotas"},"code":409}`)
+
+type createPatchTestCase struct {
+ name string
+
+ // The target state.
+ target *unstructured.Unstructured
+ // The state as it exists in the release.
+ original *unstructured.Unstructured
+ // The actual state as it exists in the cluster.
+ actual *unstructured.Unstructured
+
+ threeWayMergeForUnstructured bool
+ // The patch is supposed to transfer the current state to the target state,
+ // thereby preserving the actual state, wherever possible.
+ expectedPatch string
+ expectedPatchType types.PatchType
+}
+
+func (c createPatchTestCase) run(t *testing.T) {
+ scheme := runtime.NewScheme()
+ v1.AddToScheme(scheme)
+ encoder := jsonserializer.NewSerializerWithOptions(
+ jsonserializer.DefaultMetaFactory, scheme, scheme, jsonserializer.SerializerOptions{
+ Yaml: false, Pretty: false, Strict: true,
+ },
+ )
+ objBody := func(obj runtime.Object) io.ReadCloser {
+ return io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, obj))))
+ }
+ header := make(http.Header)
+ header.Set("Content-Type", runtime.ContentTypeJSON)
+ restClient := &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Resp: &http.Response{
+ StatusCode: http.StatusOK,
+ Body: objBody(c.actual),
+ Header: header,
+ },
+ }
+
+ targetInfo := &resource.Info{
+ Client: restClient,
+ Namespace: "default",
+ Name: "test-obj",
+ Object: c.target,
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{
+ Group: "crd.com",
+ Version: "v1",
+ Resource: "datas",
+ },
+ Scope: meta.RESTScopeNamespace,
+ },
+ }
+
+ patch, patchType, err := createPatch(c.original, targetInfo, c.threeWayMergeForUnstructured)
+ if err != nil {
+ t.Fatalf("Failed to create patch: %v", err)
+ }
+
+ if c.expectedPatch != string(patch) {
+ t.Errorf("Unexpected patch.\nTarget:\n%s\nOriginal:\n%s\nActual:\n%s\n\nExpected:\n%s\nGot:\n%s",
+ c.target,
+ c.original,
+ c.actual,
+ c.expectedPatch,
+ string(patch),
+ )
+ }
+
+ if patchType != types.MergePatchType {
+ t.Errorf("Expected patch type %s, got %s", types.MergePatchType, patchType)
+ }
+}
+
+func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured {
+ if metadata == nil {
+ metadata = make(map[string]string)
+ }
+ if _, ok := metadata["name"]; !ok {
+ metadata["name"] = "test-obj"
+ }
+ if _, ok := metadata["namespace"]; !ok {
+ metadata["namespace"] = "default"
+ }
+ o := map[string]interface{}{
+ "apiVersion": "crd.com/v1",
+ "kind": "Data",
+ "metadata": metadata,
+ }
+ if len(spec) > 0 {
+ o["spec"] = spec
+ }
+ return &unstructured.Unstructured{
+ Object: o,
+ }
+}
+
+func TestCreatePatchCustomResourceMetadata(t *testing.T) {
+ target := newTestCustomResourceData(map[string]string{
+ "meta.helm.sh/release-name": "foo-simple",
+ "meta.helm.sh/release-namespace": "default",
+ "objectset.rio.cattle.io/id": "default-foo-simple",
+ }, nil)
+ testCase := createPatchTestCase{
+ name: "take ownership of resource",
+ target: target,
+ original: target,
+ actual: newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ }),
+ threeWayMergeForUnstructured: true,
+ expectedPatch: `{"metadata":{"meta.helm.sh/release-name":"foo-simple","meta.helm.sh/release-namespace":"default","objectset.rio.cattle.io/id":"default-foo-simple"}}`,
+ expectedPatchType: types.MergePatchType,
+ }
+ t.Run(testCase.name, testCase.run)
+
+ // Previous behavior.
+ testCase.threeWayMergeForUnstructured = false
+ testCase.expectedPatch = `{}`
+ t.Run(testCase.name, testCase.run)
+}
+
+func TestCreatePatchCustomResourceSpec(t *testing.T) {
+ target := newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ "size": "large",
+ })
+ testCase := createPatchTestCase{
+ name: "merge with spec of existing custom resource",
+ target: target,
+ original: target,
+ actual: newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ "weight": "heavy",
+ }),
+ threeWayMergeForUnstructured: true,
+ expectedPatch: `{"spec":{"size":"large"}}`,
+ expectedPatchType: types.MergePatchType,
+ }
+ t.Run(testCase.name, testCase.run)
+
+ // Previous behavior.
+ testCase.threeWayMergeForUnstructured = false
+ testCase.expectedPatch = `{}`
+ t.Run(testCase.name, testCase.run)
+}
+
+type errorFactory struct {
+ *cmdtesting.TestFactory
+ err error
+}
+
+func (f *errorFactory) KubernetesClientSet() (*kubernetes.Clientset, error) {
+ return nil, f.err
+}
+
+func newTestClientWithDiscoveryError(t *testing.T, err error) *Client {
+ t.Helper()
+ c := newTestClient(t)
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ if req.URL.Path == "/version" {
+ return nil, err
+ }
+ resp, respErr := newResponse(http.StatusOK, &v1.Pod{})
+ return resp, respErr
+ }),
+ }
+ return c
+}
+
+func TestIsReachable(t *testing.T) {
+ const (
+ expectedUnreachableMsg = "kubernetes cluster unreachable"
+ )
+ tests := []struct {
+ name string
+ setupClient func(*testing.T) *Client
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "successful reachability test",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.kubeClient = k8sfake.NewSimpleClientset()
+ return client
+ },
+ expectError: false,
+ },
+ {
+ name: "client creation error with ErrEmptyConfig",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.Factory = &errorFactory{err: genericclioptions.ErrEmptyConfig}
+ return client
+ },
+ expectError: true,
+ errorContains: expectedUnreachableMsg,
+ },
+ {
+ name: "client creation error with general error",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.Factory = &errorFactory{err: errors.New("connection refused")}
+ return client
+ },
+ expectError: true,
+ errorContains: "kubernetes cluster unreachable: connection refused",
+ },
+ {
+ name: "discovery error with cluster unreachable",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ return newTestClientWithDiscoveryError(t, http.ErrServerClosed)
+ },
+ expectError: true,
+ errorContains: expectedUnreachableMsg,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := tt.setupClient(t)
+ err := client.IsReachable()
+
+ if tt.expectError {
+ if err == nil {
+ t.Error("expected error but got nil")
+ return
+ }
+
+ if !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error message to contain '%s', got: %v", tt.errorContains, err)
+ }
+
+ } else {
+ if err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestIsIncompatibleServerError(t *testing.T) {
+ testCases := map[string]struct {
+ Err error
+ Want bool
+ }{
+ "Unsupported media type": {
+ Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusUnsupportedMediaType}},
+ Want: true,
+ },
+ "Not found error": {
+ Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotFound}},
+ Want: false,
+ },
+ "Generic error": {
+ Err: fmt.Errorf("some generic error"),
+ Want: false,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ if got := isIncompatibleServerError(tc.Err); got != tc.Want {
+ t.Errorf("isIncompatibleServerError() = %v, want %v", got, tc.Want)
+ }
+ })
+ }
+}
+
+func TestReplaceResource(t *testing.T) {
+ type testCase struct {
+ Pods v1.PodList
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ Pods: newPodList("whale"),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ case 1:
+ assert.Equal(t, "PUT", req.Method)
+ }
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "conflict": {
+ Pods: newPodList("whale"),
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "failed to replace object: the server reported a conflict",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.Pods), nil)
+ require.NoError(t, err)
+
+ require.Len(t, resourceList, 1)
+ info := resourceList[0]
+
+ err = replaceResource(info, FieldValidationDirectiveStrict)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, info.Object)
+ }
+ })
+ }
+}
+
+func TestPatchResourceClientSide(t *testing.T) {
+ type testCase struct {
+ OriginalPods v1.PodList
+ TargetPods v1.PodList
+ ThreeWayMergeForUnstructured bool
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type"))
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil
+ },
+ },
+ "three way merge for unstructured": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ ThreeWayMergeForUnstructured: true,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ t.Logf("patcher: %+v", req.Header)
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type"))
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil
+ },
+ },
+ "conflict": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "PATCH", req.Method)
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ }
+
+ t.Fail()
+ return nil, nil
+
+ },
+ ExpectedErrorContains: "cannot patch \"whale\" with kind Pod: the server reported a conflict",
+ },
+ "no patch": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: newPodList("whale"),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil // newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceListOriginal, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.OriginalPods), nil)
+ require.NoError(t, err)
+ require.Len(t, resourceListOriginal, 1)
+
+ resourceListTarget, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.TargetPods), nil)
+ require.NoError(t, err)
+ require.Len(t, resourceListTarget, 1)
+
+ original := resourceListOriginal[0]
+ target := resourceListTarget[0]
+
+ err = patchResourceClientSide(original.Object, target, tc.ThreeWayMergeForUnstructured)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, target.Object)
+ }
+ })
+ }
+}
+
+func TestPatchResourceServerSide(t *testing.T) {
+ type testCase struct {
+ Pods v1.PodList
+ DryRun bool
+ ForceConflicts bool
+ FieldValidationDirective FieldValidationDirective
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "dry run": {
+ Pods: newPodList("whale"),
+ DryRun: true,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "All", req.URL.Query().Get("dryRun"))
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "force conflicts": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: true,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "true", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "dry run + force conflicts": {
+ Pods: newPodList("whale"),
+ DryRun: true,
+ ForceConflicts: true,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "All", req.URL.Query().Get("dryRun"))
+ assert.Equal(t, "true", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "field validation ignore": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveIgnore,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Ignore", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "incompatible server": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusUnsupportedMediaType,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "server-side apply not available on the server:",
+ },
+ "conflict": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "the server reported a conflict",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, tc.FieldValidationDirective, objBody(&tc.Pods), nil)
+ require.NoError(t, err)
+
+ require.Len(t, resourceList, 1)
+ info := resourceList[0]
+
+ err = patchResourceServerSide(info, tc.DryRun, tc.ForceConflicts, tc.FieldValidationDirective)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, info.Object)
+ }
+ })
+ }
+}
+
+func TestDetermineFieldValidationDirective(t *testing.T) {
+
+ assert.Equal(t, FieldValidationDirectiveIgnore, determineFieldValidationDirective(false))
+ assert.Equal(t, FieldValidationDirectiveStrict, determineFieldValidationDirective(true))
+}
diff --git a/pkg/kube/factory.go b/pkg/kube/factory.go
index 9dae398d7..1d237c307 100644
--- a/pkg/kube/factory.go
+++ b/pkg/kube/factory.go
@@ -20,6 +20,7 @@ import (
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubectl/pkg/validation"
)
@@ -33,6 +34,9 @@ import (
// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
// being exposed.
type Factory interface {
+ // ToRESTConfig returns restconfig
+ ToRESTConfig() (*rest.Config, error)
+
// ToRawKubeConfigLoader return kubeconfig loader as-is
ToRawKubeConfigLoader() clientcmd.ClientConfig
diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/failing_kube_client.go
similarity index 64%
rename from pkg/kube/fake/fake.go
rename to pkg/kube/fake/failing_kube_client.go
index ceca3c113..154419ebf 100644
--- a/pkg/kube/fake/fake.go
+++ b/pkg/kube/fake/failing_kube_client.go
@@ -35,24 +35,37 @@ type FailingKubeClient struct {
PrintingKubeClient
CreateError error
GetError error
- WaitError error
DeleteError error
DeleteWithPropagationError error
- WatchUntilReadyError error
UpdateError error
BuildError error
BuildTableError error
+ ConnectionError error
BuildDummy bool
+ DummyResources kube.ResourceList
BuildUnstructuredError error
+ WaitError error
+ WaitForDeleteError error
+ WatchUntilReadyError error
WaitDuration time.Duration
}
+// FailingKubeWaiter implements kube.Waiter for testing purposes.
+// It also has additional errors you can set to fail different functions, otherwise it delegates all its calls to `PrintingKubeWaiter`
+type FailingKubeWaiter struct {
+ *PrintingKubeWaiter
+ waitError error
+ waitForDeleteError error
+ watchUntilReadyError error
+ waitDuration time.Duration
+}
+
// Create returns the configured error if set or prints
-func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
+func (f *FailingKubeClient) Create(resources kube.ResourceList, options ...kube.ClientCreateOption) (*kube.Result, error) {
if f.CreateError != nil {
return nil, f.CreateError
}
- return f.PrintingKubeClient.Create(resources)
+ return f.PrintingKubeClient.Create(resources, options...)
}
// Get returns the configured error if set or prints
@@ -64,28 +77,28 @@ func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[
}
// Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints.
-func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error {
- time.Sleep(f.WaitDuration)
- if f.WaitError != nil {
- return f.WaitError
+func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error {
+ time.Sleep(f.waitDuration)
+ if f.waitError != nil {
+ return f.waitError
}
- return f.PrintingKubeClient.Wait(resources, d)
+ return f.PrintingKubeWaiter.Wait(resources, d)
}
// WaitWithJobs returns the configured error if set or prints
-func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
- if f.WaitError != nil {
- return f.WaitError
+func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
+ if f.waitError != nil {
+ return f.waitError
}
- return f.PrintingKubeClient.WaitWithJobs(resources, d)
+ return f.PrintingKubeWaiter.WaitWithJobs(resources, d)
}
// WaitForDelete returns the configured error if set or prints
-func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
- if f.WaitError != nil {
- return f.WaitError
+func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
+ if f.waitForDeleteError != nil {
+ return f.waitForDeleteError
}
- return f.PrintingKubeClient.WaitForDelete(resources, d)
+ return f.PrintingKubeWaiter.WaitForDelete(resources, d)
}
// Delete returns the configured error if set or prints
@@ -97,19 +110,19 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [
}
// WatchUntilReady returns the configured error if set or prints
-func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
- if f.WatchUntilReadyError != nil {
- return f.WatchUntilReadyError
+func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
+ if f.watchUntilReadyError != nil {
+ return f.watchUntilReadyError
}
- return f.PrintingKubeClient.WatchUntilReady(resources, d)
+ return f.PrintingKubeWaiter.WatchUntilReady(resources, d)
}
// Update returns the configured error if set or prints
-func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) {
+func (f *FailingKubeClient) Update(r, modified kube.ResourceList, options ...kube.ClientUpdateOption) (*kube.Result, error) {
if f.UpdateError != nil {
return &kube.Result{}, f.UpdateError
}
- return f.PrintingKubeClient.Update(r, modified, ignoreMe)
+ return f.PrintingKubeClient.Update(r, modified, options...)
}
// Build returns the configured error if set or prints
@@ -117,6 +130,9 @@ func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error
if f.BuildError != nil {
return []*resource.Info{}, f.BuildError
}
+ if f.DummyResources != nil {
+ return f.DummyResources, nil
+ }
if f.BuildDummy {
return createDummyResourceList(), nil
}
@@ -139,6 +155,25 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL
return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy)
}
+func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) {
+ waiter, _ := f.PrintingKubeClient.GetWaiter(ws)
+ printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter)
+ return &FailingKubeWaiter{
+ PrintingKubeWaiter: printingKubeWaiter,
+ waitError: f.WaitError,
+ waitForDeleteError: f.WaitForDeleteError,
+ watchUntilReadyError: f.WatchUntilReadyError,
+ waitDuration: f.WaitDuration,
+ }, nil
+}
+
+func (f *FailingKubeClient) IsReachable() error {
+ if f.ConnectionError != nil {
+ return f.ConnectionError
+ }
+ return f.PrintingKubeClient.IsReachable()
+}
+
func createDummyResourceList() kube.ResourceList {
var resInfo resource.Info
resInfo.Name = "dummyName"
@@ -146,5 +181,4 @@ func createDummyResourceList() kube.ResourceList {
var resourceList kube.ResourceList
resourceList.Append(&resInfo)
return resourceList
-
}
diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go
index dcce9a3be..16c93615a 100644
--- a/pkg/kube/fake/printer.go
+++ b/pkg/kube/fake/printer.go
@@ -37,13 +37,19 @@ type PrintingKubeClient struct {
LogOutput io.Writer
}
+// PrintingKubeWaiter implements kube.Waiter, but simply prints the reader to the given output
+type PrintingKubeWaiter struct {
+ Out io.Writer
+ LogOutput io.Writer
+}
+
// IsReachable checks if the cluster is reachable
func (p *PrintingKubeClient) IsReachable() error {
return nil
}
// Create prints the values of what would be created with a real KubeClient.
-func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
+func (p *PrintingKubeClient) Create(resources kube.ResourceList, _ ...kube.ClientCreateOption) (*kube.Result, error) {
_, err := io.Copy(p.Out, bufferize(resources))
if err != nil {
return nil, err
@@ -59,17 +65,23 @@ func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[strin
return make(map[string][]runtime.Object), nil
}
-func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error {
+func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ return err
+}
+
+func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
-func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
+func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
-func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
+// WatchUntilReady implements KubeClient WatchUntilReady.
+func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
@@ -85,14 +97,8 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result,
return &kube.Result{Deleted: resources}, nil
}
-// WatchUntilReady implements KubeClient WatchUntilReady.
-func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
- _, err := io.Copy(p.Out, bufferize(resources))
- return err
-}
-
// Update implements KubeClient Update.
-func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) {
+func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ ...kube.ClientUpdateOption) (*kube.Result, error) {
_, err := io.Copy(p.Out, bufferize(modified))
if err != nil {
return nil, err
@@ -140,6 +146,10 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource
return &kube.Result{Deleted: resources}, nil
}
+func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) {
+ return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil
+}
+
func bufferize(resources kube.ResourceList) io.Reader {
var builder strings.Builder
for _, info := range resources {
diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go
index c9776cacd..7339ae0ff 100644
--- a/pkg/kube/interface.go
+++ b/pkg/kube/interface.go
@@ -30,31 +30,14 @@ import (
// A KubernetesClient must be concurrency safe.
type Interface interface {
// Create creates one or more resources.
- Create(resources ResourceList) (*Result, error)
-
- // Wait waits up to the given timeout for the specified resources to be ready.
- Wait(resources ResourceList, timeout time.Duration) error
-
- // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
- WaitWithJobs(resources ResourceList, timeout time.Duration) error
+ Create(resources ResourceList, options ...ClientCreateOption) (*Result, error)
// Delete destroys one or more resources.
Delete(resources ResourceList) (*Result, []error)
- // WatchUntilReady watches the resources given and waits until it is ready.
- //
- // This method is mainly for hook implementations. It watches for a resource to
- // hit a particular milestone. The milestone depends on the Kind.
- //
- // For Jobs, "ready" means the Job ran to completion (exited without error).
- // For Pods, "ready" means the Pod phase is marked "succeeded".
- // For all other kinds, it means the kind was created or modified without
- // error.
- WatchUntilReady(resources ResourceList, timeout time.Duration) error
-
// Update updates one or more resources or creates the resource
// if it doesn't exist.
- Update(original, target ResourceList, force bool) (*Result, error)
+ Update(original, target ResourceList, options ...ClientUpdateOption) (*Result, error)
// Build creates a resource list from a Reader.
//
@@ -63,17 +46,34 @@ type Interface interface {
//
// Validates against OpenAPI schema if validate is true.
Build(reader io.Reader, validate bool) (ResourceList, error)
-
// IsReachable checks whether the client is able to connect to the cluster.
IsReachable() error
+
+ // Get Waiter gets the Kube.Waiter
+ GetWaiter(ws WaitStrategy) (Waiter, error)
}
-// InterfaceExt was introduced to avoid breaking backwards compatibility for Interface implementers.
-//
-// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
-type InterfaceExt interface {
+// Waiter defines methods related to waiting for resource states.
+type Waiter interface {
+ // Wait waits up to the given timeout for the specified resources to be ready.
+ Wait(resources ResourceList, timeout time.Duration) error
+
+ // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
+ WaitWithJobs(resources ResourceList, timeout time.Duration) error
+
// WaitForDelete wait up to the given timeout for the specified resources to be deleted.
WaitForDelete(resources ResourceList, timeout time.Duration) error
+
+ // WatchUntilReady watches the resources given and waits until it is ready.
+ //
+ // This method is mainly for hook implementations. It watches for a resource to
+ // hit a particular milestone. The milestone depends on the Kind.
+ //
+ // For Jobs, "ready" means the Job ran to completion (exited without error).
+ // For Pods, "ready" means the Pod phase is marked "succeeded".
+ // For all other kinds, it means the kind was created or modified without
+ // error.
+ WatchUntilReady(resources ResourceList, timeout time.Duration) error
}
// InterfaceLogs was introduced to avoid breaking backwards compatibility for Interface implementers.
@@ -118,7 +118,6 @@ type InterfaceResources interface {
}
var _ Interface = (*Client)(nil)
-var _ InterfaceExt = (*Client)(nil)
var _ InterfaceLogs = (*Client)(nil)
var _ InterfaceDeletionPropagation = (*Client)(nil)
var _ InterfaceResources = (*Client)(nil)
diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go
index dd5869e6a..7a06c72f9 100644
--- a/pkg/kube/ready.go
+++ b/pkg/kube/ready.go
@@ -19,6 +19,7 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
+ "log/slog"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -57,13 +58,9 @@ func CheckJobs(checkJobs bool) ReadyCheckerOption {
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults.
-func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker {
+func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
c := ReadyChecker{
client: cl,
- log: log,
- }
- if c.log == nil {
- c.log = nopLogger
}
for _, opt := range opts {
opt(&c)
@@ -74,7 +71,6 @@ func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}),
// ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -230,20 +226,21 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return true
}
}
- c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
+ slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
return false
}
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit {
- c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
+ slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
// If a job is failed, it can't recover, so throw an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
}
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
- c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
+ slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
return false, nil
}
+ slog.Debug("Job is completed", "namespace", job.GetNamespace(), "name", job.GetName())
return true, nil
}
@@ -255,7 +252,7 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
// Ensure that the service cluster IP is not empty
if s.Spec.ClusterIP == "" {
- c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName())
+ slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
@@ -263,24 +260,25 @@ func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
// do not wait when at least 1 external IP is set
if len(s.Spec.ExternalIPs) > 0 {
- c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs)
+ slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
return true
}
if s.Status.LoadBalancer.Ingress == nil {
- c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName())
+ slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
return false
}
}
-
+ slog.Debug("Service is ready", "namespace", s.GetNamespace(), "name", s.GetName(), "clusterIP", s.Spec.ClusterIP, "externalIPs", s.Spec.ExternalIPs)
return true
}
func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
if v.Status.Phase != corev1.ClaimBound {
- c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName())
+ slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
return false
}
+ slog.Debug("PersistentVolumeClaim is bound", "namespace", v.GetNamespace(), "name", v.GetName(), "phase", v.Status.Phase)
return true
}
@@ -290,23 +288,24 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy
return false
}
// Verify the generation observed by the deployment controller matches the spec generation
- if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
- c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation)
+ if dep.Status.ObservedGeneration != dep.Generation {
+ slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.Generation)
return false
}
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
- if !(rs.Status.ReadyReplicas >= expectedReady) {
- c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
+ if rs.Status.ReadyReplicas < expectedReady {
+ slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return false
}
+ slog.Debug("Deployment is ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
return true
}
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation
- if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
- c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation)
+ if ds.Status.ObservedGeneration != ds.Generation {
+ slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.Generation)
return false
}
@@ -317,7 +316,7 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Make sure all the updated pods have been scheduled
if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
- c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
+ slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
return false
}
maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
@@ -329,10 +328,11 @@ func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
}
expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
- if !(int(ds.Status.NumberReady) >= expectedReady) {
- c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady)
+ if int(ds.Status.NumberReady) < expectedReady {
+ slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return false
}
+ slog.Debug("DaemonSet is ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
return true
}
@@ -381,14 +381,14 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation
- if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
- c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
+ if sts.Status.ObservedGeneration != sts.Generation {
+ slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
- c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
+ slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
return true
}
@@ -414,30 +414,29 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Make sure all the updated pods have been scheduled
if int(sts.Status.UpdatedReplicas) < expectedReplicas {
- c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
+ slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
return false
}
if int(sts.Status.ReadyReplicas) != replicas {
- c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+ slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return false
}
// This check only makes sense when all partitions are being upgraded otherwise during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
- c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
+ slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
return false
}
-
- c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+ slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
return true
}
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation
- if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
- c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
+ if rc.Status.ObservedGeneration != rc.Generation {
+ slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.Generation)
return false
}
return true
@@ -445,8 +444,8 @@ func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationControll
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation
- if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
- c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation)
+ if rs.Status.ObservedGeneration != rs.Generation {
+ slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.Generation)
return false
}
return true
diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go
index a8ba05287..db0d02cbe 100644
--- a/pkg/kube/ready_test.go
+++ b/pkg/kube/ready_test.go
@@ -37,7 +37,6 @@ const defaultNamespace = metav1.NamespaceDefault
func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -56,13 +55,12 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
{
name: "IsReady Pod",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.Pod{}, Name: "foo", Namespace: defaultNamespace},
},
pod: newPodWithCondition("foo", corev1.ConditionTrue),
@@ -72,13 +70,12 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
{
name: "IsReady Pod returns error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.Pod{}, Name: "foo", Namespace: defaultNamespace},
},
pod: newPodWithCondition("bar", corev1.ConditionTrue),
@@ -90,11 +87,10 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), tt.pod, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(t.Context(), tt.pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Pod error: %v", err)
return
}
@@ -113,7 +109,6 @@ func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
func Test_ReadyChecker_IsReady_Job(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -132,13 +127,12 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
{
name: "IsReady Job error while getting job",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &batchv1.Job{}, Name: "foo", Namespace: defaultNamespace},
},
job: newJob("bar", 1, intToInt32(1), 1, 0),
@@ -148,13 +142,12 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
{
name: "IsReady Job",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &batchv1.Job{}, Name: "foo", Namespace: defaultNamespace},
},
job: newJob("foo", 1, intToInt32(1), 1, 0),
@@ -166,11 +159,10 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.BatchV1().Jobs(defaultNamespace).Create(context.TODO(), tt.job, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.BatchV1().Jobs(defaultNamespace).Create(t.Context(), tt.job, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Job error: %v", err)
return
}
@@ -188,7 +180,6 @@ func Test_ReadyChecker_IsReady_Job(t *testing.T) {
func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -208,13 +199,12 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
{
name: "IsReady Deployments error while getting current Deployment",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.Deployment{}, Name: "foo", Namespace: defaultNamespace},
},
replicaSet: newReplicaSet("foo", 0, 0, true),
@@ -225,13 +215,12 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
{
name: "IsReady Deployments", //TODO fix this one
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.Deployment{}, Name: "foo", Namespace: defaultNamespace},
},
replicaSet: newReplicaSet("foo", 0, 0, true),
@@ -244,15 +233,14 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.AppsV1().Deployments(defaultNamespace).Create(context.TODO(), tt.deployment, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.AppsV1().Deployments(defaultNamespace).Create(t.Context(), tt.deployment, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Deployment error: %v", err)
return
}
- if _, err := c.client.AppsV1().ReplicaSets(defaultNamespace).Create(context.TODO(), tt.replicaSet, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.AppsV1().ReplicaSets(defaultNamespace).Create(t.Context(), tt.replicaSet, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create ReplicaSet error: %v", err)
return
}
@@ -270,7 +258,6 @@ func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -289,13 +276,12 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
{
name: "IsReady PersistentVolumeClaim",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.PersistentVolumeClaim{}, Name: "foo", Namespace: defaultNamespace},
},
pvc: newPersistentVolumeClaim("foo", corev1.ClaimPending),
@@ -305,13 +291,12 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
{
name: "IsReady PersistentVolumeClaim with error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.PersistentVolumeClaim{}, Name: "foo", Namespace: defaultNamespace},
},
pvc: newPersistentVolumeClaim("bar", corev1.ClaimPending),
@@ -323,11 +308,10 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.CoreV1().PersistentVolumeClaims(defaultNamespace).Create(context.TODO(), tt.pvc, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.CoreV1().PersistentVolumeClaims(defaultNamespace).Create(t.Context(), tt.pvc, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create PersistentVolumeClaim error: %v", err)
return
}
@@ -345,7 +329,6 @@ func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
func Test_ReadyChecker_IsReady_Service(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -364,13 +347,12 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
{
name: "IsReady Service",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.Service{}, Name: "foo", Namespace: defaultNamespace},
},
svc: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer, ClusterIP: ""}),
@@ -380,13 +362,12 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
{
name: "IsReady Service with error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.Service{}, Name: "foo", Namespace: defaultNamespace},
},
svc: newService("bar", corev1.ServiceSpec{Type: corev1.ServiceTypeExternalName, ClusterIP: ""}),
@@ -398,11 +379,10 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.CoreV1().Services(defaultNamespace).Create(context.TODO(), tt.svc, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.CoreV1().Services(defaultNamespace).Create(t.Context(), tt.svc, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Service error: %v", err)
return
}
@@ -420,7 +400,6 @@ func Test_ReadyChecker_IsReady_Service(t *testing.T) {
func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -439,13 +418,12 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
{
name: "IsReady DaemonSet",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.DaemonSet{}, Name: "foo", Namespace: defaultNamespace},
},
ds: newDaemonSet("foo", 0, 0, 1, 0, true),
@@ -455,13 +433,12 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
{
name: "IsReady DaemonSet with error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.DaemonSet{}, Name: "foo", Namespace: defaultNamespace},
},
ds: newDaemonSet("bar", 0, 1, 1, 1, true),
@@ -473,11 +450,10 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.AppsV1().DaemonSets(defaultNamespace).Create(context.TODO(), tt.ds, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.AppsV1().DaemonSets(defaultNamespace).Create(t.Context(), tt.ds, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create DaemonSet error: %v", err)
return
}
@@ -495,7 +471,6 @@ func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -514,13 +489,12 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
{
name: "IsReady StatefulSet",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.StatefulSet{}, Name: "foo", Namespace: defaultNamespace},
},
ss: newStatefulSet("foo", 1, 0, 0, 1, true),
@@ -530,13 +504,12 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
{
name: "IsReady StatefulSet with error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.StatefulSet{}, Name: "foo", Namespace: defaultNamespace},
},
ss: newStatefulSet("bar", 1, 0, 1, 1, true),
@@ -548,11 +521,10 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.AppsV1().StatefulSets(defaultNamespace).Create(context.TODO(), tt.ss, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.AppsV1().StatefulSets(defaultNamespace).Create(t.Context(), tt.ss, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create StatefulSet error: %v", err)
return
}
@@ -570,7 +542,6 @@ func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -589,13 +560,12 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
},
rc: newReplicationController("foo", false),
@@ -605,13 +575,12 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController with error",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
},
rc: newReplicationController("bar", false),
@@ -621,13 +590,12 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
{
name: "IsReady ReplicationController and pods not ready for object",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
},
rc: newReplicationController("foo", true),
@@ -639,11 +607,10 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
- if _, err := c.client.CoreV1().ReplicationControllers(defaultNamespace).Create(context.TODO(), tt.rc, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.CoreV1().ReplicationControllers(defaultNamespace).Create(t.Context(), tt.rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create ReplicationController error: %v", err)
return
}
@@ -661,7 +628,6 @@ func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
type fields struct {
client kubernetes.Interface
- log func(string, ...interface{})
checkJobs bool
pausedAsReady bool
}
@@ -680,13 +646,12 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
{
name: "IsReady ReplicaSet",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.ReplicaSet{}, Name: "foo", Namespace: defaultNamespace},
},
rs: newReplicaSet("foo", 1, 1, true),
@@ -696,13 +661,12 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
{
name: "IsReady ReplicaSet not ready",
fields: fields{
- client: fake.NewSimpleClientset(),
- log: func(string, ...interface{}) {},
+ client: fake.NewClientset(),
checkJobs: true,
pausedAsReady: false,
},
args: args{
- ctx: context.TODO(),
+ ctx: t.Context(),
resource: &resource.Info{Object: &appsv1.ReplicaSet{}, Name: "foo", Namespace: defaultNamespace},
},
rs: newReplicaSet("bar", 1, 1, false),
@@ -714,7 +678,6 @@ func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := &ReadyChecker{
client: tt.fields.client,
- log: tt.fields.log,
checkJobs: tt.fields.checkJobs,
pausedAsReady: tt.fields.pausedAsReady,
}
@@ -791,7 +754,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
}
@@ -825,7 +788,7 @@ func Test_ReadyChecker_replicaSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.replicaSetReady(tt.args.rs); got != tt.want {
t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
}
@@ -859,7 +822,7 @@ func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
}
@@ -914,7 +877,7 @@ func Test_ReadyChecker_daemonSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.daemonSetReady(tt.args.ds); got != tt.want {
t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
}
@@ -990,7 +953,7 @@ func Test_ReadyChecker_statefulSetReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.statefulSetReady(tt.args.sts); got != tt.want {
t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
}
@@ -1049,14 +1012,14 @@ func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
for _, pod := range tt.existPods {
- if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
+ if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(t.Context(), &pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create Pod error: %v", err)
return
}
}
- got, err := c.podsReadyForObject(context.TODO(), tt.args.namespace, tt.args.obj)
+ got, err := c.podsReadyForObject(t.Context(), tt.args.namespace, tt.args.obj)
if (err != nil) != tt.wantErr {
t.Errorf("podsReadyForObject() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -1128,7 +1091,7 @@ func Test_ReadyChecker_jobReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
got, err := c.jobReady(tt.args.job)
if (err != nil) != tt.wantErr {
t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
@@ -1167,7 +1130,7 @@ func Test_ReadyChecker_volumeReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
if got := c.volumeReady(tt.args.v); got != tt.want {
t.Errorf("volumeReady() = %v, want %v", got, tt.want)
}
@@ -1212,7 +1175,7 @@ func Test_ReadyChecker_serviceReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
got := c.serviceReady(tt.args.service)
if got != tt.want {
t.Errorf("serviceReady() = %v, want %v", got, tt.want)
@@ -1281,7 +1244,7 @@ func Test_ReadyChecker_crdBetaReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
got := c.crdBetaReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
@@ -1350,7 +1313,7 @@ func Test_ReadyChecker_crdReady(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- c := NewReadyChecker(fake.NewSimpleClientset(), nil)
+ c := NewReadyChecker(fake.NewClientset())
got := c.crdReady(tt.args.crdBeta)
if got != tt.want {
t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
diff --git a/pkg/kube/resource.go b/pkg/kube/resource.go
index 600f256b3..d88b171f0 100644
--- a/pkg/kube/resource.go
+++ b/pkg/kube/resource.go
@@ -81,5 +81,5 @@ func (r ResourceList) Intersect(rs ResourceList) ResourceList {
// isMatchingInfo returns true if infos match on Name and GroupVersionKind.
func isMatchingInfo(a, b *resource.Info) bool {
- return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind && a.Mapping.GroupVersionKind.Group == b.Mapping.GroupVersionKind.Group
+ return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind == b.Mapping.GroupVersionKind
}
diff --git a/pkg/kube/resource_test.go b/pkg/kube/resource_test.go
index c405ca382..ccc613c1b 100644
--- a/pkg/kube/resource_test.go
+++ b/pkg/kube/resource_test.go
@@ -59,3 +59,42 @@ func TestResourceList(t *testing.T) {
t.Error("expected intersect to return bar")
}
}
+
+func TestIsMatchingInfo(t *testing.T) {
+ gvk := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "pod"}
+ resourceInfo := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+
+ gvkDiffGroup := schema.GroupVersionKind{Group: "diff", Version: "version1", Kind: "pod"}
+ resourceInfoDiffGroup := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffGroup}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffGroup) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkDiffVersion := schema.GroupVersionKind{Group: "group1", Version: "diff", Kind: "pod"}
+ resourceInfoDiffVersion := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffVersion}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkDiffKind := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "deployment"}
+ resourceInfoDiffKind := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffKind}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffKind) {
+ t.Error("expected resources not equal")
+ }
+
+ resourceInfoDiffName := resource.Info{Name: "diff", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffName) {
+ t.Error("expected resources not equal")
+ }
+
+ resourceInfoDiffNamespace := resource.Info{Name: "name1", Namespace: "diff", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffNamespace) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkEqual := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "pod"}
+ resourceInfoEqual := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkEqual}}
+ if !isMatchingInfo(&resourceInfo, &resourceInfoEqual) {
+ t.Error("expected resources to be equal")
+ }
+}
diff --git a/pkg/kube/roundtripper_test.go b/pkg/kube/roundtripper_test.go
new file mode 100644
index 000000000..96602c1f4
--- /dev/null
+++ b/pkg/kube/roundtripper_test.go
@@ -0,0 +1,161 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type fakeRoundTripper struct {
+ resp *http.Response
+ err error
+ calls int
+}
+
+func (f *fakeRoundTripper) RoundTrip(_ *http.Request) (*http.Response, error) {
+ f.calls++
+ return f.resp, f.err
+}
+
+func newRespWithBody(statusCode int, contentType, body string) *http.Response {
+ return &http.Response{
+ StatusCode: statusCode,
+ Header: http.Header{"Content-Type": []string{contentType}},
+ Body: io.NopCloser(strings.NewReader(body)),
+ }
+}
+
+func TestRetryingRoundTripper_RoundTrip(t *testing.T) {
+ marshalErr := func(code int, msg string) string {
+ b, _ := json.Marshal(kubernetesError{
+ Code: code,
+ Message: msg,
+ })
+ return string(b)
+ }
+
+ tests := []struct {
+ name string
+ resp *http.Response
+ err error
+ expectedCalls int
+ expectedErr string
+ expectedCode int
+ }{
+ {
+ name: "no retry, status < 500 returns response",
+ resp: newRespWithBody(200, "application/json", `{"message":"ok","code":200}`),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 200,
+ },
+ {
+ name: "error from wrapped RoundTripper propagates",
+ resp: nil,
+ err: errors.New("wrapped error"),
+ expectedCalls: 1,
+ expectedErr: "wrapped error",
+ },
+ {
+ name: "no retry, content-type not application/json",
+ resp: newRespWithBody(500, "text/plain", "server error"),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 500,
+ },
+ {
+ name: "error reading body returns error",
+ resp: &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ Body: &errReader{},
+ },
+ err: nil,
+ expectedCalls: 1,
+ expectedErr: "read error",
+ },
+ {
+ name: "error decoding JSON returns error",
+ resp: newRespWithBody(500, "application/json", `invalid-json`),
+ err: nil,
+ expectedCalls: 1,
+ expectedErr: "invalid character",
+ },
+ {
+ name: "retry on etcdserver leader changed message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "some error etcdserver: leader changed")),
+ err: nil,
+ expectedCalls: 2,
+ expectedCode: 500,
+ },
+ {
+ name: "retry on raft proposal dropped message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "rpc error: code = Unknown desc = raft proposal dropped")),
+ err: nil,
+ expectedCalls: 2,
+ expectedCode: 500,
+ },
+ {
+ name: "no retry on other error message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "other server error")),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 500,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fakeRT := &fakeRoundTripper{
+ resp: tt.resp,
+ err: tt.err,
+ }
+ rt := RetryingRoundTripper{
+ Wrapped: fakeRT,
+ }
+ req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
+ resp, err := rt.RoundTrip(req)
+
+ if tt.expectedErr != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedErr)
+ return
+ }
+ assert.NoError(t, err)
+
+ assert.Equal(t, tt.expectedCode, resp.StatusCode)
+ assert.Equal(t, tt.expectedCalls, fakeRT.calls)
+ })
+ }
+}
+
+type errReader struct{}
+
+func (e *errReader) Read(_ []byte) (int, error) {
+ return 0, errors.New("read error")
+}
+
+func (e *errReader) Close() error {
+ return nil
+}
diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go
new file mode 100644
index 000000000..2d7cfe971
--- /dev/null
+++ b/pkg/kube/statuswait.go
@@ -0,0 +1,235 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "sort"
+ "time"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/watcher"
+ "github.com/fluxcd/cli-utils/pkg/object"
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/client-go/dynamic"
+
+ helmStatusReaders "helm.sh/helm/v4/internal/statusreaders"
+)
+
+type statusWaiter struct {
+ client dynamic.Interface
+ restMapper meta.RESTMapper
+}
+
+func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: "Resource is current",
+ }, nil
+}
+
+func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
+ podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
+ // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks
+ genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady)
+
+ sr := &statusreaders.DelegatingStatusReader{
+ StatusReaders: []engine.StatusReader{
+ jobSR,
+ podSR,
+ genericSR,
+ },
+ }
+ sw.StatusReader = sr
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
+ customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
+ sw.StatusReader = customSR
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+ defer cancel()
+ slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ return w.waitForDelete(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ resources := []object.ObjMetadata{}
+ for _, resource := range resourceList {
+ obj, err := object.RuntimeToObjMeta(resource.Object)
+ if err != nil {
+ return err
+ }
+ resources = append(resources, obj)
+ }
+ eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
+ statusCollector := collector.NewResourceStatusCollector(resources)
+ done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
+ <-done
+
+ if statusCollector.Error != nil {
+ return statusCollector.Error
+ }
+
+ // Only check parent context error, otherwise we would error when desired status is achieved.
+ if ctx.Err() != nil {
+ errs := []error{}
+ for _, id := range resources {
+ rs := statusCollector.ResourceStatuses[id]
+ if rs.Status == status.NotFoundStatus {
+ continue
+ }
+ errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
+ }
+ errs = append(errs, ctx.Err())
+ return errors.Join(errs...)
+ }
+ return nil
+}
+
+func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ resources := []object.ObjMetadata{}
+ for _, resource := range resourceList {
+ switch value := AsVersioned(resource).(type) {
+ case *appsv1.Deployment:
+ if value.Spec.Paused {
+ continue
+ }
+ }
+ obj, err := object.RuntimeToObjMeta(resource.Object)
+ if err != nil {
+ return err
+ }
+ resources = append(resources, obj)
+ }
+
+ eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
+ statusCollector := collector.NewResourceStatusCollector(resources)
+ done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
+ <-done
+
+ if statusCollector.Error != nil {
+ return statusCollector.Error
+ }
+
+ // Only check parent context error, otherwise we would error when desired status is achieved.
+ if ctx.Err() != nil {
+ errs := []error{}
+ for _, id := range resources {
+ rs := statusCollector.ResourceStatuses[id]
+ if rs.Status == status.CurrentStatus {
+ continue
+ }
+ errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
+ }
+ errs = append(errs, ctx.Err())
+ return errors.Join(errs...)
+ }
+ return nil
+}
+
+func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
+ return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
+ var rss []*event.ResourceStatus
+ var nonDesiredResources []*event.ResourceStatus
+ for _, rs := range statusCollector.ResourceStatuses {
+ if rs == nil {
+ continue
+ }
+ // If a resource is already deleted before waiting has started, it will show as unknown
+ // this check ensures we don't wait forever for a resource that is already deleted
+ if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
+ continue
+ }
+ rss = append(rss, rs)
+ if rs.Status != desired {
+ nonDesiredResources = append(nonDesiredResources, rs)
+ }
+ }
+
+ if aggregator.AggregateStatus(rss, desired) == desired {
+ cancel()
+ return
+ }
+
+ if len(nonDesiredResources) > 0 {
+ // Log a single resource so the user knows what they're waiting for without an overwhelming amount of output
+ sort.Slice(nonDesiredResources, func(i, j int) bool {
+ return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
+ })
+ first := nonDesiredResources[0]
+ slog.Debug("waiting for resource", "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
+ }
+ }
+}
+
+type hookOnlyWaiter struct {
+ sw *statusWaiter
+}
+
+func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
+ return w.sw.WatchUntilReady(resourceList, timeout)
+}
+
+func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error {
+ return nil
+}
+
+func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error {
+ return nil
+}
+
+func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error {
+ return nil
+}
diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go
new file mode 100644
index 000000000..4b06da896
--- /dev/null
+++ b/pkg/kube/statuswait_test.go
@@ -0,0 +1,450 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/fluxcd/cli-utils/pkg/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ dynamicfake "k8s.io/client-go/dynamic/fake"
+ "k8s.io/kubectl/pkg/scheme"
+)
+
+var podCurrentManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: current-pod
+ namespace: ns
+status:
+ conditions:
+ - type: Ready
+ status: "True"
+ phase: Running
+`
+
+var podNoStatusManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: in-progress-pod
+ namespace: ns
+`
+
+var jobNoStatusManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test
+ namespace: qual
+ generation: 1
+`
+
+var jobReadyManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ready-not-complete
+ namespace: default
+ generation: 1
+status:
+ startTime: 2025-02-06T16:34:20-05:00
+ active: 1
+ ready: 1
+`
+
+var jobCompleteManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test
+ namespace: qual
+ generation: 1
+status:
+ succeeded: 1
+ active: 0
+ conditions:
+ - type: Complete
+ status: "True"
+`
+
+var podCompleteManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: good-pod
+ namespace: ns
+status:
+ phase: Succeeded
+`
+
+var pausedDeploymentManifest = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: paused
+ namespace: ns-1
+ generation: 1
+spec:
+ paused: true
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.19.6
+ ports:
+ - containerPort: 80
+`
+
+var notReadyDeploymentManifest = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: not-ready
+ namespace: ns-1
+ generation: 1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.19.6
+ ports:
+ - containerPort: 80
+`
+
+func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource {
+ t.Helper()
+ gvk := obj.GroupVersionKind()
+ mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ require.NoError(t, err)
+ return mapping.Resource
+}
+
+func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object {
+ t.Helper()
+ objects := []runtime.Object{}
+ for _, manifest := range manifests {
+ m := make(map[string]interface{})
+ err := yaml.Unmarshal([]byte(manifest), &m)
+ assert.NoError(t, err)
+ resource := &unstructured.Unstructured{Object: m}
+ objects = append(objects, resource)
+ }
+ return objects
+}
+
+func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList {
+ t.Helper()
+ resourceList := ResourceList{}
+ for _, obj := range objs {
+ list, err := c.Build(objBody(obj), false)
+ assert.NoError(t, err)
+ resourceList = append(resourceList, list...)
+ }
+ return resourceList
+}
+
+func TestStatusWaitForDelete(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ manifestsToCreate []string
+ manifestsToDelete []string
+ expectErrs []error
+ }{
+ {
+ name: "wait for pod to be deleted",
+ manifestsToCreate: []string{podCurrentManifest},
+ manifestsToDelete: []string{podCurrentManifest},
+ expectErrs: nil,
+ },
+ {
+ name: "error when not all objects are deleted",
+ manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest},
+ manifestsToDelete: []string{jobCompleteManifest},
+ expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ timeout := time.Second
+ timeUntilPodDelete := time.Millisecond * 500
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ restMapper: fakeMapper,
+ client: fakeClient,
+ }
+ objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
+ for _, objToCreate := range objsToCreate {
+ u := objToCreate.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete)
+ for _, objToDelete := range objsToDelete {
+ u := objToDelete.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ go func() {
+ time.Sleep(timeUntilPodDelete)
+ err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+ assert.NoError(t, err)
+ }()
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate)
+ err := statusWaiter.WaitForDelete(resourceList, timeout)
+ if tt.expectErrs != nil {
+ assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ timeout := time.Second
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ )
+ statusWaiter := statusWaiter{
+ restMapper: fakeMapper,
+ client: fakeClient,
+ }
+ // Don't create the object to test that the wait for delete works when the object doesn't exist
+ objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+ resourceList := getResourceListFromRuntimeObjs(t, c, objManifest)
+ err := statusWaiter.WaitForDelete(resourceList, timeout)
+ assert.NoError(t, err)
+}
+
+func TestStatusWait(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrs []error
+ waitForJobs bool
+ }{
+ {
+ name: "Job is not complete",
+ objManifests: []string{jobNoStatusManifest},
+ expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+ waitForJobs: true,
+ },
+ {
+ name: "Job is ready but not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrs: nil,
+ waitForJobs: false,
+ },
+ {
+ name: "Pod is ready",
+ objManifests: []string{podCurrentManifest},
+ expectErrs: nil,
+ },
+ {
+ name: "one of the pods never becomes ready",
+ objManifests: []string{podNoStatusManifest, podCurrentManifest},
+ expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
+ },
+ {
+ name: "paused deployment passes",
+ objManifests: []string{pausedDeploymentManifest},
+ expectErrs: nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ appsv1.SchemeGroupVersion.WithKind("Deployment"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.Wait(resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestWaitForJobComplete(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrs []error
+ }{
+ {
+ name: "Job is complete",
+ objManifests: []string{jobCompleteManifest},
+ },
+ {
+ name: "Job is not ready",
+ objManifests: []string{jobNoStatusManifest},
+ expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+ },
+ {
+ name: "Job is ready but not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestWatchForReady(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrs []error
+ }{
+ {
+ name: "succeeds if pod and job are complete",
+ objManifests: []string{jobCompleteManifest, podCompleteManifest},
+ },
+ {
+ name: "succeeds when a resource that's not a pod or job is not ready",
+ objManifests: []string{notReadyDeploymentManifest},
+ },
+ {
+ name: "Fails if job is not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+ },
+ {
+ name: "Fails if pod is not complete",
+ objManifests: []string{podCurrentManifest},
+ expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ appsv1.SchemeGroupVersion.WithKind("Deployment"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go
index 7eb931496..9bfa1ef6d 100644
--- a/pkg/kube/wait.go
+++ b/pkg/kube/wait.go
@@ -19,10 +19,10 @@ package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"fmt"
+ "log/slog"
"net/http"
"time"
- "github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
@@ -31,25 +31,42 @@ import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ cachetools "k8s.io/client-go/tools/cache"
+ watchtools "k8s.io/client-go/tools/watch"
"k8s.io/apimachinery/pkg/util/wait"
)
-type waiter struct {
- c ReadyChecker
- timeout time.Duration
- log func(string, ...interface{})
+// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3
+// Helm 4 now uses the StatusWaiter implementation instead
+type legacyWaiter struct {
+ c ReadyChecker
+ kubeClient *kubernetes.Clientset
+}
+
+func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
+ hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
+ return hw.waitForResources(resources, timeout)
+}
+
+func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
+ hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
+ return hw.waitForResources(resources, timeout)
}
// waitForResources polls to get the current status of all pods, PVCs, Services and
// Jobs(optional) until all are ready or a timeout is reached
-func (w *waiter) waitForResources(created ResourceList) error {
- w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout)
+func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
+ slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
- ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
numberOfErrors := make([]int, len(created))
@@ -60,15 +77,15 @@ func (w *waiter) waitForResources(created ResourceList) error {
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
waitRetries := 30
for i, v := range created {
- ready, err := w.c.IsReady(ctx, v)
+ ready, err := hw.c.IsReady(ctx, v)
- if waitRetries > 0 && w.isRetryableError(err, v) {
+ if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++
if numberOfErrors[i] > waitRetries {
- w.log("Max number of retries reached")
+ slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
return false, err
}
- w.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries)
+ slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
return false, nil
}
numberOfErrors[i] = 0
@@ -80,33 +97,34 @@ func (w *waiter) waitForResources(created ResourceList) error {
})
}
-func (w *waiter) isRetryableError(err error, resource *resource.Info) bool {
+func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool {
if err == nil {
return false
}
- w.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource)
+ slog.Debug("error received when checking resource status", "resource", resource.Name, slog.Any("error", err))
if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code
- retryable := w.isRetryableHTTPStatusCode(statusCode)
- w.log("Status code received: %d. Retryable error? %t", statusCode, retryable)
+ retryable := hw.isRetryableHTTPStatusCode(statusCode)
+ slog.Debug("status code received", "resource", resource.Name, "statusCode", statusCode, "retryable", retryable)
return retryable
}
- w.log("Retryable error? %t", true)
+ slog.Debug("retryable error assumed", "resource", resource.Name)
return true
}
-func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
+func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented)
}
-// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
-func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
- w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout)
+// WaitForDelete polls to check if all the resources are deleted or a timeout is reached
+func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
+ slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
- ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
+ startTime := time.Now()
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
- return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
+ err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
for _, v := range deleted {
err := v.Get()
if err == nil || !apierrors.IsNotFound(err) {
@@ -115,6 +133,15 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
}
return true, nil
})
+
+ elapsed := time.Since(startTime).Round(time.Second)
+ if err != nil {
+ slog.Debug("wait for resources failed", "elapsed", elapsed, slog.Any("error", err))
+ } else {
+ slog.Debug("wait for resources succeeded", "elapsed", elapsed)
+ }
+
+ return err
}
// SelectorsForObject returns the pod label selector for a given object
@@ -162,5 +189,141 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er
return nil, fmt.Errorf("selector for %T not implemented", object)
}
- return selector, errors.Wrap(err, "invalid label selector")
+ if err != nil {
+ return selector, fmt.Errorf("invalid label selector: %w", err)
+ }
+
+ return selector, nil
+}
+
+func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error {
+ return func(info *resource.Info) error {
+ return hw.watchUntilReady(t, info)
+ }
+}
+
+// WatchUntilReady watches the resources given and waits until it is ready.
+//
+// This method is mainly for hook implementations. It watches for a resource to
+// hit a particular milestone. The milestone depends on the Kind.
+//
+// For most kinds, it checks to see if the resource is marked as Added or Modified
+// by the Kubernetes event stream. For some kinds, it does more:
+//
+// - Jobs: A job is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the Status fields in a job's output.
+// - Pods: A pod is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the status.phase field in a pod's output.
+//
+// Handling for other kinds will be added as necessary.
+func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
+ // For jobs, there's also the option to do poll c.Jobs(namespace).Get():
+ // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
+ return perform(resources, hw.watchTimeout(timeout))
+}
+
+func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error {
+ kind := info.Mapping.GroupVersionKind.Kind
+ switch kind {
+ case "Job", "Pod":
+ default:
+ return nil
+ }
+
+ slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
+
+ // Use a selector on the name of the resource. This should be unique for the
+ // given version and kind
+ selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+ if err != nil {
+ return err
+ }
+ lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
+
+ // What we watch for depends on the Kind.
+ // - For a Job, we watch for completion.
+ // - For all else, we watch until Ready.
+ // In the future, we might want to add some special logic for types
+ // like Ingress, Volume, etc.
+
+ ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+ defer cancel()
+ _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
+ // Make sure the incoming object is versioned as we use unstructured
+ // objects when we build manifests
+ obj := convertWithMapper(e.Object, info.Mapping)
+ switch e.Type {
+ case watch.Added, watch.Modified:
+ // For things like a secret or a config map, this is the best indicator
+ // we get. We care mostly about jobs, where what we want to see is
+ // the status go into a good state. For other types, like ReplicaSet
+ // we don't really do anything to support these as hooks.
+ slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
+
+ switch kind {
+ case "Job":
+ return hw.waitForJob(obj, info.Name)
+ case "Pod":
+ return hw.waitForPodSuccess(obj, info.Name)
+ }
+ return true, nil
+ case watch.Deleted:
+ slog.Debug("deleted event received", "resource", info.Name)
+ return true, nil
+ case watch.Error:
+ // Handle error and return with an error.
+ slog.Error("error event received", "resource", info.Name)
+ return true, fmt.Errorf("failed to deploy %s", info.Name)
+ default:
+ return false, nil
+ }
+ })
+ return err
+}
+
+// waitForJob is a helper that waits for a job to complete.
+//
+// This operates on an event returned from a watcher.
+func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*batchv1.Job)
+ if !ok {
+ return true, fmt.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
+ }
+
+ for _, c := range o.Status.Conditions {
+ if c.Type == batchv1.JobComplete && c.Status == "True" {
+ return true, nil
+ } else if c.Type == batchv1.JobFailed && c.Status == "True" {
+ slog.Error("job failed", "job", name, "reason", c.Reason)
+ return true, fmt.Errorf("job %s failed: %s", name, c.Reason)
+ }
+ }
+
+ slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
+ return false, nil
+}
+
+// waitForPodSuccess is a helper that waits for a pod to complete.
+//
+// This operates on an event returned from a watcher.
+func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*corev1.Pod)
+ if !ok {
+ return true, fmt.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
+ }
+
+ switch o.Status.Phase {
+ case corev1.PodSucceeded:
+ slog.Debug("pod succeeded", "pod", o.Name)
+ return true, nil
+ case corev1.PodFailed:
+ slog.Error("pod failed", "pod", o.Name)
+ return true, fmt.Errorf("pod %s failed", o.Name)
+ case corev1.PodPending:
+ slog.Debug("pod pending", "pod", o.Name)
+ case corev1.PodRunning:
+ slog.Debug("pod running", "pod", o.Name)
+ }
+
+ return false, nil
}
diff --git a/pkg/kube/wait_test.go b/pkg/kube/wait_test.go
new file mode 100644
index 000000000..d96f2c486
--- /dev/null
+++ b/pkg/kube/wait_test.go
@@ -0,0 +1,467 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+)
+
+func TestSelectorsForObject(t *testing.T) {
+ tests := []struct {
+ name string
+ object interface{}
+ expectError bool
+ errorContains string
+ expectedLabels map[string]string
+ }{
+ {
+ name: "appsv1 ReplicaSet",
+ object: &appsv1.ReplicaSet{
+ Spec: appsv1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ },
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"app": "test"},
+ },
+ {
+ name: "extensionsv1beta1 ReplicaSet",
+ object: &extensionsv1beta1.ReplicaSet{
+ Spec: extensionsv1beta1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "ext-rs"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "ext-rs"},
+ },
+ {
+ name: "appsv1beta2 ReplicaSet",
+ object: &appsv1beta2.ReplicaSet{
+ Spec: appsv1beta2.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "beta2-rs"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "beta2-rs"},
+ },
+ {
+ name: "corev1 ReplicationController",
+ object: &corev1.ReplicationController{
+ Spec: corev1.ReplicationControllerSpec{
+ Selector: map[string]string{"rc": "test"},
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"rc": "test"},
+ },
+ {
+ name: "appsv1 StatefulSet",
+ object: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-v1"},
+ },
+ {
+ name: "appsv1beta1 StatefulSet",
+ object: &appsv1beta1.StatefulSet{
+ Spec: appsv1beta1.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-beta1"},
+ },
+ {
+ name: "appsv1beta2 StatefulSet",
+ object: &appsv1beta2.StatefulSet{
+ Spec: appsv1beta2.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-beta2"},
+ },
+ {
+ name: "extensionsv1beta1 DaemonSet",
+ object: &extensionsv1beta1.DaemonSet{
+ Spec: extensionsv1beta1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-ext-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-ext-beta1"},
+ },
+ {
+ name: "appsv1 DaemonSet",
+ object: &appsv1.DaemonSet{
+ Spec: appsv1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-v1"},
+ },
+ {
+ name: "appsv1beta2 DaemonSet",
+ object: &appsv1beta2.DaemonSet{
+ Spec: appsv1beta2.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-beta2"},
+ },
+ {
+ name: "extensionsv1beta1 Deployment",
+ object: &extensionsv1beta1.Deployment{
+ Spec: extensionsv1beta1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-ext-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-ext-beta1"},
+ },
+ {
+ name: "appsv1 Deployment",
+ object: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-v1"},
+ },
+ {
+ name: "appsv1beta1 Deployment",
+ object: &appsv1beta1.Deployment{
+ Spec: appsv1beta1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-beta1"},
+ },
+ {
+ name: "appsv1beta2 Deployment",
+ object: &appsv1beta2.Deployment{
+ Spec: appsv1beta2.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-beta2"},
+ },
+ {
+ name: "batchv1 Job",
+ object: &batchv1.Job{
+ Spec: batchv1.JobSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"job": "batch-job"}},
+ },
+ },
+ expectedLabels: map[string]string{"job": "batch-job"},
+ },
+ {
+ name: "corev1 Service with selector",
+ object: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{Name: "svc"},
+ Spec: corev1.ServiceSpec{
+ Selector: map[string]string{"svc": "yes"},
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"svc": "yes"},
+ },
+ {
+ name: "corev1 Service without selector",
+ object: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{Name: "svc"},
+ Spec: corev1.ServiceSpec{Selector: map[string]string{}},
+ },
+ expectError: true,
+ errorContains: "invalid service 'svc': Service is defined without a selector",
+ },
+ {
+ name: "invalid label selector",
+ object: &appsv1.ReplicaSet{
+ Spec: appsv1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "foo",
+ Operator: "InvalidOperator",
+ Values: []string{"bar"},
+ },
+ },
+ },
+ },
+ },
+ expectError: true,
+ errorContains: "invalid label selector:",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ selector, err := SelectorsForObject(tt.object.(runtime.Object))
+ if tt.expectError {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.errorContains)
+ } else {
+ assert.NoError(t, err)
+ expected := labels.Set(tt.expectedLabels)
+ assert.True(t, selector.Matches(expected), "expected selector to match")
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_waitForPodSuccess(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ tests := []struct {
+ name string
+ obj runtime.Object
+ wantDone bool
+ wantErr bool
+ errMessage string
+ }{
+ {
+ name: "pod succeeded",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
+ Status: corev1.PodStatus{Phase: corev1.PodSucceeded},
+ },
+ wantDone: true,
+ wantErr: false,
+ },
+ {
+ name: "pod failed",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod2"},
+ Status: corev1.PodStatus{Phase: corev1.PodFailed},
+ },
+ wantDone: true,
+ wantErr: true,
+ errMessage: "pod pod2 failed",
+ },
+ {
+ name: "pod pending",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod3"},
+ Status: corev1.PodStatus{Phase: corev1.PodPending},
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "pod running",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod4"},
+ Status: corev1.PodStatus{Phase: corev1.PodRunning},
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "wrong object type",
+ obj: &metav1.Status{},
+ wantDone: true,
+ wantErr: true,
+ errMessage: "expected foo to be a *v1.Pod, got *v1.Status",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ done, err := lw.waitForPodSuccess(tt.obj, "foo")
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("expected error, got none")
+ } else if !strings.Contains(err.Error(), tt.errMessage) {
+ t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error())
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if done != tt.wantDone {
+ t.Errorf("got done=%v, want %v", done, tt.wantDone)
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_waitForJob(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ tests := []struct {
+ name string
+ obj runtime.Object
+ wantDone bool
+ wantErr bool
+ errMessage string
+ }{
+ {
+ name: "job complete",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: "True",
+ },
+ },
+ },
+ },
+ wantDone: true,
+ wantErr: false,
+ },
+ {
+ name: "job failed",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobFailed,
+ Status: "True",
+ Reason: "FailedReason",
+ },
+ },
+ },
+ },
+ wantDone: true,
+ wantErr: true,
+ errMessage: "job test-job failed: FailedReason",
+ },
+ {
+ name: "job in progress",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Active: 1,
+ Failed: 0,
+ Succeeded: 0,
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: "False",
+ },
+ {
+ Type: batchv1.JobFailed,
+ Status: "False",
+ },
+ },
+ },
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "wrong object type",
+ obj: &metav1.Status{},
+ wantDone: true,
+ wantErr: true,
+ errMessage: "expected test-job to be a *batch.Job, got *v1.Status",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ done, err := lw.waitForJob(tt.obj, "test-job")
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("expected error, got none")
+ } else if !strings.Contains(err.Error(), tt.errMessage) {
+ t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error())
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ if done != tt.wantDone {
+ t.Errorf("got done=%v, want %v", done, tt.wantDone)
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_isRetryableError(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ info := &resource.Info{
+ Name: "test-resource",
+ }
+
+ tests := []struct {
+ name string
+ err error
+ wantRetry bool
+ description string
+ }{
+ {
+ name: "nil error",
+ err: nil,
+ wantRetry: false,
+ },
+ {
+ name: "status error - 0 code",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: 0}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 429 (TooManyRequests)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusTooManyRequests}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 503",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusServiceUnavailable}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 501 (NotImplemented)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotImplemented}},
+ wantRetry: false,
+ },
+ {
+ name: "status error - 400 (Bad Request)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusBadRequest}},
+ wantRetry: false,
+ },
+ {
+ name: "non-status error",
+ err: fmt.Errorf("some generic error"),
+ wantRetry: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := lw.isRetryableError(tt.err, info)
+ if got != tt.wantRetry {
+ t.Errorf("isRetryableError() = %v, want %v", got, tt.wantRetry)
+ }
+ })
+ }
+}
diff --git a/pkg/plugin/installer/local_installer.go b/pkg/plugin/installer/local_installer.go
deleted file mode 100644
index a79ca7ec7..000000000
--- a/pkg/plugin/installer/local_installer.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright The Helm Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
-
-import (
- "os"
- "path/filepath"
-
- "github.com/pkg/errors"
-)
-
-// ErrPluginNotAFolder indicates that the plugin path is not a folder.
-var ErrPluginNotAFolder = errors.New("expected plugin to be a folder")
-
-// LocalInstaller installs plugins from the filesystem.
-type LocalInstaller struct {
- base
-}
-
-// NewLocalInstaller creates a new LocalInstaller.
-func NewLocalInstaller(source string) (*LocalInstaller, error) {
- src, err := filepath.Abs(source)
- if err != nil {
- return nil, errors.Wrap(err, "unable to get absolute path to plugin")
- }
- i := &LocalInstaller{
- base: newBase(src),
- }
- return i, nil
-}
-
-// Install creates a symlink to the plugin directory.
-//
-// Implements Installer.
-func (i *LocalInstaller) Install() error {
- stat, err := os.Stat(i.Source)
- if err != nil {
- return err
- }
- if !stat.IsDir() {
- return ErrPluginNotAFolder
- }
-
- if !isPlugin(i.Source) {
- return ErrMissingMetadata
- }
- debug("symlinking %s to %s", i.Source, i.Path())
- return os.Symlink(i.Source, i.Path())
-}
-
-// Update updates a local repository
-func (i *LocalInstaller) Update() error {
- debug("local repository is auto-updated")
- return nil
-}
diff --git a/pkg/plugin/installer/local_installer_test.go b/pkg/plugin/installer/local_installer_test.go
deleted file mode 100644
index b28920af4..000000000
--- a/pkg/plugin/installer/local_installer_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright The Helm Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package installer // import "helm.sh/helm/v4/pkg/plugin/installer"
-
-import (
- "os"
- "path/filepath"
- "testing"
-
- "helm.sh/helm/v4/pkg/helmpath"
-)
-
-var _ Installer = new(LocalInstaller)
-
-func TestLocalInstaller(t *testing.T) {
- // Make a temp dir
- tdir := t.TempDir()
- if err := os.WriteFile(filepath.Join(tdir, "plugin.yaml"), []byte{}, 0644); err != nil {
- t.Fatal(err)
- }
-
- source := "../testdata/plugdir/good/echo"
- i, err := NewForSource(source, "")
- if err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
-
- if err := Install(i); err != nil {
- t.Fatal(err)
- }
-
- if i.Path() != helmpath.DataPath("plugins", "echo") {
- t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path())
- }
- defer os.RemoveAll(filepath.Dir(helmpath.DataPath())) // helmpath.DataPath is like /tmp/helm013130971/helm
-}
-
-func TestLocalInstallerNotAFolder(t *testing.T) {
- source := "../testdata/plugdir/good/echo/plugin.yaml"
- i, err := NewForSource(source, "")
- if err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
-
- err = Install(i)
- if err == nil {
- t.Fatal("expected error")
- }
- if err != ErrPluginNotAFolder {
- t.Fatalf("expected error to equal: %q", err)
- }
-}
diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go
deleted file mode 100644
index 3456664c1..000000000
--- a/pkg/plugin/plugin.go
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
-Copyright The Helm Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package plugin // import "helm.sh/helm/v4/pkg/plugin"
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
- "unicode"
-
- "github.com/pkg/errors"
- "sigs.k8s.io/yaml"
-
- "helm.sh/helm/v4/pkg/cli"
-)
-
-const PluginFileName = "plugin.yaml"
-
-// Downloaders represents the plugins capability if it can retrieve
-// charts from special sources
-type Downloaders struct {
- // Protocols are the list of schemes from the charts URL.
- Protocols []string `json:"protocols"`
- // Command is the executable path with which the plugin performs
- // the actual download for the corresponding Protocols
- Command string `json:"command"`
-}
-
-// PlatformCommand represents a command for a particular operating system and architecture
-type PlatformCommand struct {
- OperatingSystem string `json:"os"`
- Architecture string `json:"arch"`
- Command string `json:"command"`
- Args []string `json:"args"`
-}
-
-// Metadata describes a plugin.
-//
-// This is the plugin equivalent of a chart.Metadata.
-type Metadata struct {
- // Name is the name of the plugin
- Name string `json:"name"`
-
- // Version is a SemVer 2 version of the plugin.
- Version string `json:"version"`
-
- // Usage is the single-line usage text shown in help
- Usage string `json:"usage"`
-
- // Description is a long description shown in places like `helm help`
- Description string `json:"description"`
-
- // PlatformCommand is the plugin command, with a platform selector and support for args.
- //
- // The command and args will be passed through environment expansion, so env vars can
- // be present in this command. Unless IgnoreFlags is set, this will
- // also merge the flags passed from Helm.
- //
- // Note that the command is not executed in a shell. To do so, we suggest
- // pointing the command to a shell script.
- //
- // The following rules will apply to processing platform commands:
- // - If PlatformCommand is present, it will be used
- // - If both OS and Arch match the current platform, search will stop and the command will be executed
- // - If OS matches and Arch is empty, the command will be executed
- // - If no OS/Arch match is found, the default command will be executed
- // - If no matches are found in platformCommand, Helm will exit with an error
- PlatformCommand []PlatformCommand `json:"platformCommand"`
-
- // Command is the plugin command, as a single string.
- // Providing a command will result in an error if PlatformCommand is also set.
- //
- // The command will be passed through environment expansion, so env vars can
- // be present in this command. Unless IgnoreFlags is set, this will
- // also merge the flags passed from Helm.
- //
- // Note that command is not executed in a shell. To do so, we suggest
- // pointing the command to a shell script.
- //
- // DEPRECATED: Use PlatformCommand instead. Remove in Helm 4.
- Command string `json:"command"`
-
- // IgnoreFlags ignores any flags passed in from Helm
- //
- // For example, if the plugin is invoked as `helm --debug myplugin`, if this
- // is false, `--debug` will be appended to `--command`. If this is true,
- // the `--debug` flag will be discarded.
- IgnoreFlags bool `json:"ignoreFlags"`
-
- // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
- //
- // The command and args will be passed through environment expansion, so env vars can
- // be present in the command.
- //
- // Note that the command is not executed in a shell. To do so, we suggest
- // pointing the command to a shell script.
- //
- // The following rules will apply to processing platform hooks:
- // - If PlatformHooks is present, it will be used
- // - If both OS and Arch match the current platform, search will stop and the command will be executed
- // - If OS matches and Arch is empty, the command will be executed
- // - If no OS/Arch match is found, the default command will be executed
- // - If no matches are found in platformHooks, Helm will skip the event
- PlatformHooks PlatformHooks `json:"platformHooks"`
-
- // Hooks are commands that will run on plugin events, as a single string.
- // Providing a hooks will result in an error if PlatformHooks is also set.
- //
- // The command will be passed through environment expansion, so env vars can
- // be present in this command.
- //
- // Note that the command is executed in the sh shell.
- //
- // DEPRECATED: Use PlatformHooks instead. Remove in Helm 4.
- Hooks Hooks
-
- // Downloaders field is used if the plugin supply downloader mechanism
- // for special protocols.
- Downloaders []Downloaders `json:"downloaders"`
-
- // UseTunnelDeprecated indicates that this command needs a tunnel.
- // Setting this will cause a number of side effects, such as the
- // automatic setting of HELM_HOST.
- // DEPRECATED and unused, but retained for backwards compatibility with Helm 2 plugins. Remove in Helm 4
- UseTunnelDeprecated bool `json:"useTunnel,omitempty"`
-}
-
-// Plugin represents a plugin.
-type Plugin struct {
- // Metadata is a parsed representation of a plugin.yaml
- Metadata *Metadata
- // Dir is the string path to the directory that holds the plugin.
- Dir string
-}
-
-// Returns command and args strings based on the following rules in priority order:
-// - From the PlatformCommand where OS and Arch match the current platform
-// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified
-// - From the PlatformCommand where OS is empty/unspecified and Arch matches the current platform
-// - From the PlatformCommand where OS and Arch are both empty/unspecified
-// - Return nil, nil
-func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) {
- var command, args []string
- found := false
- foundOs := false
-
- eq := strings.EqualFold
- for _, c := range cmds {
- if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) {
- // Return early for an exact match
- return strings.Split(c.Command, " "), c.Args
- }
-
- if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 {
- // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match
- continue
- }
-
- if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) {
- // First OS match with empty arch, can only be overridden by a direct match
- command = strings.Split(c.Command, " ")
- args = c.Args
- found = true
- foundOs = true
- } else if !found {
- // First empty match, can be overridden by a direct match or an OS match
- command = strings.Split(c.Command, " ")
- args = c.Args
- found = true
- }
- }
-
- return command, args
-}
-
-// PrepareCommands takes a []Plugin.PlatformCommand
-// and prepares the command and arguments for execution.
-//
-// It merges extraArgs into any arguments supplied in the plugin. It
-// returns the main command and an args array.
-//
-// The result is suitable to pass to exec.Command.
-func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) {
- cmdParts, args := getPlatformCommand(cmds)
- if len(cmdParts) == 0 || cmdParts[0] == "" {
- return "", nil, fmt.Errorf("no plugin command is applicable")
- }
-
- main := os.ExpandEnv(cmdParts[0])
- baseArgs := []string{}
- if len(cmdParts) > 1 {
- for _, cmdPart := range cmdParts[1:] {
- if expandArgs {
- baseArgs = append(baseArgs, os.ExpandEnv(cmdPart))
- } else {
- baseArgs = append(baseArgs, cmdPart)
- }
- }
- }
-
- for _, arg := range args {
- if expandArgs {
- baseArgs = append(baseArgs, os.ExpandEnv(arg))
- } else {
- baseArgs = append(baseArgs, arg)
- }
- }
-
- if len(extraArgs) > 0 {
- baseArgs = append(baseArgs, extraArgs...)
- }
-
- return main, baseArgs, nil
-}
-
-// PrepareCommand gets the correct command and arguments for a plugin.
-//
-// It merges extraArgs into any arguments supplied in the plugin. It returns the name of the command and an args array.
-//
-// The result is suitable to pass to exec.Command.
-func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) {
- var extraArgsIn []string
-
- if !p.Metadata.IgnoreFlags {
- extraArgsIn = extraArgs
- }
-
- cmds := p.Metadata.PlatformCommand
- if len(cmds) == 0 && len(p.Metadata.Command) > 0 {
- cmds = []PlatformCommand{{Command: p.Metadata.Command}}
- }
-
- return PrepareCommands(cmds, true, extraArgsIn)
-}
-
-// validPluginName is a regular expression that validates plugin names.
-//
-// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
-var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
-
-// validatePluginData validates a plugin's YAML data.
-func validatePluginData(plug *Plugin, filepath string) error {
- // When metadata section missing, initialize with no data
- if plug.Metadata == nil {
- plug.Metadata = &Metadata{}
- }
- if !validPluginName.MatchString(plug.Metadata.Name) {
- return fmt.Errorf("invalid plugin name at %q", filepath)
- }
- plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
-
- if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 {
- return fmt.Errorf("both platformCommand and command are set in %q", filepath)
- }
-
- if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 {
- return fmt.Errorf("both platformHooks and hooks are set in %q", filepath)
- }
-
- // We could also validate SemVer, executable, and other fields should we so choose.
- return nil
-}
-
-// sanitizeString normalize spaces and removes non-printable characters.
-func sanitizeString(str string) string {
- return strings.Map(func(r rune) rune {
- if unicode.IsSpace(r) {
- return ' '
- }
- if unicode.IsPrint(r) {
- return r
- }
- return -1
- }, str)
-}
-
-func detectDuplicates(plugs []*Plugin) error {
- names := map[string]string{}
-
- for _, plug := range plugs {
- if oldpath, ok := names[plug.Metadata.Name]; ok {
- return fmt.Errorf(
- "two plugins claim the name %q at %q and %q",
- plug.Metadata.Name,
- oldpath,
- plug.Dir,
- )
- }
- names[plug.Metadata.Name] = plug.Dir
- }
-
- return nil
-}
-
-// LoadDir loads a plugin from the given directory.
-func LoadDir(dirname string) (*Plugin, error) {
- pluginfile := filepath.Join(dirname, PluginFileName)
- data, err := os.ReadFile(pluginfile)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to read plugin at %q", pluginfile)
- }
-
- plug := &Plugin{Dir: dirname}
- if err := yaml.UnmarshalStrict(data, &plug.Metadata); err != nil {
- return nil, errors.Wrapf(err, "failed to load plugin at %q", pluginfile)
- }
- return plug, validatePluginData(plug, pluginfile)
-}
-
-// LoadAll loads all plugins found beneath the base directory.
-//
-// This scans only one directory level.
-func LoadAll(basedir string) ([]*Plugin, error) {
- plugins := []*Plugin{}
- // We want basedir/*/plugin.yaml
- scanpath := filepath.Join(basedir, "*", PluginFileName)
- matches, err := filepath.Glob(scanpath)
- if err != nil {
- return plugins, errors.Wrapf(err, "failed to find plugins in %q", scanpath)
- }
-
- if matches == nil {
- return plugins, nil
- }
-
- for _, yaml := range matches {
- dir := filepath.Dir(yaml)
- p, err := LoadDir(dir)
- if err != nil {
- return plugins, err
- }
- plugins = append(plugins, p)
- }
- return plugins, detectDuplicates(plugins)
-}
-
-// FindPlugins returns a list of YAML files that describe plugins.
-func FindPlugins(plugdirs string) ([]*Plugin, error) {
- found := []*Plugin{}
- // Let's get all UNIXy and allow path separators
- for _, p := range filepath.SplitList(plugdirs) {
- matches, err := LoadAll(p)
- if err != nil {
- return matches, err
- }
- found = append(found, matches...)
- }
- return found, nil
-}
-
-// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because
-// the plugin subsystem itself needs access to the environment variables
-// created here.
-func SetupPluginEnv(settings *cli.EnvSettings, name, base string) {
- env := settings.EnvVars()
- env["HELM_PLUGIN_NAME"] = name
- env["HELM_PLUGIN_DIR"] = base
- for key, val := range env {
- os.Setenv(key, val)
- }
-}
diff --git a/pkg/plugin/plugin_test.go b/pkg/plugin/plugin_test.go
deleted file mode 100644
index b96428f6b..000000000
--- a/pkg/plugin/plugin_test.go
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
-Copyright The Helm Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package plugin // import "helm.sh/helm/v4/pkg/plugin"
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "testing"
-
- "helm.sh/helm/v4/pkg/cli"
-)
-
-func TestPrepareCommand(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
-
- p := &Plugin{
- Dir: "/tmp", // Unused
- Metadata: &Metadata{
- Name: "test",
- Command: "echo \"error\"",
- PlatformCommand: []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
- },
- },
- }
-
- cmd, args, err := p.PrepareCommand([]string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestPrepareCommandExtraArgs(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
- extraArgs := []string{"--debug", "--foo", "bar"}
-
- p := &Plugin{
- Dir: "/tmp", // Unused
- Metadata: &Metadata{
- Name: "test",
- Command: "echo \"error\"",
- PlatformCommand: []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- },
- },
- }
-
- expectedArgs := append(cmdArgs, extraArgs...)
-
- cmd, args, err := p.PrepareCommand(extraArgs)
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, expectedArgs) {
- t.Fatalf("Expected %v, got %v", expectedArgs, args)
- }
-}
-
-func TestPrepareCommandExtraArgsIgnored(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
- extraArgs := []string{"--debug", "--foo", "bar"}
-
- p := &Plugin{
- Dir: "/tmp", // Unused
- Metadata: &Metadata{
- Name: "test",
- Command: "echo \"error\"",
- PlatformCommand: []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- },
- IgnoreFlags: true,
- },
- }
-
- cmd, args, err := p.PrepareCommand(extraArgs)
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestPrepareCommands(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
-
- cmds := []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- }
-
- cmd, args, err := PrepareCommands(cmds, true, []string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestPrepareCommandsExtraArgs(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
- extraArgs := []string{"--debug", "--foo", "bar"}
-
- cmds := []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- }
-
- expectedArgs := append(cmdArgs, extraArgs...)
-
- cmd, args, err := PrepareCommands(cmds, true, extraArgs)
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, expectedArgs) {
- t.Fatalf("Expected %v, got %v", expectedArgs, args)
- }
-}
-
-func TestPrepareCommandsNoArch(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
-
- cmds := []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- }
-
- cmd, args, err := PrepareCommands(cmds, true, []string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestPrepareCommandsNoOsNoArch(t *testing.T) {
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"test\""}
-
- cmds := []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- {OperatingSystem: "", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
- }
-
- cmd, args, err := PrepareCommands(cmds, true, []string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestPrepareCommandsNoMatch(t *testing.T) {
- cmds := []PlatformCommand{
- {OperatingSystem: "no-os", Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
- {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
- {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
- }
-
- if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil {
- t.Fatalf("Expected error to be returned")
- }
-}
-
-func TestPrepareCommandsNoCommands(t *testing.T) {
- cmds := []PlatformCommand{}
-
- if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil {
- t.Fatalf("Expected error to be returned")
- }
-}
-
-func TestPrepareCommandsExpand(t *testing.T) {
- t.Setenv("TEST", "test")
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"${TEST}\""}
- cmds := []PlatformCommand{
- {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
- }
-
- expectedArgs := []string{"-c", "echo \"test\""}
-
- cmd, args, err := PrepareCommands(cmds, true, []string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, expectedArgs) {
- t.Fatalf("Expected %v, got %v", expectedArgs, args)
- }
-}
-
-func TestPrepareCommandsNoExpand(t *testing.T) {
- t.Setenv("TEST", "test")
- cmdMain := "sh"
- cmdArgs := []string{"-c", "echo \"${TEST}\""}
- cmds := []PlatformCommand{
- {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
- }
-
- cmd, args, err := PrepareCommands(cmds, false, []string{})
- if err != nil {
- t.Fatal(err)
- }
- if cmd != cmdMain {
- t.Fatalf("Expected %q, got %q", cmdMain, cmd)
- }
- if !reflect.DeepEqual(args, cmdArgs) {
- t.Fatalf("Expected %v, got %v", cmdArgs, args)
- }
-}
-
-func TestLoadDir(t *testing.T) {
- dirname := "testdata/plugdir/good/hello"
- plug, err := LoadDir(dirname)
- if err != nil {
- t.Fatalf("error loading Hello plugin: %s", err)
- }
-
- if plug.Dir != dirname {
- t.Fatalf("Expected dir %q, got %q", dirname, plug.Dir)
- }
-
- expect := &Metadata{
- Name: "hello",
- Version: "0.1.0",
- Usage: "usage",
- Description: "description",
- PlatformCommand: []PlatformCommand{
- {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}},
- {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}},
- },
- IgnoreFlags: true,
- PlatformHooks: map[string][]PlatformCommand{
- Install: {
- {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
- {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
- },
- },
- }
-
- if !reflect.DeepEqual(expect, plug.Metadata) {
- t.Fatalf("Expected plugin metadata %v, got %v", expect, plug.Metadata)
- }
-}
-
-func TestLoadDirDuplicateEntries(t *testing.T) {
- dirname := "testdata/plugdir/bad/duplicate-entries"
- if _, err := LoadDir(dirname); err == nil {
- t.Errorf("successfully loaded plugin with duplicate entries when it should've failed")
- }
-}
-
-func TestDownloader(t *testing.T) {
- dirname := "testdata/plugdir/good/downloader"
- plug, err := LoadDir(dirname)
- if err != nil {
- t.Fatalf("error loading Hello plugin: %s", err)
- }
-
- if plug.Dir != dirname {
- t.Fatalf("Expected dir %q, got %q", dirname, plug.Dir)
- }
-
- expect := &Metadata{
- Name: "downloader",
- Version: "1.2.3",
- Usage: "usage",
- Description: "download something",
- Command: "echo Hello",
- Downloaders: []Downloaders{
- {
- Protocols: []string{"myprotocol", "myprotocols"},
- Command: "echo Download",
- },
- },
- }
-
- if !reflect.DeepEqual(expect, plug.Metadata) {
- t.Fatalf("Expected metadata %v, got %v", expect, plug.Metadata)
- }
-}
-
-func TestLoadAll(t *testing.T) {
- // Verify that empty dir loads:
- if plugs, err := LoadAll("testdata"); err != nil {
- t.Fatalf("error loading dir with no plugins: %s", err)
- } else if len(plugs) > 0 {
- t.Fatalf("expected empty dir to have 0 plugins")
- }
-
- basedir := "testdata/plugdir/good"
- plugs, err := LoadAll(basedir)
- if err != nil {
- t.Fatalf("Could not load %q: %s", basedir, err)
- }
-
- if l := len(plugs); l != 3 {
- t.Fatalf("expected 3 plugins, found %d", l)
- }
-
- if plugs[0].Metadata.Name != "downloader" {
- t.Errorf("Expected first plugin to be echo, got %q", plugs[0].Metadata.Name)
- }
- if plugs[1].Metadata.Name != "echo" {
- t.Errorf("Expected first plugin to be echo, got %q", plugs[0].Metadata.Name)
- }
- if plugs[2].Metadata.Name != "hello" {
- t.Errorf("Expected second plugin to be hello, got %q", plugs[1].Metadata.Name)
- }
-}
-
-func TestFindPlugins(t *testing.T) {
- cases := []struct {
- name string
- plugdirs string
- expected int
- }{
- {
- name: "plugdirs is empty",
- plugdirs: "",
- expected: 0,
- },
- {
- name: "plugdirs isn't dir",
- plugdirs: "./plugin_test.go",
- expected: 0,
- },
- {
- name: "plugdirs doesn't have plugin",
- plugdirs: ".",
- expected: 0,
- },
- {
- name: "normal",
- plugdirs: "./testdata/plugdir/good",
- expected: 3,
- },
- }
- for _, c := range cases {
- t.Run(t.Name(), func(t *testing.T) {
- plugin, _ := FindPlugins(c.plugdirs)
- if len(plugin) != c.expected {
- t.Errorf("expected: %v, got: %v", c.expected, len(plugin))
- }
- })
- }
-}
-
-func TestSetupEnv(t *testing.T) {
- name := "pequod"
- base := filepath.Join("testdata/helmhome/helm/plugins", name)
-
- s := cli.New()
- s.PluginsDirectory = "testdata/helmhome/helm/plugins"
-
- SetupPluginEnv(s, name, base)
- for _, tt := range []struct {
- name, expect string
- }{
- {"HELM_PLUGIN_NAME", name},
- {"HELM_PLUGIN_DIR", base},
- } {
- if got := os.Getenv(tt.name); got != tt.expect {
- t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got)
- }
- }
-}
-
-func TestSetupEnvWithSpace(t *testing.T) {
- name := "sureshdsk"
- base := filepath.Join("testdata/helm home/helm/plugins", name)
-
- s := cli.New()
- s.PluginsDirectory = "testdata/helm home/helm/plugins"
-
- SetupPluginEnv(s, name, base)
- for _, tt := range []struct {
- name, expect string
- }{
- {"HELM_PLUGIN_NAME", name},
- {"HELM_PLUGIN_DIR", base},
- } {
- if got := os.Getenv(tt.name); got != tt.expect {
- t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got)
- }
- }
-}
-
-func TestValidatePluginData(t *testing.T) {
- // A mock plugin missing any metadata.
- mockMissingMeta := &Plugin{
- Dir: "no-such-dir",
- }
-
- // A mock plugin with no commands
- mockNoCommand := mockPlugin("foo")
- mockNoCommand.Metadata.PlatformCommand = []PlatformCommand{}
- mockNoCommand.Metadata.PlatformHooks = map[string][]PlatformCommand{}
-
- // A mock plugin with legacy commands
- mockLegacyCommand := mockPlugin("foo")
- mockLegacyCommand.Metadata.PlatformCommand = []PlatformCommand{}
- mockLegacyCommand.Metadata.Command = "echo \"mock plugin\""
- mockLegacyCommand.Metadata.PlatformHooks = map[string][]PlatformCommand{}
- mockLegacyCommand.Metadata.Hooks = map[string]string{
- Install: "echo installing...",
- }
-
- // A mock plugin with a command also set
- mockWithCommand := mockPlugin("foo")
- mockWithCommand.Metadata.Command = "echo \"mock plugin\""
-
- // A mock plugin with a hooks also set
- mockWithHooks := mockPlugin("foo")
- mockWithHooks.Metadata.Hooks = map[string]string{
- Install: "echo installing...",
- }
-
- for i, item := range []struct {
- pass bool
- plug *Plugin
- }{
- {true, mockPlugin("abcdefghijklmnopqrstuvwxyz0123456789_-ABC")},
- {true, mockPlugin("foo-bar-FOO-BAR_1234")},
- {false, mockPlugin("foo -bar")},
- {false, mockPlugin("$foo -bar")}, // Test leading chars
- {false, mockPlugin("foo -bar ")}, // Test trailing chars
- {false, mockPlugin("foo\nbar")}, // Test newline
- {false, mockMissingMeta}, // Test if the metadata section missing
- {true, mockNoCommand}, // Test no command metadata works
- {true, mockLegacyCommand}, // Test legacy command metadata works
- {false, mockWithCommand}, // Test platformCommand and command both set fails
- {false, mockWithHooks}, // Test platformHooks and hooks both set fails
- } {
- err := validatePluginData(item.plug, fmt.Sprintf("test-%d", i))
- if item.pass && err != nil {
- t.Errorf("failed to validate case %d: %s", i, err)
- } else if !item.pass && err == nil {
- t.Errorf("expected case %d to fail", i)
- }
- }
-}
-
-func TestDetectDuplicates(t *testing.T) {
- plugs := []*Plugin{
- mockPlugin("foo"),
- mockPlugin("bar"),
- }
- if err := detectDuplicates(plugs); err != nil {
- t.Error("no duplicates in the first set")
- }
- plugs = append(plugs, mockPlugin("foo"))
- if err := detectDuplicates(plugs); err == nil {
- t.Error("duplicates in the second set")
- }
-}
-
-func mockPlugin(name string) *Plugin {
- return &Plugin{
- Metadata: &Metadata{
- Name: name,
- Version: "v0.1.2",
- Usage: "Mock plugin",
- Description: "Mock plugin for testing",
- PlatformCommand: []PlatformCommand{
- {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}},
- {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}},
- },
- PlatformHooks: map[string][]PlatformCommand{
- Install: {
- {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
- {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
- },
- },
- },
- Dir: "no-such-dir",
- }
-}
diff --git a/pkg/postrender/exec.go b/pkg/postrender/exec.go
deleted file mode 100644
index 84357c656..000000000
--- a/pkg/postrender/exec.go
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package postrender
-
-import (
- "bytes"
- "io"
- "os/exec"
- "path/filepath"
-
- "github.com/pkg/errors"
-)
-
-type execRender struct {
- binaryPath string
- args []string
-}
-
-// NewExec returns a PostRenderer implementation that calls the provided binary.
-// It returns an error if the binary cannot be found. If the path does not
-// contain any separators, it will search in $PATH, otherwise it will resolve
-// any relative paths to a fully qualified path
-func NewExec(binaryPath string, args ...string) (PostRenderer, error) {
- fullPath, err := getFullPath(binaryPath)
- if err != nil {
- return nil, err
- }
- return &execRender{fullPath, args}, nil
-}
-
-// Run the configured binary for the post render
-func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
- cmd := exec.Command(p.binaryPath, p.args...)
- stdin, err := cmd.StdinPipe()
- if err != nil {
- return nil, err
- }
-
- var postRendered = &bytes.Buffer{}
- var stderr = &bytes.Buffer{}
- cmd.Stdout = postRendered
- cmd.Stderr = stderr
-
- go func() {
- defer stdin.Close()
- io.Copy(stdin, renderedManifests)
- }()
- err = cmd.Run()
- if err != nil {
- return nil, errors.Wrapf(err, "error while running command %s. error output:\n%s", p.binaryPath, stderr.String())
- }
-
- // If the binary returned almost nothing, it's likely that it didn't
- // successfully render anything
- if len(bytes.TrimSpace(postRendered.Bytes())) == 0 {
- return nil, errors.Errorf("post-renderer %q produced empty output", p.binaryPath)
- }
-
- return postRendered, nil
-}
-
-// getFullPath returns the full filepath to the binary to execute. If the path
-// does not contain any separators, it will search in $PATH, otherwise it will
-// resolve any relative paths to a fully qualified path
-func getFullPath(binaryPath string) (string, error) {
- // NOTE(thomastaylor312): I am leaving this code commented out here. During
- // the implementation of post-render, it was brought up that if we are
- // relying on plugins, we should actually use the plugin system so it can
- // properly handle multiple OSs. This will be a feature add in the future,
- // so I left this code for reference. It can be deleted or reused once the
- // feature is implemented
-
- // Manually check the plugin dir first
- // if !strings.Contains(binaryPath, string(filepath.Separator)) {
- // // First check the plugin dir
- // pluginDir := helmpath.DataPath("plugins") // Default location
- // // If location for plugins is explicitly set, check there
- // if v, ok := os.LookupEnv("HELM_PLUGINS"); ok {
- // pluginDir = v
- // }
- // // The plugins variable can actually contain multiple paths, so loop through those
- // for _, p := range filepath.SplitList(pluginDir) {
- // _, err := os.Stat(filepath.Join(p, binaryPath))
- // if err != nil && !os.IsNotExist(err) {
- // return "", err
- // } else if err == nil {
- // binaryPath = filepath.Join(p, binaryPath)
- // break
- // }
- // }
- // }
-
- // Now check for the binary using the given path or check if it exists in
- // the path and is executable
- checkedPath, err := exec.LookPath(binaryPath)
- if err != nil {
- return "", errors.Wrapf(err, "unable to find binary at %s", binaryPath)
- }
-
- return filepath.Abs(checkedPath)
-}
diff --git a/pkg/postrender/exec_test.go b/pkg/postrender/exec_test.go
deleted file mode 100644
index 2b091cc12..000000000
--- a/pkg/postrender/exec_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package postrender
-
-import (
- "bytes"
- "os"
- "path/filepath"
- "runtime"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-const testingScript = `#!/bin/sh
-if [ $# -eq 0 ]; then
-sed s/FOOTEST/BARTEST/g <&0
-else
-sed s/FOOTEST/"$*"/g <&0
-fi
-`
-
-func TestGetFullPath(t *testing.T) {
- is := assert.New(t)
- t.Run("full path resolves correctly", func(t *testing.T) {
- testpath := setupTestingScript(t)
-
- fullPath, err := getFullPath(testpath)
- is.NoError(err)
- is.Equal(testpath, fullPath)
- })
-
- t.Run("relative path resolves correctly", func(t *testing.T) {
- testpath := setupTestingScript(t)
-
- currentDir, err := os.Getwd()
- require.NoError(t, err)
- relative, err := filepath.Rel(currentDir, testpath)
- require.NoError(t, err)
- fullPath, err := getFullPath(relative)
- is.NoError(err)
- is.Equal(testpath, fullPath)
- })
-
- t.Run("binary in PATH resolves correctly", func(t *testing.T) {
- testpath := setupTestingScript(t)
-
- realPath := os.Getenv("PATH")
- os.Setenv("PATH", filepath.Dir(testpath))
- defer func() {
- os.Setenv("PATH", realPath)
- }()
-
- fullPath, err := getFullPath(filepath.Base(testpath))
- is.NoError(err)
- is.Equal(testpath, fullPath)
- })
-
- // NOTE(thomastaylor312): See note in getFullPath for more details why this
- // is here
-
- // t.Run("binary in plugin path resolves correctly", func(t *testing.T) {
- // testpath, cleanup := setupTestingScript(t)
- // defer cleanup()
-
- // realPath := os.Getenv("HELM_PLUGINS")
- // os.Setenv("HELM_PLUGINS", filepath.Dir(testpath))
- // defer func() {
- // os.Setenv("HELM_PLUGINS", realPath)
- // }()
-
- // fullPath, err := getFullPath(filepath.Base(testpath))
- // is.NoError(err)
- // is.Equal(testpath, fullPath)
- // })
-
- // t.Run("binary in multiple plugin paths resolves correctly", func(t *testing.T) {
- // testpath, cleanup := setupTestingScript(t)
- // defer cleanup()
-
- // realPath := os.Getenv("HELM_PLUGINS")
- // os.Setenv("HELM_PLUGINS", filepath.Dir(testpath)+string(os.PathListSeparator)+"/another/dir")
- // defer func() {
- // os.Setenv("HELM_PLUGINS", realPath)
- // }()
-
- // fullPath, err := getFullPath(filepath.Base(testpath))
- // is.NoError(err)
- // is.Equal(testpath, fullPath)
- // })
-}
-
-func TestExecRun(t *testing.T) {
- if runtime.GOOS == "windows" {
- // the actual Run test uses a basic sed example, so skip this test on windows
- t.Skip("skipping on windows")
- }
- is := assert.New(t)
- testpath := setupTestingScript(t)
-
- renderer, err := NewExec(testpath)
- require.NoError(t, err)
-
- output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
- is.NoError(err)
- is.Contains(output.String(), "BARTEST")
-}
-
-func TestExecRunWithNoOutput(t *testing.T) {
- if runtime.GOOS == "windows" {
- // the actual Run test uses a basic sed example, so skip this test on windows
- t.Skip("skipping on windows")
- }
- is := assert.New(t)
- testpath := setupTestingScript(t)
-
- renderer, err := NewExec(testpath)
- require.NoError(t, err)
-
- _, err = renderer.Run(bytes.NewBufferString(""))
- is.Error(err)
-}
-
-func TestNewExecWithOneArgsRun(t *testing.T) {
- if runtime.GOOS == "windows" {
- // the actual Run test uses a basic sed example, so skip this test on windows
- t.Skip("skipping on windows")
- }
- is := assert.New(t)
- testpath := setupTestingScript(t)
-
- renderer, err := NewExec(testpath, "ARG1")
- require.NoError(t, err)
-
- output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
- is.NoError(err)
- is.Contains(output.String(), "ARG1")
-}
-
-func TestNewExecWithTwoArgsRun(t *testing.T) {
- if runtime.GOOS == "windows" {
- // the actual Run test uses a basic sed example, so skip this test on windows
- t.Skip("skipping on windows")
- }
- is := assert.New(t)
- testpath := setupTestingScript(t)
-
- renderer, err := NewExec(testpath, "ARG1", "ARG2")
- require.NoError(t, err)
-
- output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
- is.NoError(err)
- is.Contains(output.String(), "ARG1 ARG2")
-}
-
-func setupTestingScript(t *testing.T) (filepath string) {
- t.Helper()
-
- tempdir := t.TempDir()
-
- f, err := os.CreateTemp(tempdir, "post-render-test.sh")
- if err != nil {
- t.Fatalf("unable to create tempfile for testing: %s", err)
- }
-
- _, err = f.WriteString(testingScript)
- if err != nil {
- t.Fatalf("unable to write tempfile for testing: %s", err)
- }
-
- err = f.Chmod(0755)
- if err != nil {
- t.Fatalf("unable to make tempfile executable for testing: %s", err)
- }
-
- err = f.Close()
- if err != nil {
- t.Fatalf("unable to close tempfile after writing: %s", err)
- }
-
- return f.Name()
-}
diff --git a/pkg/postrenderer/postrenderer.go b/pkg/postrenderer/postrenderer.go
new file mode 100644
index 000000000..55e6d3adf
--- /dev/null
+++ b/pkg/postrenderer/postrenderer.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrenderer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+// PostRenderer is an interface different plugin runtimes
+// it may be also be used without the factory for custom post-renderers
+type PostRenderer interface {
+ // Run expects a single buffer filled with Helm rendered manifests. It
+ // expects the modified results to be returned on a separate buffer or an
+ // error if there was an issue or failure while running the post render step
+ Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+}
+
+// NewPostRendererPlugin creates a PostRenderer that uses the plugin's Runtime
+func NewPostRendererPlugin(settings *cli.EnvSettings, pluginName string, args ...string) (PostRenderer, error) {
+ descriptor := plugin.Descriptor{
+ Name: pluginName,
+ Type: "postrenderer/v1",
+ }
+ p, err := plugin.FindPlugin(filepath.SplitList(settings.PluginsDirectory), descriptor)
+ if err != nil {
+ return nil, err
+ }
+
+ return &postRendererPlugin{
+ plugin: p,
+ args: args,
+ settings: settings,
+ }, nil
+}
+
+// postRendererPlugin implements PostRenderer by delegating to the plugin's Runtime
+type postRendererPlugin struct {
+ plugin plugin.Plugin
+ args []string
+ settings *cli.EnvSettings
+}
+
+// Run implements PostRenderer by using the plugin's Runtime
+func (r *postRendererPlugin) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+ input := &plugin.Input{
+ Message: schema.InputMessagePostRendererV1{
+ ExtraArgs: r.args,
+ Manifests: renderedManifests,
+ },
+ }
+ output, err := r.plugin.Invoke(context.Background(), input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to invoke post-renderer plugin %q: %w", r.plugin.Metadata().Name, err)
+ }
+
+ outputMessage := output.Message.(schema.OutputMessagePostRendererV1)
+
+ // If the binary returned almost nothing, it's likely that it didn't
+ // successfully render anything
+ if len(bytes.TrimSpace(outputMessage.Manifests.Bytes())) == 0 {
+ return nil, fmt.Errorf("post-renderer %q produced empty output", r.plugin.Metadata().Name)
+ }
+
+ return outputMessage.Manifests, nil
+}
diff --git a/pkg/postrenderer/postrenderer_test.go b/pkg/postrenderer/postrenderer_test.go
new file mode 100644
index 000000000..824a1d179
--- /dev/null
+++ b/pkg/postrenderer/postrenderer_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrenderer
+
+import (
+ "bytes"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestNewPostRenderPluginRunWithNoOutput(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "")
+ require.NoError(t, err)
+
+ _, err = renderer.Run(bytes.NewBufferString(""))
+ is.Error(err)
+}
+
+func TestNewPostRenderPluginWithOneArgsRun(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "ARG1")
+ require.NoError(t, err)
+
+ output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
+ is.NoError(err)
+ is.Contains(output.String(), "ARG1")
+}
+
+func TestNewPostRenderPluginWithTwoArgsRun(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "ARG1", "ARG2")
+ require.NoError(t, err)
+
+ output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
+ is.NoError(err)
+ is.Contains(output.String(), "ARG1 ARG2")
+}
diff --git a/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..423a5191e
--- /dev/null
+++ b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml
@@ -0,0 +1,8 @@
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+apiVersion: v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh b/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/pkg/provenance/doc.go b/pkg/provenance/doc.go
index 883c0e724..dd14568d9 100644
--- a/pkg/provenance/doc.go
+++ b/pkg/provenance/doc.go
@@ -14,15 +14,15 @@ limitations under the License.
*/
/*
-Package provenance provides tools for establishing the authenticity of a chart.
+Package provenance provides tools for establishing the authenticity of packages.
In Helm, provenance is established via several factors. The primary factor is the
-cryptographic signature of a chart. Chart authors may sign charts, which in turn
-provide the necessary metadata to ensure the integrity of the chart file, the
-Chart.yaml, and the referenced Docker images.
+cryptographic signature of a package. Package authors may sign packages, which in turn
+provide the necessary metadata to ensure the integrity of the package file, the
+metadata, and the referenced Docker images.
A provenance file is clear-signed. This provides cryptographic verification that
-a particular block of information (Chart.yaml, archive file, images) have not
+a particular block of information (metadata, archive file, images) have not
been tampered with or altered. To learn more, read the GnuPG documentation on
clear signatures:
https://www.gnupg.org/gph/en/manual/x135.html
diff --git a/pkg/provenance/sign.go b/pkg/provenance/sign.go
index cd7664edd..3ffad2765 100644
--- a/pkg/provenance/sign.go
+++ b/pkg/provenance/sign.go
@@ -19,19 +19,16 @@ import (
"bytes"
"crypto"
"encoding/hex"
+ "errors"
+ "fmt"
"io"
"os"
- "path/filepath"
"strings"
- "github.com/pkg/errors"
"golang.org/x/crypto/openpgp" //nolint
"golang.org/x/crypto/openpgp/clearsign" //nolint
"golang.org/x/crypto/openpgp/packet" //nolint
"sigs.k8s.io/yaml"
-
- hapi "helm.sh/helm/v4/pkg/chart/v2"
- "helm.sh/helm/v4/pkg/chart/v2/loader"
)
var defaultPGPConfig = packet.Config{
@@ -57,7 +54,7 @@ type SumCollection struct {
// Verification contains information about a verification operation.
type Verification struct {
- // SignedBy contains the entity that signed a chart.
+ // SignedBy contains the entity that signed a package.
SignedBy *openpgp.Entity
// FileHash is the hash, prepended with the scheme, for the file that was verified.
FileHash string
@@ -67,11 +64,11 @@ type Verification struct {
// Signatory signs things.
//
-// Signatories can be constructed from a PGP private key file using NewFromFiles
+// Signatories can be constructed from a PGP private key file using NewFromFiles,
// or they can be constructed manually by setting the Entity to a valid
// PGP entity.
//
-// The same Signatory can be used to sign or validate multiple charts.
+// The same Signatory can be used to sign or validate multiple packages.
type Signatory struct {
// The signatory for this instance of Helm. This is used for signing.
Entity *openpgp.Entity
@@ -143,7 +140,7 @@ func NewFromKeyring(keyringfile, id string) (*Signatory, error) {
}
}
if vague {
- return s, errors.Errorf("more than one key contain the id %q", id)
+ return s, fmt.Errorf("more than one key contain the id %q", id)
}
s.Entity = candidate
@@ -196,28 +193,20 @@ func (s *Signatory) DecryptKey(fn PassphraseFetcher) error {
return s.Entity.PrivateKey.Decrypt(p)
}
-// ClearSign signs a chart with the given key.
-//
-// This takes the path to a chart archive file and a key, and it returns a clear signature.
+// ClearSign signs package data with the given key and pre-marshalled metadata.
//
-// The Signatory must have a valid Entity.PrivateKey for this to work. If it does
-// not, an error will be returned.
-func (s *Signatory) ClearSign(chartpath string) (string, error) {
+// This is the core signing method that works with data in memory.
+// The Signatory must have a valid Entity.PrivateKey for this to work.
+func (s *Signatory) ClearSign(archiveData []byte, filename string, metadataBytes []byte) (string, error) {
if s.Entity == nil {
return "", errors.New("private key not found")
} else if s.Entity.PrivateKey == nil {
return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys")
}
- if fi, err := os.Stat(chartpath); err != nil {
- return "", err
- } else if fi.IsDir() {
- return "", errors.New("cannot sign a directory")
- }
-
out := bytes.NewBuffer(nil)
- b, err := messageBlock(chartpath)
+ b, err := messageBlock(archiveData, filename, metadataBytes)
if err != nil {
return "", err
}
@@ -236,80 +225,58 @@ func (s *Signatory) ClearSign(chartpath string) (string, error) {
// In other words, if we call Close here, there's a risk that there's an attempt to use the
// private key to sign garbage data (since we know that io.Copy failed, `w` won't contain
// anything useful).
- return "", errors.Wrap(err, "failed to write to clearsign encoder")
+ return "", fmt.Errorf("failed to write to clearsign encoder: %w", err)
}
err = w.Close()
if err != nil {
- return "", errors.Wrap(err, "failed to either sign or armor message block")
+ return "", fmt.Errorf("failed to either sign or armor message block: %w", err)
}
return out.String(), nil
}
-// Verify checks a signature and verifies that it is legit for a chart.
-func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) {
+// Verify checks a signature and verifies that it is legit for package data.
+// This is the core verification method that works with data in memory.
+func (s *Signatory) Verify(archiveData, provData []byte, filename string) (*Verification, error) {
ver := &Verification{}
- for _, fname := range []string{chartpath, sigpath} {
- if fi, err := os.Stat(fname); err != nil {
- return ver, err
- } else if fi.IsDir() {
- return ver, errors.Errorf("%s cannot be a directory", fname)
- }
- }
// First verify the signature
- sig, err := s.decodeSignature(sigpath)
- if err != nil {
- return ver, errors.Wrap(err, "failed to decode signature")
+ block, _ := clearsign.Decode(provData)
+ if block == nil {
+ return ver, errors.New("signature block not found")
}
- by, err := s.verifySignature(sig)
+ by, err := s.verifySignature(block)
if err != nil {
return ver, err
}
ver.SignedBy = by
- // Second, verify the hash of the tarball.
- sum, err := DigestFile(chartpath)
+ // Second, verify the hash of the data.
+ sum, err := Digest(bytes.NewBuffer(archiveData))
if err != nil {
return ver, err
}
- _, sums, err := parseMessageBlock(sig.Plaintext)
+ sums, err := parseMessageBlock(block.Plaintext)
if err != nil {
return ver, err
}
sum = "sha256:" + sum
- basename := filepath.Base(chartpath)
- if sha, ok := sums.Files[basename]; !ok {
- return ver, errors.Errorf("provenance does not contain a SHA for a file named %q", basename)
+ if sha, ok := sums.Files[filename]; !ok {
+ return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", filename)
} else if sha != sum {
- return ver, errors.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum)
+ return ver, fmt.Errorf("sha256 sum does not match for %s: %q != %q", filename, sha, sum)
}
ver.FileHash = sum
- ver.FileName = basename
+ ver.FileName = filename
// TODO: when image signing is added, verify that here.
return ver, nil
}
-func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) {
- data, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
-
- block, _ := clearsign.Decode(data)
- if block == nil {
- // There was no sig in the file.
- return nil, errors.New("signature block not found")
- }
-
- return block, nil
-}
-
// verifySignature verifies that the given block is validly signed, and returns the signer.
func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) {
return openpgp.CheckDetachedSignature(
@@ -319,64 +286,63 @@ func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, er
)
}
-func messageBlock(chartpath string) (*bytes.Buffer, error) {
- var b *bytes.Buffer
- // Checksum the archive
- chash, err := DigestFile(chartpath)
+// messageBlock creates a message block from archive data and pre-marshalled metadata
+func messageBlock(archiveData []byte, filename string, metadataBytes []byte) (*bytes.Buffer, error) {
+ // Checksum the archive data
+ chash, err := Digest(bytes.NewBuffer(archiveData))
if err != nil {
- return b, err
+ return nil, err
}
- base := filepath.Base(chartpath)
sums := &SumCollection{
Files: map[string]string{
- base: "sha256:" + chash,
+ filename: "sha256:" + chash,
},
}
- // Load the archive into memory.
- chart, err := loader.LoadFile(chartpath)
- if err != nil {
- return b, err
- }
-
- // Buffer a hash + checksums YAML file
- data, err := yaml.Marshal(chart.Metadata)
- if err != nil {
- return b, err
- }
-
+ // Buffer the metadata + checksums YAML file
// FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP
// clearsign block. So we use ...\n, which is the YAML document end marker.
// http://yaml.org/spec/1.2/spec.html#id2800168
- b = bytes.NewBuffer(data)
+ b := bytes.NewBuffer(metadataBytes)
b.WriteString("\n...\n")
- data, err = yaml.Marshal(sums)
+ data, err := yaml.Marshal(sums)
if err != nil {
- return b, err
+ return nil, err
}
b.Write(data)
return b, nil
}
-// parseMessageBlock
-func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) {
- // This sucks.
+// parseMessageBlock parses a message block and returns only checksums (metadata ignored like upstream)
+func parseMessageBlock(data []byte) (*SumCollection, error) {
+ sc := &SumCollection{}
+
+ // We ignore metadata, just like upstream - only need checksums for verification
+ if err := ParseMessageBlock(data, nil, sc); err != nil {
+ return sc, err
+ }
+ return sc, nil
+}
+
+// ParseMessageBlock parses a message block containing metadata and checksums.
+//
+// This is the generic version that can work with any metadata type.
+// The metadata parameter should be a pointer to a struct that can be unmarshaled from YAML.
+func ParseMessageBlock(data []byte, metadata interface{}, sums *SumCollection) error {
parts := bytes.Split(data, []byte("\n...\n"))
if len(parts) < 2 {
- return nil, nil, errors.New("message block must have at least two parts")
+ return errors.New("message block must have at least two parts")
}
- md := &hapi.Metadata{}
- sc := &SumCollection{}
-
- if err := yaml.Unmarshal(parts[0], md); err != nil {
- return md, sc, err
+ if metadata != nil {
+ if err := yaml.Unmarshal(parts[0], metadata); err != nil {
+ return err
+ }
}
- err := yaml.Unmarshal(parts[1], sc)
- return md, sc, err
+ return yaml.Unmarshal(parts[1], sums)
}
// loadKey loads a GPG key found at a particular path.
@@ -405,7 +371,7 @@ func loadKeyRing(ringpath string) (openpgp.EntityList, error) {
// It takes the path to the archive file, and returns a string representation of
// the SHA256 sum.
//
-// The intended use of this function is to generate a sum of a chart TGZ file.
+// This function can be used to generate a sum of any package archive file.
func DigestFile(filename string) (string, error) {
f, err := os.Open(filename)
if err != nil {
diff --git a/pkg/provenance/sign_test.go b/pkg/provenance/sign_test.go
index 69a6dad5b..4f2fc7298 100644
--- a/pkg/provenance/sign_test.go
+++ b/pkg/provenance/sign_test.go
@@ -25,6 +25,9 @@ import (
"testing"
pgperrors "golang.org/x/crypto/openpgp/errors" //nolint
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
)
const (
@@ -75,8 +78,33 @@ files:
hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888
`
+// loadChartMetadataForSigning is a test helper that loads chart metadata and marshals it to YAML bytes
+func loadChartMetadataForSigning(t *testing.T, chartPath string) []byte {
+ t.Helper()
+
+ chart, err := loader.LoadFile(chartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataBytes, err := yaml.Marshal(chart.Metadata)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return metadataBytes
+}
+
func TestMessageBlock(t *testing.T) {
- out, err := messageBlock(testChartfile)
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := messageBlock(archiveData, filepath.Base(testChartfile), metadataBytes)
if err != nil {
t.Fatal(err)
}
@@ -88,14 +116,12 @@ func TestMessageBlock(t *testing.T) {
}
func TestParseMessageBlock(t *testing.T) {
- md, sc, err := parseMessageBlock([]byte(testMessageBlock))
+ sc, err := parseMessageBlock([]byte(testMessageBlock))
if err != nil {
t.Fatal(err)
}
- if md.Name != "hashtest" {
- t.Errorf("Expected name %q, got %q", "hashtest", md.Name)
- }
+ // parseMessageBlock only returns checksums, not metadata (like upstream)
if lsc := len(sc.Files); lsc != 1 {
t.Errorf("Expected 1 file, got %d", lsc)
@@ -221,7 +247,15 @@ func TestClearSign(t *testing.T) {
t.Fatal(err)
}
- sig, err := signer.ClearSign(testChartfile)
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes)
if err != nil {
t.Fatal(err)
}
@@ -252,7 +286,15 @@ func TestClearSignError(t *testing.T) {
// ensure that signing always fails
signer.Entity.PrivateKey.PrivateKey = failSigner{}
- sig, err := signer.ClearSign(testChartfile)
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes)
if err == nil {
t.Fatal("didn't get an error from ClearSign but expected one")
}
@@ -262,54 +304,25 @@ func TestClearSignError(t *testing.T) {
}
}
-func TestDecodeSignature(t *testing.T) {
- // Unlike other tests, this does a round-trip test, ensuring that a signature
- // generated by the library can also be verified by the library.
-
+func TestVerify(t *testing.T) {
signer, err := NewFromFiles(testKeyfile, testPubfile)
if err != nil {
t.Fatal(err)
}
- sig, err := signer.ClearSign(testChartfile)
- if err != nil {
- t.Fatal(err)
- }
-
- f, err := os.CreateTemp("", "helm-test-sig-")
- if err != nil {
- t.Fatal(err)
- }
-
- tname := f.Name()
- defer func() {
- os.Remove(tname)
- }()
- f.WriteString(sig)
- f.Close()
-
- sig2, err := signer.decodeSignature(tname)
- if err != nil {
- t.Fatal(err)
- }
-
- by, err := signer.verifySignature(sig2)
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
if err != nil {
t.Fatal(err)
}
- if _, ok := by.Identities[testKeyName]; !ok {
- t.Errorf("Expected identity %q", testKeyName)
- }
-}
-
-func TestVerify(t *testing.T) {
- signer, err := NewFromFiles(testKeyfile, testPubfile)
+ // Read the signature file data
+ sigData, err := os.ReadFile(testSigBlock)
if err != nil {
t.Fatal(err)
}
- if ver, err := signer.Verify(testChartfile, testSigBlock); err != nil {
+ if ver, err := signer.Verify(archiveData, sigData, filepath.Base(testChartfile)); err != nil {
t.Errorf("Failed to pass verify. Err: %s", err)
} else if len(ver.FileHash) == 0 {
t.Error("Verification is missing hash.")
@@ -319,7 +332,13 @@ func TestVerify(t *testing.T) {
t.Errorf("FileName is unexpectedly %q", ver.FileName)
}
- if _, err = signer.Verify(testChartfile, testTamperedSigBlock); err == nil {
+ // Read the tampered signature file data
+ tamperedSigData, err := os.ReadFile(testTamperedSigBlock)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = signer.Verify(archiveData, tamperedSigData, filepath.Base(testChartfile)); err == nil {
t.Errorf("Expected %s to fail.", testTamperedSigBlock)
}
diff --git a/pkg/pusher/ocipusher.go b/pkg/pusher/ocipusher.go
index 5cea78a44..699d27caf 100644
--- a/pkg/pusher/ocipusher.go
+++ b/pkg/pusher/ocipusher.go
@@ -16,7 +16,9 @@ limitations under the License.
package pusher
import (
+ "errors"
"fmt"
+ "io/fs"
"net"
"net/http"
"os"
@@ -24,8 +26,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/internal/tlsutil"
"helm.sh/helm/v4/pkg/chart/v2/loader"
"helm.sh/helm/v4/pkg/registry"
@@ -48,8 +48,8 @@ func (pusher *OCIPusher) Push(chartRef, href string, options ...Option) error {
func (pusher *OCIPusher) push(chartRef, href string) error {
stat, err := os.Stat(chartRef)
if err != nil {
- if os.IsNotExist(err) {
- return errors.Errorf("%s: no such file", chartRef)
+ if errors.Is(err, fs.ErrNotExist) {
+ return fmt.Errorf("%s: no such file", chartRef)
}
return err
}
@@ -117,7 +117,7 @@ func (pusher *OCIPusher) newRegistryClient() (*registry.Client, error) {
tlsutil.WithCAFile(pusher.opts.caFile),
)
if err != nil {
- return nil, errors.Wrap(err, "can't create TLS config for client")
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
}
registryClient, err := registry.NewClient(
diff --git a/pkg/pusher/ocipusher_test.go b/pkg/pusher/ocipusher_test.go
index 760da8404..24f52a7ad 100644
--- a/pkg/pusher/ocipusher_test.go
+++ b/pkg/pusher/ocipusher_test.go
@@ -1,3 +1,5 @@
+//go:build !windows
+
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,7 +18,10 @@ limitations under the License.
package pusher
import (
+ "io"
+ "os"
"path/filepath"
+ "strings"
"testing"
"helm.sh/helm/v4/pkg/registry"
@@ -94,3 +99,330 @@ func TestNewOCIPusher(t *testing.T) {
t.Errorf("Expected NewOCIPusher to contain %p as RegistryClient, got %p", registryClient, op.opts.registryClient)
}
}
+
+func TestOCIPusher_Push_ErrorHandling(t *testing.T) {
+ tests := []struct {
+ name string
+ chartRef string
+ expectedError string
+ setupFunc func() string
+ }{
+ {
+ name: "non-existent file",
+ chartRef: "/non/existent/file.tgz",
+ expectedError: "no such file",
+ },
+ {
+ name: "directory instead of file",
+ expectedError: "cannot push directory, must provide chart archive (.tgz)",
+ setupFunc: func() string {
+ tempDir := t.TempDir()
+ return tempDir
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pusher, err := NewOCIPusher()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ chartRef := tt.chartRef
+ if tt.setupFunc != nil {
+ chartRef = tt.setupFunc()
+ }
+
+ err = pusher.Push(chartRef, "oci://localhost:5000/test")
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+
+ if !strings.Contains(err.Error(), tt.expectedError) {
+ t.Errorf("Expected error containing %q, got %q", tt.expectedError, err.Error())
+ }
+ })
+ }
+}
+
+func TestOCIPusher_newRegistryClient(t *testing.T) {
+ cd := "../../testdata"
+ join := filepath.Join
+ ca, pub, priv := join(cd, "rootca.crt"), join(cd, "crt.pem"), join(cd, "key.pem")
+
+ tests := []struct {
+ name string
+ opts []Option
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "plain HTTP",
+ opts: []Option{WithPlainHTTP(true)},
+ },
+ {
+ name: "with TLS client config",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ca),
+ },
+ },
+ {
+ name: "with insecure skip TLS verify",
+ opts: []Option{
+ WithInsecureSkipTLSVerify(true),
+ },
+ },
+ {
+ name: "with cert and key only",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ""),
+ },
+ },
+ {
+ name: "with CA file only",
+ opts: []Option{
+ WithTLSClientConfig("", "", ca),
+ },
+ },
+ {
+ name: "default client without options",
+ opts: []Option{},
+ },
+ {
+ name: "invalid cert file",
+ opts: []Option{
+ WithTLSClientConfig("/non/existent/cert.pem", priv, ca),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "invalid key file",
+ opts: []Option{
+ WithTLSClientConfig(pub, "/non/existent/key.pem", ca),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "invalid CA file",
+ opts: []Option{
+ WithTLSClientConfig("", "", "/non/existent/ca.crt"),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "combined TLS options",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ca),
+ WithInsecureSkipTLSVerify(true),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pusher, err := NewOCIPusher(tt.opts...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ op, ok := pusher.(*OCIPusher)
+ if !ok {
+ t.Fatal("Expected *OCIPusher")
+ }
+
+ client, err := op.newRegistryClient()
+ if tt.expectError {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if client == nil {
+ t.Fatal("Expected non-nil registry client")
+ }
+ }
+ })
+ }
+}
+
+func TestOCIPusher_Push_ChartOperations(t *testing.T) {
+ // Path to test charts
+ chartPath := "../../pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz"
+ chartWithProvPath := "../../pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz"
+
+ tests := []struct {
+ name string
+ chartRef string
+ href string
+ options []Option
+ setupFunc func(t *testing.T) (string, func())
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "invalid chart file",
+ chartRef: "../../pkg/action/testdata/charts/corrupted-compressed-chart.tgz",
+ href: "oci://localhost:5000/test",
+ expectError: true,
+ errorContains: "does not appear to be a gzipped archive",
+ },
+ {
+ name: "chart read error",
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Create a valid chart file that we'll make unreadable
+ tempDir := t.TempDir()
+ tempChart := filepath.Join(tempDir, "temp-chart.tgz")
+
+ // Copy a valid chart
+ src, err := os.Open(chartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ dst, err := os.Create(tempChart)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := io.Copy(dst, src); err != nil {
+ t.Fatal(err)
+ }
+ dst.Close()
+
+ // Make the file unreadable
+ if err := os.Chmod(tempChart, 0000); err != nil {
+ t.Fatal(err)
+ }
+
+ return tempChart, func() {
+ os.Chmod(tempChart, 0644) // Restore permissions for cleanup
+ }
+ },
+ href: "oci://localhost:5000/test",
+ expectError: true,
+ errorContains: "permission denied",
+ },
+ {
+ name: "push with provenance file - loading phase",
+ chartRef: chartWithProvPath,
+ href: "oci://registry.example.com/charts",
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Copy chart and create a .prov file for it
+ tempDir := t.TempDir()
+ tempChart := filepath.Join(tempDir, "signtest-0.1.0.tgz")
+ tempProv := filepath.Join(tempDir, "signtest-0.1.0.tgz.prov")
+
+ // Copy chart file
+ src, err := os.Open(chartWithProvPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ dst, err := os.Create(tempChart)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := io.Copy(dst, src); err != nil {
+ t.Fatal(err)
+ }
+ dst.Close()
+
+ // Create provenance file
+ if err := os.WriteFile(tempProv, []byte("test provenance data"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ return tempChart, func() {}
+ },
+ expectError: true, // Will fail at the registry push step
+ errorContains: "", // Error depends on registry client behavior
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ chartRef := tt.chartRef
+ var cleanup func()
+
+ if tt.setupFunc != nil {
+ chartRef, cleanup = tt.setupFunc(t)
+ if cleanup != nil {
+ defer cleanup()
+ }
+ }
+
+ // Skip test if chart file doesn't exist and we're not expecting an error
+ if _, err := os.Stat(chartRef); err != nil && !tt.expectError {
+ t.Skipf("Test chart %s not found, skipping test", chartRef)
+ }
+
+ pusher, err := NewOCIPusher(tt.options...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = pusher.Push(chartRef, tt.href)
+
+ if tt.expectError {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestOCIPusher_Push_MultipleOptions(t *testing.T) {
+ chartPath := "../../pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz"
+
+ // Skip test if chart file doesn't exist
+ if _, err := os.Stat(chartPath); err != nil {
+ t.Skipf("Test chart %s not found, skipping test", chartPath)
+ }
+
+ pusher, err := NewOCIPusher()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test that multiple options are applied correctly
+ err = pusher.Push(chartPath, "oci://localhost:5000/test",
+ WithPlainHTTP(true),
+ WithInsecureSkipTLSVerify(true),
+ )
+
+ // We expect an error since we're not actually pushing to a registry
+ if err == nil {
+ t.Fatal("Expected error when pushing without a valid registry")
+ }
+
+ // Verify options were applied
+ op := pusher.(*OCIPusher)
+ if !op.opts.plainHTTP {
+ t.Error("Expected plainHTTP option to be applied")
+ }
+ if !op.opts.insecureSkipTLSverify {
+ t.Error("Expected insecureSkipTLSverify option to be applied")
+ }
+}
diff --git a/pkg/pusher/pusher.go b/pkg/pusher/pusher.go
index 0e07ad2d7..e3c767be9 100644
--- a/pkg/pusher/pusher.go
+++ b/pkg/pusher/pusher.go
@@ -17,7 +17,8 @@ limitations under the License.
package pusher
import (
- "github.com/pkg/errors"
+ "fmt"
+ "slices"
"helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/registry"
@@ -86,12 +87,7 @@ type Provider struct {
// Provides returns true if the given scheme is supported by this Provider.
func (p Provider) Provides(scheme string) bool {
- for _, i := range p.Schemes {
- if i == scheme {
- return true
- }
- }
- return false
+ return slices.Contains(p.Schemes, scheme)
}
// Providers is a collection of Provider objects.
@@ -106,7 +102,7 @@ func (p Providers) ByScheme(scheme string) (Pusher, error) {
return pp.New()
}
}
- return nil, errors.Errorf("scheme %q not supported", scheme)
+ return nil, fmt.Errorf("scheme %q not supported", scheme)
}
var ociProvider = Provider{
diff --git a/pkg/registry/client.go b/pkg/registry/client.go
index ecc7a0d04..95250f8da 100644
--- a/pkg/registry/client.go
+++ b/pkg/registry/client.go
@@ -21,6 +21,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -28,15 +29,11 @@ import (
"os"
"sort"
"strings"
- "sync"
"github.com/Masterminds/semver/v3"
- "github.com/containerd/containerd/remotes"
"github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"oras.land/oras-go/v2"
- "oras.land/oras-go/v2/content"
"oras.land/oras-go/v2/content/memory"
"oras.land/oras-go/v2/registry"
"oras.land/oras-go/v2/registry/remote"
@@ -56,8 +53,6 @@ storing semantic versions, Helm adopts the convention of changing plus (+) to
an underscore (_) in chart version tags when pushing to a registry and back to
a plus (+) when pulling from a registry.`
-var errDeprecatedRemote = errors.New("providing github.com/containerd/containerd/remotes.Resolver via ClientOptResolver is no longer suported")
-
type (
// RemoteClient shadows the ORAS remote.Client interface
// (hiding the ORAS type from Helm client visibility)
@@ -103,27 +98,8 @@ func NewClient(options ...ClientOption) (*Client, error) {
client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename)
}
if client.httpClient == nil {
- type cloner[T any] interface {
- Clone() T
- }
-
- // try to copy (clone) the http.DefaultTransport so any mutations we
- // perform on it (e.g. TLS config) are not reflected globally
- // follow https://github.com/golang/go/issues/39299 for a more elegant
- // solution in the future
- transport := http.DefaultTransport
- if t, ok := transport.(cloner[*http.Transport]); ok {
- transport = t.Clone()
- } else if t, ok := transport.(cloner[http.RoundTripper]); ok {
- // this branch will not be used with go 1.20, it was added
- // optimistically to try to clone if the http.DefaultTransport
- // implementation changes, still the Clone method in that case
- // might not return http.RoundTripper...
- transport = t.Clone()
- }
-
client.httpClient = &http.Client{
- Transport: retry.NewTransport(transport),
+ Transport: NewTransport(client.debug),
}
}
@@ -150,18 +126,28 @@ func NewClient(options ...ClientOption) (*Client, error) {
}
authorizer.SetUserAgent(version.GetUserAgent())
- authorizer.Credential = credentials.Credential(client.credentialsStore)
+ if client.username != "" && client.password != "" {
+ authorizer.Credential = func(_ context.Context, _ string) (auth.Credential, error) {
+ return auth.Credential{Username: client.username, Password: client.password}, nil
+ }
+ } else {
+ authorizer.Credential = credentials.Credential(client.credentialsStore)
+ }
if client.enableCache {
authorizer.Cache = auth.NewCache()
}
-
client.authorizer = &authorizer
}
return client, nil
}
+// Generic returns a GenericClient for low-level OCI operations
+func (c *Client) Generic() *GenericClient {
+ return NewGenericClient(c)
+}
+
// ClientOptDebug returns a function that sets the debug setting on client options set
func ClientOptDebug(debug bool) ClientOption {
return func(client *Client) {
@@ -231,12 +217,6 @@ func ClientOptPlainHTTP() ClientOption {
}
}
-func ClientOptResolver(_ remotes.Resolver) ClientOption {
- return func(c *Client) {
- c.err = errDeprecatedRemote
- }
-}
-
type (
// LoginOption allows specifying various settings on login
LoginOption func(*loginOperation)
@@ -258,19 +238,22 @@ func (c *Client) Login(host string, options ...LoginOption) error {
return err
}
reg.PlainHTTP = c.plainHTTP
+ cred := auth.Credential{Username: c.username, Password: c.password}
+ c.authorizer.ForceAttemptOAuth2 = true
reg.Client = c.authorizer
ctx := context.Background()
- cred, err := c.authorizer.Credential(ctx, host)
- if err != nil {
- return fmt.Errorf("fetching credentials for %q: %w", host, err)
- }
-
if err := reg.Ping(ctx); err != nil {
- return fmt.Errorf("authenticating to %q: %w", host, err)
+ c.authorizer.ForceAttemptOAuth2 = false
+ if err := reg.Ping(ctx); err != nil {
+ return fmt.Errorf("authenticating to %q: %w", host, err)
+ }
}
+ // Always restore to false after probing, to avoid forcing POST to token endpoints like GHCR.
+ c.authorizer.ForceAttemptOAuth2 = false
key := credentials.ServerAddressFromRegistry(host)
+ key = credentials.ServerAddressFromHostname(key)
if err := c.credentialsStore.Put(ctx, key, cred); err != nil {
return err
}
@@ -295,7 +278,7 @@ func LoginOptPlainText(isPlainText bool) LoginOption {
}
}
-func ensureTLSConfig(client *auth.Client) (*tls.Config, error) {
+func ensureTLSConfig(client *auth.Client, setConfig *tls.Config) (*tls.Config, error) {
var transport *http.Transport
switch t := client.Client.Transport.(type) {
@@ -305,6 +288,11 @@ func ensureTLSConfig(client *auth.Client) (*tls.Config, error) {
switch t := t.Base.(type) {
case *http.Transport:
transport = t
+ case *LoggingTransport:
+ switch t := t.RoundTripper.(type) {
+ case *http.Transport:
+ transport = t
+ }
}
}
@@ -314,7 +302,10 @@ func ensureTLSConfig(client *auth.Client) (*tls.Config, error) {
return nil, fmt.Errorf("unable to access TLS client configuration, the provided HTTP Transport is not supported, given: %T", client.Client.Transport)
}
- if transport.TLSClientConfig == nil {
+ switch {
+ case setConfig != nil:
+ transport.TLSClientConfig = setConfig
+ case transport.TLSClientConfig == nil:
transport.TLSClientConfig = &tls.Config{}
}
@@ -324,7 +315,7 @@ func ensureTLSConfig(client *auth.Client) (*tls.Config, error) {
// LoginOptInsecure returns a function that sets the insecure setting on login
func LoginOptInsecure(insecure bool) LoginOption {
return func(o *loginOperation) {
- tlsConfig, err := ensureTLSConfig(o.client.authorizer)
+ tlsConfig, err := ensureTLSConfig(o.client.authorizer, nil)
if err != nil {
panic(err)
@@ -340,7 +331,7 @@ func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption {
if (certFile == "" || keyFile == "") && caFile == "" {
return
}
- tlsConfig, err := ensureTLSConfig(o.client.authorizer)
+ tlsConfig, err := ensureTLSConfig(o.client.authorizer, nil)
if err != nil {
panic(err)
}
@@ -367,6 +358,17 @@ func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption {
}
}
+// LoginOptTLSClientConfigFromConfig returns a function that sets the TLS settings on login
+// receiving the configuration in memory rather than from files.
+func LoginOptTLSClientConfigFromConfig(conf *tls.Config) LoginOption {
+ return func(o *loginOperation) {
+ _, err := ensureTLSConfig(o.client.authorizer, conf)
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
type (
// LogoutOption allows specifying various settings on logout
LogoutOption func(*logoutOperation)
@@ -419,85 +421,31 @@ type (
}
)
-// Pull downloads a chart from a registry
-func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
- parsedRef, err := newReference(ref)
- if err != nil {
- return nil, err
- }
+// processChartPull handles chart-specific processing of a generic pull result
+func (c *Client) processChartPull(genericResult *GenericPullResult, operation *pullOperation) (*PullResult, error) {
+ var err error
- operation := &pullOperation{
- withChart: true, // By default, always download the chart layer
- }
- for _, option := range options {
- option(operation)
- }
- if !operation.withChart && !operation.withProv {
- return nil, errors.New(
- "must specify at least one layer to pull (chart/prov)")
- }
- memoryStore := memory.New()
- allowedMediaTypes := []string{
- ocispec.MediaTypeImageManifest,
- ConfigMediaType,
- }
+ // Chart-specific validation
minNumDescriptors := 1 // 1 for the config
if operation.withChart {
minNumDescriptors++
- allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType)
- }
- if operation.withProv {
- if !operation.ignoreMissingProv {
- minNumDescriptors++
- }
- allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType)
}
-
- var descriptors, layers []ocispec.Descriptor
-
- repository, err := remote.NewRepository(parsedRef.String())
- if err != nil {
- return nil, err
- }
- repository.PlainHTTP = c.plainHTTP
- repository.Client = c.authorizer
-
- ctx := context.Background()
-
- sort.Strings(allowedMediaTypes)
-
- var mu sync.Mutex
- manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{
- CopyGraphOptions: oras.CopyGraphOptions{
- PreCopy: func(_ context.Context, desc ocispec.Descriptor) error {
- mediaType := desc.MediaType
- if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType {
- return errors.Errorf("media type %q is not allowed, found in descriptor with digest: %q", mediaType, desc.Digest)
- }
-
- mu.Lock()
- layers = append(layers, desc)
- mu.Unlock()
- return nil
- },
- },
- })
- if err != nil {
- return nil, err
+ if operation.withProv && !operation.ignoreMissingProv {
+ minNumDescriptors++
}
- descriptors = append(descriptors, manifest)
- descriptors = append(descriptors, layers...)
-
- numDescriptors := len(descriptors)
+ numDescriptors := len(genericResult.Descriptors)
if numDescriptors < minNumDescriptors {
return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d",
minNumDescriptors, numDescriptors)
}
+
+ // Find chart-specific descriptors
var configDescriptor *ocispec.Descriptor
var chartDescriptor *ocispec.Descriptor
var provDescriptor *ocispec.Descriptor
- for _, descriptor := range descriptors {
+
+ for _, descriptor := range genericResult.Descriptors {
d := descriptor
switch d.MediaType {
case ConfigMediaType:
@@ -511,6 +459,8 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType)
}
}
+
+ // Chart-specific validation
if configDescriptor == nil {
return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType)
}
@@ -518,6 +468,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s",
ChartLayerMediaType)
}
+
var provMissing bool
if operation.withProv && provDescriptor == nil {
if operation.ignoreMissingProv {
@@ -527,10 +478,12 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
ProvLayerMediaType)
}
}
+
+ // Build chart-specific result
result := &PullResult{
Manifest: &DescriptorPullSummary{
- Digest: manifest.Digest.String(),
- Size: manifest.Size,
+ Digest: genericResult.Manifest.Digest.String(),
+ Size: genericResult.Manifest.Size,
},
Config: &DescriptorPullSummary{
Digest: configDescriptor.Digest.String(),
@@ -538,15 +491,18 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
},
Chart: &DescriptorPullSummaryWithMeta{},
Prov: &DescriptorPullSummary{},
- Ref: parsedRef.String(),
+ Ref: genericResult.Ref,
}
- result.Manifest.Data, err = content.FetchAll(ctx, memoryStore, manifest)
+ // Fetch data using generic client
+ genericClient := c.Generic()
+
+ result.Manifest.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest)
if err != nil {
- return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", manifest.Digest, err)
+ return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", genericResult.Manifest.Digest, err)
}
- result.Config.Data, err = content.FetchAll(ctx, memoryStore, *configDescriptor)
+ result.Config.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *configDescriptor)
if err != nil {
return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", configDescriptor.Digest, err)
}
@@ -556,7 +512,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
}
if operation.withChart {
- result.Chart.Data, err = content.FetchAll(ctx, memoryStore, *chartDescriptor)
+ result.Chart.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *chartDescriptor)
if err != nil {
return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", chartDescriptor.Digest, err)
}
@@ -565,7 +521,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
}
if operation.withProv && !provMissing {
- result.Prov.Data, err = content.FetchAll(ctx, memoryStore, *provDescriptor)
+ result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provDescriptor)
if err != nil {
return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", provDescriptor.Digest, err)
}
@@ -584,6 +540,44 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
return result, nil
}
+// Pull downloads a chart from a registry
+func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
+ operation := &pullOperation{
+ withChart: true, // By default, always download the chart layer
+ }
+ for _, option := range options {
+ option(operation)
+ }
+ if !operation.withChart && !operation.withProv {
+ return nil, errors.New(
+ "must specify at least one layer to pull (chart/prov)")
+ }
+
+ // Build allowed media types for chart pull
+ allowedMediaTypes := []string{
+ ocispec.MediaTypeImageManifest,
+ ConfigMediaType,
+ }
+ if operation.withChart {
+ allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType)
+ }
+ if operation.withProv {
+ allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType)
+ }
+
+ // Use generic client for the pull operation
+ genericClient := c.Generic()
+ genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{
+ AllowedMediaTypes: allowedMediaTypes,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Process the result with chart-specific logic
+ return c.processChartPull(genericResult, operation)
+}
+
// PullOptWithChart returns a function that sets the withChart setting on pull
func PullOptWithChart(withChart bool) PullOption {
return func(operation *pullOperation) {
@@ -694,19 +688,9 @@ func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResu
})
ociAnnotations := generateOCIAnnotations(meta, operation.creationTime)
- manifest := ocispec.Manifest{
- Versioned: specs.Versioned{SchemaVersion: 2},
- Config: configDescriptor,
- Layers: layers,
- Annotations: ociAnnotations,
- }
-
- manifestData, err := json.Marshal(manifest)
- if err != nil {
- return nil, err
- }
- manifestDescriptor, err := oras.TagBytes(ctx, memoryStore, ocispec.MediaTypeImageManifest, manifestData, ref)
+ manifestDescriptor, err := c.tagManifest(ctx, memoryStore, configDescriptor,
+ layers, ociAnnotations, parsedRef)
if err != nil {
return nil, err
}
@@ -771,7 +755,7 @@ func PushOptStrictMode(strictMode bool) PushOption {
}
}
-// PushOptCreationDate returns a function that sets the creation time
+// PushOptCreationTime returns a function that sets the creation time
func PushOptCreationTime(creationTime string) PushOption {
return func(operation *pushOperation) {
operation.creationTime = creationTime
@@ -830,6 +814,7 @@ func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) {
return desc, err
}
remoteRepository.PlainHTTP = c.plainHTTP
+ remoteRepository.Client = c.authorizer
parsedReference, err := newReference(ref)
if err != nil {
@@ -842,12 +827,12 @@ func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) {
}
// ValidateReference for path and version
-func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, error) {
+func (c *Client) ValidateReference(ref, version string, u *url.URL) (string, *url.URL, error) {
var tag string
registryReference, err := newReference(u.Host + u.Path)
if err != nil {
- return nil, err
+ return "", nil, err
}
if version == "" {
@@ -855,14 +840,14 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e
version = registryReference.Tag
} else {
if registryReference.Tag != "" && registryReference.Tag != version {
- return nil, errors.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag)
+ return "", nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag)
}
}
if registryReference.Digest != "" {
if version == "" {
// Install by digest only
- return u, nil
+ return "", u, nil
}
u.Path = fmt.Sprintf("%s@%s", registryReference.Repository, registryReference.Digest)
@@ -871,12 +856,12 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e
desc, err := c.Resolve(path)
if err != nil {
// The resource does not have to be tagged when digest is specified
- return u, nil
+ return "", u, nil
}
if desc.Digest.String() != registryReference.Digest {
- return nil, errors.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest)
+ return "", nil, fmt.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest)
}
- return u, nil
+ return registryReference.Digest, u, nil
}
// Evaluate whether an explicit version has been provided. Otherwise, determine version to use
@@ -887,10 +872,10 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e
// Retrieve list of repository tags
tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme)))
if err != nil {
- return nil, err
+ return "", nil, err
}
if len(tags) == 0 {
- return nil, errors.Errorf("Unable to locate any tags in provided repository: %s", ref)
+ return "", nil, fmt.Errorf("unable to locate any tags in provided repository: %s", ref)
}
// Determine if version provided
@@ -899,11 +884,33 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e
// If semver constraint string, try to find a match
tag, err = GetTagMatchingVersionOrConstraint(tags, version)
if err != nil {
- return nil, err
+ return "", nil, err
}
}
u.Path = fmt.Sprintf("%s:%s", registryReference.Repository, tag)
+ // desc, err := c.Resolve(u.Path)
+
+ return "", u, err
+}
+
+// tagManifest prepares and tags a manifest in memory storage
+func (c *Client) tagManifest(ctx context.Context, memoryStore *memory.Store,
+ configDescriptor ocispec.Descriptor, layers []ocispec.Descriptor,
+ ociAnnotations map[string]string, parsedRef reference) (ocispec.Descriptor, error) {
+
+ manifest := ocispec.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ Config: configDescriptor,
+ Layers: layers,
+ Annotations: ociAnnotations,
+ }
+
+ manifestData, err := json.Marshal(manifest)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
- return u, err
+ return oras.TagBytes(ctx, memoryStore, ocispec.MediaTypeImageManifest,
+ manifestData, parsedRef.String())
}
diff --git a/pkg/registry/client_http_test.go b/pkg/registry/client_http_test.go
index 043fd4205..dddd29ee9 100644
--- a/pkg/registry/client_http_test.go
+++ b/pkg/registry/client_http_test.go
@@ -32,10 +32,7 @@ type HTTPRegistryClientTestSuite struct {
func (suite *HTTPRegistryClientTestSuite) SetupSuite() {
// init test client
- dockerRegistry := setup(&suite.TestSuite, false, false)
-
- // Start Docker registry
- go dockerRegistry.ListenAndServe()
+ setup(&suite.TestSuite, false, false)
}
func (suite *HTTPRegistryClientTestSuite) TearDownSuite() {
diff --git a/pkg/registry/client_insecure_tls_test.go b/pkg/registry/client_insecure_tls_test.go
index accbf1670..03354475a 100644
--- a/pkg/registry/client_insecure_tls_test.go
+++ b/pkg/registry/client_insecure_tls_test.go
@@ -29,10 +29,7 @@ type InsecureTLSRegistryClientTestSuite struct {
func (suite *InsecureTLSRegistryClientTestSuite) SetupSuite() {
// init test client
- dockerRegistry := setup(&suite.TestSuite, true, true)
-
- // Start Docker registry
- go dockerRegistry.ListenAndServe()
+ setup(&suite.TestSuite, true, true)
}
func (suite *InsecureTLSRegistryClientTestSuite) TearDownSuite() {
diff --git a/pkg/registry/client_test.go b/pkg/registry/client_test.go
index 4c5a78849..6ae32e342 100644
--- a/pkg/registry/client_test.go
+++ b/pkg/registry/client_test.go
@@ -17,17 +17,106 @@ limitations under the License.
package registry
import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
+ "strings"
"testing"
- "github.com/containerd/containerd/remotes"
- "github.com/stretchr/testify/assert"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/require"
+ "oras.land/oras-go/v2/content/memory"
)
-func TestNewClientResolverNotSupported(t *testing.T) {
- var r remotes.Resolver
+// Inspired by oras test
+// https://github.com/oras-project/oras-go/blob/05a2b09cbf2eab1df691411884dc4df741ec56ab/content_test.go#L1802
+func TestTagManifestTransformsReferences(t *testing.T) {
+ memStore := memory.New()
+ client := &Client{out: io.Discard}
+ ctx := t.Context()
- client, err := NewClient(ClientOptResolver(r))
- require.Equal(t, err, errDeprecatedRemote)
- assert.Nil(t, client)
+ refWithPlus := "test-registry.io/charts/test:1.0.0+metadata"
+ expectedRef := "test-registry.io/charts/test:1.0.0_metadata" // + becomes _
+
+ configDesc := ocispec.Descriptor{MediaType: ConfigMediaType, Digest: "sha256:config", Size: 100}
+ layers := []ocispec.Descriptor{{MediaType: ChartLayerMediaType, Digest: "sha256:layer", Size: 200}}
+
+ parsedRef, err := newReference(refWithPlus)
+ require.NoError(t, err)
+
+ desc, err := client.tagManifest(ctx, memStore, configDesc, layers, nil, parsedRef)
+ require.NoError(t, err)
+
+ transformedDesc, err := memStore.Resolve(ctx, expectedRef)
+ require.NoError(t, err, "Should find the reference with _ instead of +")
+ require.Equal(t, desc.Digest, transformedDesc.Digest)
+
+ _, err = memStore.Resolve(ctx, refWithPlus)
+ require.Error(t, err, "Should NOT find the reference with the original +")
+}
+
+// Verifies that Login always restores ForceAttemptOAuth2 to false on success.
+func TestLogin_ResetsForceAttemptOAuth2_OnSuccess(t *testing.T) {
+ t.Parallel()
+
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/v2/" {
+ // Accept either HEAD or GET
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ http.NotFound(w, r)
+ }))
+ defer srv.Close()
+
+ host := strings.TrimPrefix(srv.URL, "http://")
+
+ credFile := filepath.Join(t.TempDir(), "config.json")
+ c, err := NewClient(
+ ClientOptWriter(io.Discard),
+ ClientOptCredentialsFile(credFile),
+ )
+ if err != nil {
+ t.Fatalf("NewClient error: %v", err)
+ }
+
+ if c.authorizer == nil || c.authorizer.ForceAttemptOAuth2 {
+ t.Fatalf("expected ForceAttemptOAuth2 default to be false")
+ }
+
+ // Call Login with plain HTTP against our test server
+ if err := c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p")); err != nil {
+ t.Fatalf("Login error: %v", err)
+ }
+
+ if c.authorizer.ForceAttemptOAuth2 {
+ t.Errorf("ForceAttemptOAuth2 should be false after successful Login")
+ }
+}
+
+// Verifies that Login restores ForceAttemptOAuth2 to false even when ping fails.
+func TestLogin_ResetsForceAttemptOAuth2_OnFailure(t *testing.T) {
+ t.Parallel()
+
+ // Start and immediately close, so connections will fail
+ srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
+ host := strings.TrimPrefix(srv.URL, "http://")
+ srv.Close()
+
+ credFile := filepath.Join(t.TempDir(), "config.json")
+ c, err := NewClient(
+ ClientOptWriter(io.Discard),
+ ClientOptCredentialsFile(credFile),
+ )
+ if err != nil {
+ t.Fatalf("NewClient error: %v", err)
+ }
+
+ // Invoke Login, expect an error but ForceAttemptOAuth2 must end false
+ _ = c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p"))
+
+ if c.authorizer.ForceAttemptOAuth2 {
+ t.Errorf("ForceAttemptOAuth2 should be false after failed Login")
+ }
}
diff --git a/pkg/registry/client_tls_test.go b/pkg/registry/client_tls_test.go
index 156ae4816..2bf1750a9 100644
--- a/pkg/registry/client_tls_test.go
+++ b/pkg/registry/client_tls_test.go
@@ -17,6 +17,8 @@ limitations under the License.
package registry
import (
+ "crypto/tls"
+ "crypto/x509"
"os"
"testing"
@@ -29,10 +31,7 @@ type TLSRegistryClientTestSuite struct {
func (suite *TLSRegistryClientTestSuite) SetupSuite() {
// init test client
- dockerRegistry := setup(&suite.TestSuite, true, false)
-
- // Start Docker registry
- go dockerRegistry.ListenAndServe()
+ setup(&suite.TestSuite, true, false)
}
func (suite *TLSRegistryClientTestSuite) TearDownSuite() {
@@ -52,6 +51,30 @@ func (suite *TLSRegistryClientTestSuite) Test_0_Login() {
suite.Nil(err, "no error logging into registry with good credentials")
}
+func (suite *TLSRegistryClientTestSuite) Test_1_Login() {
+ err := suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth("badverybad", "ohsobad"),
+ LoginOptTLSClientConfigFromConfig(&tls.Config{}))
+ suite.NotNil(err, "error logging into registry with bad credentials")
+
+ // Create a *tls.Config from tlsCert, tlsKey, and tlsCA.
+ cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
+ suite.Nil(err, "error loading x509 key pair")
+ rootCAs := x509.NewCertPool()
+ caCert, err := os.ReadFile(tlsCA)
+ suite.Nil(err, "error reading CA certificate")
+ rootCAs.AppendCertsFromPEM(caCert)
+ conf := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: rootCAs,
+ }
+
+ err = suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth(testUsername, testPassword),
+ LoginOptTLSClientConfigFromConfig(conf))
+ suite.Nil(err, "no error logging into registry with good credentials")
+}
+
func (suite *TLSRegistryClientTestSuite) Test_1_Push() {
testPush(&suite.TestSuite)
}
diff --git a/pkg/registry/generic.go b/pkg/registry/generic.go
new file mode 100644
index 000000000..b82132338
--- /dev/null
+++ b/pkg/registry/generic.go
@@ -0,0 +1,162 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "sort"
+ "sync"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "oras.land/oras-go/v2"
+ "oras.land/oras-go/v2/content"
+ "oras.land/oras-go/v2/content/memory"
+ "oras.land/oras-go/v2/registry/remote"
+ "oras.land/oras-go/v2/registry/remote/auth"
+ "oras.land/oras-go/v2/registry/remote/credentials"
+)
+
+// GenericClient provides low-level OCI operations without artifact-specific assumptions
+type GenericClient struct {
+ debug bool
+ enableCache bool
+ credentialsFile string
+ username string
+ password string
+ out io.Writer
+ authorizer *auth.Client
+ registryAuthorizer RemoteClient
+ credentialsStore credentials.Store
+ httpClient *http.Client
+ plainHTTP bool
+}
+
+// GenericPullOptions configures a generic pull operation
+type GenericPullOptions struct {
+ // MediaTypes to include in the pull (empty means all)
+ AllowedMediaTypes []string
+ // Skip descriptors with these media types
+ SkipMediaTypes []string
+ // Custom PreCopy function for filtering
+ PreCopy func(context.Context, ocispec.Descriptor) error
+}
+
+// GenericPullResult contains the result of a generic pull operation
+type GenericPullResult struct {
+ Manifest ocispec.Descriptor
+ Descriptors []ocispec.Descriptor
+ MemoryStore *memory.Store
+ Ref string
+}
+
+// NewGenericClient creates a new generic OCI client from an existing Client
+func NewGenericClient(client *Client) *GenericClient {
+ return &GenericClient{
+ debug: client.debug,
+ enableCache: client.enableCache,
+ credentialsFile: client.credentialsFile,
+ username: client.username,
+ password: client.password,
+ out: client.out,
+ authorizer: client.authorizer,
+ registryAuthorizer: client.registryAuthorizer,
+ credentialsStore: client.credentialsStore,
+ httpClient: client.httpClient,
+ plainHTTP: client.plainHTTP,
+ }
+}
+
+// PullGeneric performs a generic OCI pull without artifact-specific assumptions
+func (c *GenericClient) PullGeneric(ref string, options GenericPullOptions) (*GenericPullResult, error) {
+ parsedRef, err := newReference(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ memoryStore := memory.New()
+ var descriptors []ocispec.Descriptor
+
+ // Set up repository with authentication and configuration
+ repository, err := remote.NewRepository(parsedRef.String())
+ if err != nil {
+ return nil, err
+ }
+ repository.PlainHTTP = c.plainHTTP
+ repository.Client = c.authorizer
+
+ ctx := context.Background()
+
+ // Prepare allowed media types for filtering
+ var allowedMediaTypes []string
+ if len(options.AllowedMediaTypes) > 0 {
+ allowedMediaTypes = make([]string, len(options.AllowedMediaTypes))
+ copy(allowedMediaTypes, options.AllowedMediaTypes)
+ sort.Strings(allowedMediaTypes)
+ }
+
+ var mu sync.Mutex
+ manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{
+ CopyGraphOptions: oras.CopyGraphOptions{
+ PreCopy: func(ctx context.Context, desc ocispec.Descriptor) error {
+ // Apply custom PreCopy function if provided
+ if options.PreCopy != nil {
+ if err := options.PreCopy(ctx, desc); err != nil {
+ return err
+ }
+ }
+
+ mediaType := desc.MediaType
+
+ // Skip media types if specified
+ for _, skipType := range options.SkipMediaTypes {
+ if mediaType == skipType {
+ return oras.SkipNode
+ }
+ }
+
+ // Filter by allowed media types if specified
+ if len(allowedMediaTypes) > 0 {
+ if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType {
+ return oras.SkipNode
+ }
+ }
+
+ mu.Lock()
+ descriptors = append(descriptors, desc)
+ mu.Unlock()
+ return nil
+ },
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &GenericPullResult{
+ Manifest: manifest,
+ Descriptors: descriptors,
+ MemoryStore: memoryStore,
+ Ref: parsedRef.String(),
+ }, nil
+}
+
+// GetDescriptorData retrieves the data for a specific descriptor
+func (c *GenericClient) GetDescriptorData(store *memory.Store, desc ocispec.Descriptor) ([]byte, error) {
+ return content.FetchAll(context.Background(), store, desc)
+}
diff --git a/pkg/registry/main_test.go b/pkg/registry/main_test.go
new file mode 100644
index 000000000..4f6e11e4f
--- /dev/null
+++ b/pkg/registry/main_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "net"
+ "os"
+ "testing"
+
+ "github.com/foxcpp/go-mockdns"
+)
+
+func TestMain(m *testing.M) {
+ // A mock DNS server needed for TLS connection testing.
+ var srv *mockdns.Server
+ var err error
+
+ srv, err = mockdns.NewServer(map[string]mockdns.Zone{
+ "helm-test-registry.": {
+ A: []string{"127.0.0.1"},
+ },
+ }, false)
+ if err != nil {
+ panic(err)
+ }
+
+ saveDialFunction := net.DefaultResolver.Dial
+ srv.PatchNet(net.DefaultResolver)
+
+ // Run all tests in the package
+ code := m.Run()
+
+ net.DefaultResolver.Dial = saveDialFunction
+ _ = srv.Close()
+
+ os.Exit(code)
+}
diff --git a/pkg/registry/plugin.go b/pkg/registry/plugin.go
new file mode 100644
index 000000000..991bace76
--- /dev/null
+++ b/pkg/registry/plugin.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Plugin-specific constants
+const (
+ // PluginArtifactType is the artifact type for Helm plugins
+ PluginArtifactType = "application/vnd.helm.plugin.v1+json"
+)
+
+// PluginPullOptions configures a plugin pull operation
+type PluginPullOptions struct {
+ // PluginName specifies the expected plugin name for layer validation
+ PluginName string
+}
+
+// PluginPullResult contains the result of a plugin pull operation
+type PluginPullResult struct {
+ Manifest ocispec.Descriptor
+ PluginData []byte
+ Prov struct {
+ Data []byte
+ }
+ Ref string
+ PluginName string
+}
+
+// PullPlugin downloads a plugin from an OCI registry using artifact type
+func (c *Client) PullPlugin(ref string, pluginName string, options ...PluginPullOption) (*PluginPullResult, error) {
+ operation := &pluginPullOperation{
+ pluginName: pluginName,
+ }
+ for _, option := range options {
+ option(operation)
+ }
+
+ // Use generic client for the pull operation with artifact type filtering
+ genericClient := c.Generic()
+ genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{
+ // Allow manifests and all layer types - we'll validate artifact type after download
+ AllowedMediaTypes: []string{
+ ocispec.MediaTypeImageManifest,
+ "application/vnd.oci.image.layer.v1.tar",
+ "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Process the result with plugin-specific logic
+ return c.processPluginPull(genericResult, operation.pluginName)
+}
+
+// processPluginPull handles plugin-specific processing of a generic pull result using artifact type
+func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName string) (*PluginPullResult, error) {
+ // First validate that this is actually a plugin artifact
+ manifestData, err := c.Generic().GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve manifest: %w", err)
+ }
+
+ // Parse the manifest to check artifact type
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(manifestData, &manifest); err != nil {
+ return nil, fmt.Errorf("unable to parse manifest: %w", err)
+ }
+
+ // Validate artifact type (for OCI v1.1+ manifests)
+ if manifest.ArtifactType != "" && manifest.ArtifactType != PluginArtifactType {
+ return nil, fmt.Errorf("expected artifact type %s, got %s", PluginArtifactType, manifest.ArtifactType)
+ }
+
+ // For backwards compatibility, also check config media type if no artifact type
+ if manifest.ArtifactType == "" && manifest.Config.MediaType != PluginArtifactType {
+ return nil, fmt.Errorf("expected config media type %s for legacy compatibility, got %s", PluginArtifactType, manifest.Config.MediaType)
+ }
+
+ // Find the plugin tarball and optional provenance using NAME-VERSION.tgz format
+ var pluginDescriptor *ocispec.Descriptor
+ var provenanceDescriptor *ocispec.Descriptor
+ var foundProvenanceName string
+
+ // Look for layers with the expected titles/annotations
+ for _, layer := range manifest.Layers {
+ d := layer
+ // Check for title annotation
+ if title, exists := d.Annotations[ocispec.AnnotationTitle]; exists {
+ // Check if this looks like a plugin tarball: {pluginName}-{version}.tgz
+ if pluginDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz") {
+ pluginDescriptor = &d
+ }
+ // Check if this looks like a plugin provenance: {pluginName}-{version}.tgz.prov
+ if provenanceDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz.prov") {
+ provenanceDescriptor = &d
+ foundProvenanceName = title
+ }
+ }
+ }
+
+ // Plugin tarball is required
+ if pluginDescriptor == nil {
+ return nil, fmt.Errorf("required layer matching pattern %s-VERSION.tgz not found in manifest", pluginName)
+ }
+
+ // Build plugin-specific result
+ result := &PluginPullResult{
+ Manifest: genericResult.Manifest,
+ Ref: genericResult.Ref,
+ PluginName: pluginName,
+ }
+
+ // Fetch plugin data using generic client
+ genericClient := c.Generic()
+ result.PluginData, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *pluginDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve plugin data with digest %s: %w", pluginDescriptor.Digest, err)
+ }
+
+ // Fetch provenance data if available
+ if provenanceDescriptor != nil {
+ result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provenanceDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve provenance data with digest %s: %w", provenanceDescriptor.Digest, err)
+ }
+ }
+
+ fmt.Fprintf(c.out, "Pulled plugin: %s\n", result.Ref)
+ fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+ if result.Prov.Data != nil {
+ fmt.Fprintf(c.out, "Provenance: %s\n", foundProvenanceName)
+ }
+
+ if strings.Contains(result.Ref, "_") {
+ fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+ fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+ }
+
+ return result, nil
+}
+
+// Plugin pull operation types and options
+type (
+ pluginPullOperation struct {
+ pluginName string
+ withProv bool
+ }
+
+ // PluginPullOption allows customizing plugin pull operations
+ PluginPullOption func(*pluginPullOperation)
+)
+
+// PluginPullOptWithPluginName sets the plugin name for validation
+func PluginPullOptWithPluginName(name string) PluginPullOption {
+ return func(operation *pluginPullOperation) {
+ operation.pluginName = name
+ }
+}
+
+// GetPluginName extracts the plugin name from an OCI reference using proper reference parsing
+func GetPluginName(source string) (string, error) {
+ ref, err := newReference(source)
+ if err != nil {
+ return "", fmt.Errorf("invalid OCI reference: %w", err)
+ }
+
+ // Extract plugin name from the repository path
+ // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> Repository: "user/plugin-name"
+ repository := ref.Repository
+ if repository == "" {
+ return "", fmt.Errorf("invalid OCI reference: missing repository")
+ }
+
+ // Get the last part of the repository path as the plugin name
+ parts := strings.Split(repository, "/")
+ pluginName := parts[len(parts)-1]
+
+ if pluginName == "" {
+ return "", fmt.Errorf("invalid OCI reference: cannot determine plugin name from repository %s", repository)
+ }
+
+ return pluginName, nil
+}
+
+// PullPluginOptWithProv configures the pull to fetch provenance data
+func PullPluginOptWithProv(withProv bool) PluginPullOption {
+ return func(operation *pluginPullOperation) {
+ operation.withProv = withProv
+ }
+}
diff --git a/pkg/registry/plugin_test.go b/pkg/registry/plugin_test.go
new file mode 100644
index 000000000..f8525829c
--- /dev/null
+++ b/pkg/registry/plugin_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "testing"
+)
+
+func TestGetPluginName(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ expected string
+ expectErr bool
+ }{
+ {
+ name: "valid OCI reference with tag",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with digest",
+ source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference without tag",
+ source: "oci://ghcr.io/user/plugin-name",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with multiple path segments",
+ source: "oci://registry.example.com/org/team/plugin-name:latest",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with plus signs in tag",
+ source: "oci://registry.example.com/user/plugin-name:v1.0.0+build.1",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference - single path segment",
+ source: "oci://registry.example.com/plugin",
+ expected: "plugin",
+ },
+ {
+ name: "invalid OCI reference - no repository",
+ source: "oci://registry.example.com",
+ expectErr: true,
+ },
+ {
+ name: "invalid OCI reference - malformed",
+ source: "not-an-oci-reference",
+ expectErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pluginName, err := GetPluginName(tt.source)
+
+ if tt.expectErr {
+ if err == nil {
+ t.Errorf("expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ return
+ }
+
+ if pluginName != tt.expected {
+ t.Errorf("expected plugin name %q, got %q", tt.expected, pluginName)
+ }
+ })
+ }
+}
diff --git a/pkg/registry/reference_test.go b/pkg/registry/reference_test.go
index 31317d18f..b6872cc37 100644
--- a/pkg/registry/reference_test.go
+++ b/pkg/registry/reference_test.go
@@ -19,6 +19,7 @@ package registry
import "testing"
func verify(t *testing.T, actual reference, registry, repository, tag, digest string) {
+ t.Helper()
if registry != actual.orasReference.Registry {
t.Errorf("Oras reference registry expected %v actual %v", registry, actual.Registry)
}
diff --git a/pkg/registry/transport.go b/pkg/registry/transport.go
new file mode 100644
index 000000000..9d6a37326
--- /dev/null
+++ b/pkg/registry/transport.go
@@ -0,0 +1,175 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log/slog"
+ "mime"
+ "net/http"
+ "strings"
+ "sync/atomic"
+
+ "oras.land/oras-go/v2/registry/remote/retry"
+)
+
+var (
+ // requestCount records the number of logged request-response pairs and will
+ // be used as the unique id for the next pair.
+ requestCount atomic.Uint64
+
+ // toScrub is a set of headers that should be scrubbed from the log.
+ toScrub = []string{
+ "Authorization",
+ "Set-Cookie",
+ }
+)
+
+// payloadSizeLimit limits the maximum size of the response body to be printed.
+const payloadSizeLimit int64 = 16 * 1024 // 16 KiB
+
+// LoggingTransport is an http.RoundTripper that keeps track of the in-flight
+// request and add hooks to report HTTP tracing events.
+type LoggingTransport struct {
+ http.RoundTripper
+}
+
+// NewTransport creates and returns a new instance of LoggingTransport
+func NewTransport(debug bool) *retry.Transport {
+ type cloner[T any] interface {
+ Clone() T
+ }
+
+ // try to copy (clone) the http.DefaultTransport so any mutations we
+ // perform on it (e.g. TLS config) are not reflected globally
+ // follow https://github.com/golang/go/issues/39299 for a more elegant
+ // solution in the future
+ transport := http.DefaultTransport
+ if t, ok := transport.(cloner[*http.Transport]); ok {
+ transport = t.Clone()
+ } else if t, ok := transport.(cloner[http.RoundTripper]); ok {
+ // this branch will not be used with go 1.20, it was added
+ // optimistically to try to clone if the http.DefaultTransport
+ // implementation changes, still the Clone method in that case
+ // might not return http.RoundTripper...
+ transport = t.Clone()
+ }
+ if debug {
+ transport = &LoggingTransport{RoundTripper: transport}
+ }
+
+ return retry.NewTransport(transport)
+}
+
+// RoundTrip calls base round trip while keeping track of the current request.
+func (t *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ id := requestCount.Add(1) - 1
+
+ slog.Debug(req.Method, "id", id, "url", req.URL, "header", logHeader(req.Header))
+ resp, err = t.RoundTripper.RoundTrip(req)
+ if err != nil {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "error", err)
+ } else if resp != nil {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "status", resp.Status, "header", logHeader(resp.Header), "body", logResponseBody(resp))
+ } else {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "response", "nil")
+ }
+
+ return resp, err
+}
+
+// logHeader prints out the provided header keys and values, with auth header scrubbed.
+func logHeader(header http.Header) string {
+ if len(header) > 0 {
+ headers := []string{}
+ for k, v := range header {
+ for _, h := range toScrub {
+ if strings.EqualFold(k, h) {
+ v = []string{"*****"}
+ }
+ }
+ headers = append(headers, fmt.Sprintf(" %q: %q", k, strings.Join(v, ", ")))
+ }
+ return strings.Join(headers, "\n")
+ }
+ return " Empty header"
+}
+
+// logResponseBody prints out the response body if it is printable and within size limit.
+func logResponseBody(resp *http.Response) string {
+ if resp.Body == nil || resp.Body == http.NoBody {
+ return " No response body to print"
+ }
+
+ // non-applicable body is not printed and remains untouched for subsequent processing
+ contentType := resp.Header.Get("Content-Type")
+ if contentType == "" {
+ return " Response body without a content type is not printed"
+ }
+ if !isPrintableContentType(contentType) {
+ return fmt.Sprintf(" Response body of content type %q is not printed", contentType)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ body := resp.Body
+ // restore the body by concatenating the read body with the remaining body
+ resp.Body = struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: io.MultiReader(buf, body),
+ Closer: body,
+ }
+ // read the body up to limit+1 to check if the body exceeds the limit
+ if _, err := io.CopyN(buf, body, payloadSizeLimit+1); err != nil && err != io.EOF {
+ return fmt.Sprintf(" Error reading response body: %v", err)
+ }
+
+ readBody := buf.String()
+ if len(readBody) == 0 {
+ return " Response body is empty"
+ }
+ if containsCredentials(readBody) {
+ return " Response body redacted due to potential credentials"
+ }
+ if len(readBody) > int(payloadSizeLimit) {
+ return readBody[:payloadSizeLimit] + "\n...(truncated)"
+ }
+ return readBody
+}
+
+// isPrintableContentType returns true if the contentType is printable.
+func isPrintableContentType(contentType string) bool {
+ mediaType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return false
+ }
+
+ switch mediaType {
+ case "application/json", // JSON types
+ "text/plain", "text/html": // text types
+ return true
+ }
+ return strings.HasSuffix(mediaType, "+json")
+}
+
+// containsCredentials returns true if the body contains potential credentials.
+func containsCredentials(body string) bool {
+ return strings.Contains(body, `"token"`) || strings.Contains(body, `"access_token"`)
+}
diff --git a/pkg/registry/transport_test.go b/pkg/registry/transport_test.go
new file mode 100644
index 000000000..b4990c526
--- /dev/null
+++ b/pkg/registry/transport_test.go
@@ -0,0 +1,399 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "testing"
+)
+
+var errMockRead = errors.New("mock read error")
+
+type errorReader struct{}
+
+func (e *errorReader) Read(_ []byte) (n int, err error) {
+ return 0, errMockRead
+}
+
+func Test_isPrintableContentType(t *testing.T) {
+ tests := []struct {
+ name string
+ contentType string
+ want bool
+ }{
+ {
+ name: "Empty content type",
+ contentType: "",
+ want: false,
+ },
+ {
+ name: "General JSON type",
+ contentType: "application/json",
+ want: true,
+ },
+ {
+ name: "General JSON type with charset",
+ contentType: "application/json; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with application/json prefix",
+ contentType: "application/jsonwhatever",
+ want: false,
+ },
+ {
+ name: "Manifest type in JSON",
+ contentType: "application/vnd.oci.image.manifest.v1+json",
+ want: true,
+ },
+ {
+ name: "Manifest type in JSON with charset",
+ contentType: "application/vnd.oci.image.manifest.v1+json; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random content type in JSON",
+ contentType: "application/whatever+json",
+ want: true,
+ },
+ {
+ name: "Plain text type",
+ contentType: "text/plain",
+ want: true,
+ },
+ {
+ name: "Plain text type with charset",
+ contentType: "text/plain; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with text/plain prefix",
+ contentType: "text/plainnnnn",
+ want: false,
+ },
+ {
+ name: "HTML type",
+ contentType: "text/html",
+ want: true,
+ },
+ {
+ name: "Plain text type with charset",
+ contentType: "text/html; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with text/html prefix",
+ contentType: "text/htmlllll",
+ want: false,
+ },
+ {
+ name: "Binary type",
+ contentType: "application/octet-stream",
+ want: false,
+ },
+ {
+ name: "Unknown type",
+ contentType: "unknown/unknown",
+ want: false,
+ },
+ {
+ name: "Invalid type",
+ contentType: "text/",
+ want: false,
+ },
+ {
+ name: "Random string",
+ contentType: "random123!@#",
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := isPrintableContentType(tt.contentType); got != tt.want {
+ t.Errorf("isPrintableContentType() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_logResponseBody(t *testing.T) {
+ tests := []struct {
+ name string
+ resp *http.Response
+ want string
+ wantData []byte
+ }{
+ {
+ name: "Nil body",
+ resp: &http.Response{
+ Body: nil,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ want: " No response body to print",
+ },
+ {
+ name: "No body",
+ wantData: nil,
+ resp: &http.Response{
+ Body: http.NoBody,
+ ContentLength: 100, // in case of HEAD response, the content length is set but the body is empty
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ want: " No response body to print",
+ },
+ {
+ name: "Empty body",
+ wantData: []byte(""),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(""))),
+ ContentLength: 0,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: " Response body is empty",
+ },
+ {
+ name: "Unknown content length",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: -1,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "whatever",
+ },
+ {
+ name: "Missing content type header",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: 8,
+ },
+ want: " Response body without a content type is not printed",
+ },
+ {
+ name: "Empty content type header",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: 8,
+ Header: http.Header{"Content-Type": []string{""}},
+ },
+ want: " Response body without a content type is not printed",
+ },
+ {
+ name: "Non-printable content type",
+ wantData: []byte("binary data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("binary data"))),
+ ContentLength: 11,
+ Header: http.Header{"Content-Type": []string{"application/octet-stream"}},
+ },
+ want: " Response body of content type \"application/octet-stream\" is not printed",
+ },
+ {
+ name: "Body at the limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit)))),
+ ContentLength: payloadSizeLimit,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))),
+ },
+ {
+ name: "Body larger than limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)))), // 1 byte larger than limit
+ ContentLength: payloadSizeLimit + 1,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))) + "\n...(truncated)",
+ },
+ {
+ name: "Printable content type within limit",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 4,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Actual body size is larger than content length",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 3, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Actual body size is larger than content length and exceeds limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)))), // 1 byte larger than limit
+ ContentLength: 1, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))) + "\n...(truncated)",
+ },
+ {
+ name: "Actual body size is smaller than content length",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 5, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Body contains token",
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(`{"token":"12345"}`))),
+ ContentLength: 17,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ wantData: []byte(`{"token":"12345"}`),
+ want: " Response body redacted due to potential credentials",
+ },
+ {
+ name: "Body contains access_token",
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(`{"access_token":"12345"}`))),
+ ContentLength: 17,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ wantData: []byte(`{"access_token":"12345"}`),
+ want: " Response body redacted due to potential credentials",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := logResponseBody(tt.resp); got != tt.want {
+ t.Errorf("logResponseBody() = %v, want %v", got, tt.want)
+ }
+ // validate the response body
+ if tt.resp.Body != nil {
+ readBytes, err := io.ReadAll(tt.resp.Body)
+ if err != nil {
+ t.Errorf("failed to read body after logResponseBody(), err= %v", err)
+ }
+ if !bytes.Equal(readBytes, tt.wantData) {
+ t.Errorf("resp.Body after logResponseBody() = %v, want %v", readBytes, tt.wantData)
+ }
+ if closeErr := tt.resp.Body.Close(); closeErr != nil {
+ t.Errorf("failed to close body after logResponseBody(), err= %v", closeErr)
+ }
+ }
+ })
+ }
+}
+
+func Test_logResponseBody_error(t *testing.T) {
+ tests := []struct {
+ name string
+ resp *http.Response
+ want string
+ }{
+ {
+ name: "Error reading body",
+ resp: &http.Response{
+ Body: io.NopCloser(&errorReader{}),
+ ContentLength: 10,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: " Error reading response body: mock read error",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := logResponseBody(tt.resp); got != tt.want {
+ t.Errorf("logResponseBody() = %v, want %v", got, tt.want)
+ }
+ if closeErr := tt.resp.Body.Close(); closeErr != nil {
+ t.Errorf("failed to close body after logResponseBody(), err= %v", closeErr)
+ }
+ })
+ }
+}
+
+func Test_containsCredentials(t *testing.T) {
+ tests := []struct {
+ name string
+ body string
+ want bool
+ }{
+ {
+ name: "Contains token keyword",
+ body: `{"token": "12345"}`,
+ want: true,
+ },
+ {
+ name: "Contains quoted token keyword",
+ body: `whatever "token" blah`,
+ want: true,
+ },
+ {
+ name: "Contains unquoted token keyword",
+ body: `whatever token blah`,
+ want: false,
+ },
+ {
+ name: "Contains access_token keyword",
+ body: `{"access_token": "12345"}`,
+ want: true,
+ },
+ {
+ name: "Contains quoted access_token keyword",
+ body: `whatever "access_token" blah`,
+ want: true,
+ },
+ {
+ name: "Contains unquoted access_token keyword",
+ body: `whatever access_token blah`,
+ want: false,
+ },
+ {
+ name: "Does not contain credentials",
+ body: `{"key": "value"}`,
+ want: false,
+ },
+ {
+ name: "Empty body",
+ body: ``,
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := containsCredentials(tt.body); got != tt.want {
+ t.Errorf("containsCredentials() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/registry/util.go b/pkg/registry/util.go
index 1a96b0768..b31ab63fe 100644
--- a/pkg/registry/util.go
+++ b/pkg/registry/util.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"net/http"
+ "slices"
"strings"
"time"
@@ -31,7 +32,6 @@ import (
"github.com/Masterminds/semver/v3"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
var immutableOciAnnotations = []string{
@@ -46,12 +46,7 @@ func IsOCI(url string) bool {
// ContainsTag determines whether a tag is found in a provided list of tags
func ContainsTag(tags []string, tag string) bool {
- for _, t := range tags {
- if tag == t {
- return true
- }
- }
- return false
+ return slices.Contains(tags, tag)
}
func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (string, error) {
@@ -87,7 +82,7 @@ func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (str
}
}
- return "", errors.Errorf("Could not locate a version matching provided version string %s", versionString)
+ return "", fmt.Errorf("could not locate a version matching provided version string %s", versionString)
}
// extractChartMeta is used to extract a chart metadata from a byte array
diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go
index 8e6943222..501860e03 100644
--- a/pkg/registry/utils_test.go
+++ b/pkg/registry/utils_test.go
@@ -35,8 +35,6 @@ import (
"github.com/distribution/distribution/v3/registry"
_ "github.com/distribution/distribution/v3/registry/auth/htpasswd"
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
- "github.com/foxcpp/go-mockdns"
- "github.com/phayes/freeport"
"github.com/stretchr/testify/suite"
"golang.org/x/crypto/bcrypt"
@@ -65,12 +63,10 @@ type TestSuite struct {
CompromisedRegistryHost string
WorkspaceDir string
RegistryClient *Client
-
- // A mock DNS server needed for TLS connection testing.
- srv *mockdns.Server
+ dockerRegistry *registry.Registry
}
-func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry {
+func setup(suite *TestSuite, tlsEnabled, insecure bool) {
suite.WorkspaceDir = testWorkspaceDir
os.RemoveAll(suite.WorkspaceDir)
os.Mkdir(suite.WorkspaceDir, 0700)
@@ -121,27 +117,22 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry {
pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost)
suite.Nil(err, "no error generating bcrypt password for test htpasswd file")
htpasswdPath := filepath.Join(suite.WorkspaceDir, testHtpasswdFileBasename)
- err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0644)
+ err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0644)
suite.Nil(err, "no error creating test htpasswd file")
// Registry config
config := &configuration.Configuration{}
- port, err := freeport.GetFreePort()
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
suite.Nil(err, "no error finding free port for test registry")
+ defer ln.Close()
// Change the registry host to another host which is not localhost.
// This is required because Docker enforces HTTP if the registry
// host is localhost/127.0.0.1.
+ port := ln.Addr().(*net.TCPAddr).Port
suite.DockerRegistryHost = fmt.Sprintf("helm-test-registry:%d", port)
- suite.srv, err = mockdns.NewServer(map[string]mockdns.Zone{
- "helm-test-registry.": {
- A: []string{"127.0.0.1"},
- },
- }, false)
- suite.Nil(err, "no error creating mock DNS server")
- suite.srv.PatchNet(net.DefaultResolver)
- config.HTTP.Addr = fmt.Sprintf(":%d", port)
+ config.HTTP.Addr = ln.Addr().String()
config.HTTP.DrainTimeout = time.Duration(10) * time.Second
config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
@@ -164,17 +155,18 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry {
config.HTTP.TLS.ClientCAs = []string{tlsCA}
}
}
- dockerRegistry, err := registry.NewRegistry(context.Background(), config)
+ suite.dockerRegistry, err = registry.NewRegistry(context.Background(), config)
suite.Nil(err, "no error creating test registry")
suite.CompromisedRegistryHost = initCompromisedRegistryTestServer()
- return dockerRegistry
+ go func() {
+ _ = suite.dockerRegistry.ListenAndServe()
+ }()
}
func teardown(suite *TestSuite) {
- if suite.srv != nil {
- mockdns.UnpatchNet(net.DefaultResolver)
- suite.srv.Close()
+ if suite.dockerRegistry != nil {
+ _ = suite.dockerRegistry.Shutdown(context.Background())
}
}
@@ -182,11 +174,9 @@ func initCompromisedRegistryTestServer() string {
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "manifests") {
w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
- w.WriteHeader(200)
+ w.WriteHeader(http.StatusOK)
- // layers[0] is the blob []byte("a")
- w.Write([]byte(
- fmt.Sprintf(`{ "schemaVersion": 2, "config": {
+ fmt.Fprintf(w, `{ "schemaVersion": 2, "config": {
"mediaType": "%s",
"digest": "sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133",
"size": 181
@@ -198,19 +188,19 @@ func initCompromisedRegistryTestServer() string {
"size": 1
}
]
-}`, ConfigMediaType, ChartLayerMediaType)))
+}`, ConfigMediaType, ChartLayerMediaType)
} else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133" {
w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(200)
+ w.WriteHeader(http.StatusOK)
w.Write([]byte("{\"name\":\"mychart\",\"version\":\"0.1.0\",\"description\":\"A Helm chart for Kubernetes\\n" +
"an 'application' or a 'library' chart.\",\"apiVersion\":\"v2\",\"appVersion\":\"1.16.0\",\"type\":" +
"\"application\"}"))
} else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb" {
w.Header().Set("Content-Type", ChartLayerMediaType)
- w.WriteHeader(200)
+ w.WriteHeader(http.StatusOK)
w.Write([]byte("b"))
} else {
- w.WriteHeader(500)
+ w.WriteHeader(http.StatusInternalServerError)
}
}))
@@ -228,7 +218,7 @@ func testPush(suite *TestSuite) {
suite.NotNil(err, "error pushing non-chart bytes")
// Load a test chart
- chartData, err := os.ReadFile("../repo/repotest/testdata/examplechart-0.1.0.tgz")
+ chartData, err := os.ReadFile("../repo/v1/repotest/testdata/examplechart-0.1.0.tgz")
suite.Nil(err, "no error loading test chart")
meta, err := extractChartMeta(chartData)
suite.Nil(err, "no error extracting chart meta")
diff --git a/pkg/release/v1/mock.go b/pkg/release/v1/mock.go
index 9ca57284c..c3a6594cc 100644
--- a/pkg/release/v1/mock.go
+++ b/pkg/release/v1/mock.go
@@ -20,6 +20,7 @@ import (
"fmt"
"math/rand"
+ "helm.sh/helm/v4/pkg/chart/common"
chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/time"
)
@@ -46,6 +47,7 @@ type MockReleaseOptions struct {
Chart *chart.Chart
Status Status
Namespace string
+ Labels map[string]string
}
// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing.
@@ -66,6 +68,10 @@ func Mock(opts *MockReleaseOptions) *Release {
if namespace == "" {
namespace = "default"
}
+ var labels map[string]string
+ if len(opts.Labels) > 0 {
+ labels = opts.Labels
+ }
ch := opts.Chart
if opts.Chart == nil {
@@ -93,7 +99,7 @@ func Mock(opts *MockReleaseOptions) *Release {
},
},
},
- Templates: []*chart.File{
+ Templates: []*common.File{
{Name: "templates/foo.tpl", Data: []byte(MockManifest)},
},
}
@@ -130,5 +136,6 @@ func Mock(opts *MockReleaseOptions) *Release {
},
},
Manifest: MockManifest,
+ Labels: labels,
}
}
diff --git a/pkg/release/v1/release.go b/pkg/release/v1/release.go
index 74e834f7b..a7f076e04 100644
--- a/pkg/release/v1/release.go
+++ b/pkg/release/v1/release.go
@@ -19,6 +19,11 @@ import (
chart "helm.sh/helm/v4/pkg/chart/v2"
)
+type ApplyMethod string
+
+const ApplyMethodClientSideApply ApplyMethod = "csa"
+const ApplyMethodServerSideApply ApplyMethod = "ssa"
+
// Release describes a deployment of a chart, together with the chart
// and the variables used to deploy that chart.
type Release struct {
@@ -42,6 +47,9 @@ type Release struct {
// Labels of the release.
// Disabled encoding into Json cause labels are stored in storage driver metadata field.
Labels map[string]string `json:"-"`
+ // ApplyMethod stores whether server-side or client-side apply was used for the release
+ // Unset (empty string) should be treated as the default of client-side apply
+ ApplyMethod string `json:"apply_method,omitempty"` // "ssa" | "csa"
}
// SetStatus is a helper for setting the status on a release.
diff --git a/pkg/release/util/filter.go b/pkg/release/v1/util/filter.go
similarity index 97%
rename from pkg/release/util/filter.go
rename to pkg/release/v1/util/filter.go
index f0a082cfd..f818a6196 100644
--- a/pkg/release/util/filter.go
+++ b/pkg/release/v1/util/filter.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util // import "helm.sh/helm/v4/pkg/release/util"
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
import rspb "helm.sh/helm/v4/pkg/release/v1"
diff --git a/pkg/release/util/filter_test.go b/pkg/release/v1/util/filter_test.go
similarity index 96%
rename from pkg/release/util/filter_test.go
rename to pkg/release/v1/util/filter_test.go
index 5d2564619..c8b23d526 100644
--- a/pkg/release/util/filter_test.go
+++ b/pkg/release/v1/util/filter_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util // import "helm.sh/helm/v4/pkg/release/util"
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
import (
"testing"
diff --git a/pkg/release/util/kind_sorter.go b/pkg/release/v1/util/kind_sorter.go
similarity index 94%
rename from pkg/release/util/kind_sorter.go
rename to pkg/release/v1/util/kind_sorter.go
index 22795733c..bc074340f 100644
--- a/pkg/release/util/kind_sorter.go
+++ b/pkg/release/v1/util/kind_sorter.go
@@ -65,12 +65,17 @@ var InstallOrder KindSortOrder = []string{
"IngressClass",
"Ingress",
"APIService",
+ "MutatingWebhookConfiguration",
+ "ValidatingWebhookConfiguration",
}
// UninstallOrder is the order in which manifests should be uninstalled (by Kind).
//
// Those occurring earlier in the list get uninstalled before those occurring later in the list.
var UninstallOrder KindSortOrder = []string{
+ // For uninstall, we remove validation before mutation to ensure webhooks don't block removal
+ "ValidatingWebhookConfiguration",
+ "MutatingWebhookConfiguration",
"APIService",
"Ingress",
"IngressClass",
diff --git a/pkg/release/util/kind_sorter_test.go b/pkg/release/v1/util/kind_sorter_test.go
similarity index 96%
rename from pkg/release/util/kind_sorter_test.go
rename to pkg/release/v1/util/kind_sorter_test.go
index 00d80ecf2..919de24e5 100644
--- a/pkg/release/util/kind_sorter_test.go
+++ b/pkg/release/v1/util/kind_sorter_test.go
@@ -173,6 +173,14 @@ func TestKindSorter(t *testing.T) {
Name: "F",
Head: &SimpleHead{Kind: "PriorityClass"},
},
+ {
+ Name: "M",
+ Head: &SimpleHead{Kind: "MutatingWebhookConfiguration"},
+ },
+ {
+ Name: "V",
+ Head: &SimpleHead{Kind: "ValidatingWebhookConfiguration"},
+ },
}
for _, test := range []struct {
@@ -180,8 +188,8 @@ func TestKindSorter(t *testing.T) {
order KindSortOrder
expected string
}{
- {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvw!"},
- {"uninstall", UninstallOrder, "wvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"},
+ {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"},
+ {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"},
} {
var buf bytes.Buffer
t.Run(test.description, func(t *testing.T) {
diff --git a/pkg/release/util/manifest.go b/pkg/release/v1/util/manifest.go
similarity index 100%
rename from pkg/release/util/manifest.go
rename to pkg/release/v1/util/manifest.go
diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/v1/util/manifest_sorter.go
similarity index 93%
rename from pkg/release/util/manifest_sorter.go
rename to pkg/release/v1/util/manifest_sorter.go
index 15eb76174..6f7b4ea8b 100644
--- a/pkg/release/util/manifest_sorter.go
+++ b/pkg/release/v1/util/manifest_sorter.go
@@ -17,16 +17,16 @@ limitations under the License.
package util
import (
- "log"
+ "fmt"
+ "log/slog"
"path"
"sort"
"strconv"
"strings"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
- chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/chart/common"
release "helm.sh/helm/v4/pkg/release/v1"
)
@@ -74,7 +74,7 @@ var events = map[string]release.HookEvent{
//
// Files that do not parse into the expected format are simply placed into a map and
// returned.
-func SortManifests(files map[string]string, _ chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+func SortManifests(files map[string]string, _ common.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
result := &result{}
var sortedFilePaths []string
@@ -149,7 +149,7 @@ func (file *manifestFile) sort(result *result) error {
var entry SimpleHead
if err := yaml.Unmarshal([]byte(m), &entry); err != nil {
- return errors.Wrapf(err, "YAML parse error on %s", file.path)
+ return fmt.Errorf("YAML parse error on %s: %w", file.path, err)
}
if !hasAnyAnnotation(entry) {
@@ -185,7 +185,7 @@ func (file *manifestFile) sort(result *result) error {
}
isUnknownHook := false
- for _, hookType := range strings.Split(hookTypes, ",") {
+ for hookType := range strings.SplitSeq(hookTypes, ",") {
hookType = strings.ToLower(strings.TrimSpace(hookType))
e, ok := events[hookType]
if !ok {
@@ -196,7 +196,7 @@ func (file *manifestFile) sort(result *result) error {
}
if isUnknownHook {
- log.Printf("info: skipping unknown hook: %q", hookTypes)
+ slog.Info("skipping unknown hooks", "hookTypes", hookTypes)
continue
}
@@ -236,7 +236,7 @@ func calculateHookWeight(entry SimpleHead) int {
// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation
func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) {
if dps, ok := entry.Metadata.Annotations[annotation]; ok {
- for _, dp := range strings.Split(dps, ",") {
+ for dp := range strings.SplitSeq(dps, ",") {
dp = strings.ToLower(strings.TrimSpace(dp))
operate(dp)
}
diff --git a/pkg/release/util/manifest_sorter_test.go b/pkg/release/v1/util/manifest_sorter_test.go
similarity index 100%
rename from pkg/release/util/manifest_sorter_test.go
rename to pkg/release/v1/util/manifest_sorter_test.go
diff --git a/pkg/release/util/manifest_test.go b/pkg/release/v1/util/manifest_test.go
similarity index 95%
rename from pkg/release/util/manifest_test.go
rename to pkg/release/v1/util/manifest_test.go
index cfc19563d..754ac1367 100644
--- a/pkg/release/util/manifest_test.go
+++ b/pkg/release/v1/util/manifest_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util // import "helm.sh/helm/v4/pkg/release/util"
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
import (
"reflect"
diff --git a/pkg/release/util/sorter.go b/pkg/release/v1/util/sorter.go
similarity index 59%
rename from pkg/release/util/sorter.go
rename to pkg/release/v1/util/sorter.go
index 949adbda9..3712a58ef 100644
--- a/pkg/release/util/sorter.go
+++ b/pkg/release/v1/util/sorter.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util // import "helm.sh/helm/v4/pkg/release/util"
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
import (
"sort"
@@ -22,35 +22,6 @@ import (
rspb "helm.sh/helm/v4/pkg/release/v1"
)
-type list []*rspb.Release
-
-func (s list) Len() int { return len(s) }
-func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// ByName sorts releases by name
-type ByName struct{ list }
-
-// Less compares to releases
-func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name }
-
-// ByDate sorts releases by date
-type ByDate struct{ list }
-
-// Less compares to releases
-func (s ByDate) Less(i, j int) bool {
- ti := s.list[i].Info.LastDeployed.Unix()
- tj := s.list[j].Info.LastDeployed.Unix()
- return ti < tj
-}
-
-// ByRevision sorts releases by revision number
-type ByRevision struct{ list }
-
-// Less compares to releases
-func (s ByRevision) Less(i, j int) bool {
- return s.list[i].Version < s.list[j].Version
-}
-
// Reverse reverses the list of releases sorted by the sort func.
func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
sortFn(list)
@@ -62,17 +33,25 @@ func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
// SortByName returns the list of releases sorted
// in lexicographical order.
func SortByName(list []*rspb.Release) {
- sort.Sort(ByName{list})
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].Name < list[j].Name
+ })
}
// SortByDate returns the list of releases sorted by a
// release's last deployed time (in seconds).
func SortByDate(list []*rspb.Release) {
- sort.Sort(ByDate{list})
+ sort.Slice(list, func(i, j int) bool {
+ ti := list[i].Info.LastDeployed.Unix()
+ tj := list[j].Info.LastDeployed.Unix()
+ return ti < tj
+ })
}
// SortByRevision returns the list of releases sorted by a
// release's revision number (release.Version).
func SortByRevision(list []*rspb.Release) {
- sort.Sort(ByRevision{list})
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].Version < list[j].Version
+ })
}
diff --git a/pkg/release/util/sorter_test.go b/pkg/release/v1/util/sorter_test.go
similarity index 97%
rename from pkg/release/util/sorter_test.go
rename to pkg/release/v1/util/sorter_test.go
index 8a766efc9..4628a5192 100644
--- a/pkg/release/util/sorter_test.go
+++ b/pkg/release/v1/util/sorter_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util // import "helm.sh/helm/v4/pkg/release/util"
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
import (
"testing"
@@ -43,6 +43,7 @@ func tsRelease(name string, vers int, dur time.Duration, status rspb.Status) *rs
}
func check(t *testing.T, by string, fn func(int, int) bool) {
+ t.Helper()
for i := len(releases) - 1; i > 0; i-- {
if fn(i, i-1) {
t.Errorf("release at positions '(%d,%d)' not sorted by %s", i-1, i, by)
diff --git a/pkg/repo/chartrepo.go b/pkg/repo/v1/chartrepo.go
similarity index 71%
rename from pkg/repo/chartrepo.go
rename to pkg/repo/v1/chartrepo.go
index 52f81be57..95c04ccef 100644
--- a/pkg/repo/chartrepo.go
+++ b/pkg/repo/v1/chartrepo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package repo // import "helm.sh/helm/v4/pkg/repo"
+package repo // import "helm.sh/helm/v4/pkg/repo/v1"
import (
"crypto/rand"
@@ -22,19 +22,14 @@ import (
"encoding/json"
"fmt"
"io"
- "log"
+ "log/slog"
"net/url"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
- "sigs.k8s.io/yaml"
-
- "helm.sh/helm/v4/pkg/chart/v2/loader"
"helm.sh/helm/v4/pkg/getter"
"helm.sh/helm/v4/pkg/helmpath"
- "helm.sh/helm/v4/pkg/provenance"
)
// Entry represents a collection of parameters for chart repository
@@ -52,23 +47,22 @@ type Entry struct {
// ChartRepository represents a chart repository
type ChartRepository struct {
- Config *Entry
- ChartPaths []string
- IndexFile *IndexFile
- Client getter.Getter
- CachePath string
+ Config *Entry
+ IndexFile *IndexFile
+ Client getter.Getter
+ CachePath string
}
// NewChartRepository constructs ChartRepository
func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) {
u, err := url.Parse(cfg.URL)
if err != nil {
- return nil, errors.Errorf("invalid chart URL format: %s", cfg.URL)
+ return nil, fmt.Errorf("invalid chart URL format: %s", cfg.URL)
}
client, err := getters.ByScheme(u.Scheme)
if err != nil {
- return nil, errors.Errorf("could not find protocol handler for: %s", u.Scheme)
+ return nil, fmt.Errorf("could not find protocol handler for: %s", u.Scheme)
}
return &ChartRepository{
@@ -79,40 +73,6 @@ func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository,
}, nil
}
-// Load loads a directory of charts as if it were a repository.
-//
-// It requires the presence of an index.yaml file in the directory.
-//
-// Deprecated: remove in Helm 4.
-func (r *ChartRepository) Load() error {
- dirInfo, err := os.Stat(r.Config.Name)
- if err != nil {
- return err
- }
- if !dirInfo.IsDir() {
- return errors.Errorf("%q is not a directory", r.Config.Name)
- }
-
- // FIXME: Why are we recursively walking directories?
- // FIXME: Why are we not reading the repositories.yaml to figure out
- // what repos to use?
- filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, _ error) error {
- if !f.IsDir() {
- if strings.Contains(f.Name(), "-index.yaml") {
- i, err := LoadIndexFile(path)
- if err != nil {
- return err
- }
- r.IndexFile = i
- } else if strings.HasSuffix(f.Name(), ".tgz") {
- r.ChartPaths = append(r.ChartPaths, path)
- }
- }
- return nil
- })
- return nil
-}
-
// DownloadIndexFile fetches the index from a repository.
func (r *ChartRepository) DownloadIndexFile() (string, error) {
indexURL, err := ResolveReferenceURL(r.Config.URL, "index.yaml")
@@ -156,46 +116,6 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) {
return fname, os.WriteFile(fname, index, 0644)
}
-// Index generates an index for the chart repository and writes an index.yaml file.
-func (r *ChartRepository) Index() error {
- err := r.generateIndex()
- if err != nil {
- return err
- }
- return r.saveIndexFile()
-}
-
-func (r *ChartRepository) saveIndexFile() error {
- index, err := yaml.Marshal(r.IndexFile)
- if err != nil {
- return err
- }
- return os.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644)
-}
-
-func (r *ChartRepository) generateIndex() error {
- for _, path := range r.ChartPaths {
- ch, err := loader.Load(path)
- if err != nil {
- return err
- }
-
- digest, err := provenance.DigestFile(path)
- if err != nil {
- return err
- }
-
- if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) {
- if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil {
- return errors.Wrapf(err, "failed adding to %s to index", path)
- }
- }
- // TODO: If a chart exists, but has a different Digest, should we error?
- }
- r.IndexFile.SortEntries()
- return nil
-}
-
type findChartInRepoURLOptions struct {
Username string
Password string
@@ -240,7 +160,7 @@ func WithClientTLS(certFile, keyFile, caFile string) FindChartInRepoURLOption {
}
}
-// WithInsecureSkipTLSverify skips TLS verification for repostory communication
+// WithInsecureSkipTLSverify skips TLS verification for repository communication
func WithInsecureSkipTLSverify(insecureSkipTLSverify bool) FindChartInRepoURLOption {
return func(options *findChartInRepoURLOptions) {
options.InsecureSkipTLSverify = insecureSkipTLSverify
@@ -278,7 +198,7 @@ func FindChartInRepoURL(repoURL string, chartName string, getters getter.Provide
}
idx, err := r.DownloadIndexFile()
if err != nil {
- return "", errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", repoURL)
+ return "", fmt.Errorf("looks like %q is not a valid chart repository or cannot be reached: %w", repoURL, err)
}
defer func() {
os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)))
@@ -297,18 +217,21 @@ func FindChartInRepoURL(repoURL string, chartName string, getters getter.Provide
}
cv, err := repoIndex.Get(chartName, opts.ChartVersion)
if err != nil {
- return "", errors.Errorf("%s not found in %s repository", errMsg, repoURL)
+ return "", ChartNotFoundError{
+ Chart: errMsg,
+ RepoURL: repoURL,
+ }
}
if len(cv.URLs) == 0 {
- return "", errors.Errorf("%s has no downloadable URLs", errMsg)
+ return "", fmt.Errorf("%s has no downloadable URLs", errMsg)
}
chartURL := cv.URLs[0]
absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL)
if err != nil {
- return "", errors.Wrap(err, "failed to make chart URL absolute")
+ return "", fmt.Errorf("failed to make chart URL absolute: %w", err)
}
return absoluteChartURL, nil
@@ -319,7 +242,7 @@ func FindChartInRepoURL(repoURL string, chartName string, getters getter.Provide
func ResolveReferenceURL(baseURL, refURL string) (string, error) {
parsedRefURL, err := url.Parse(refURL)
if err != nil {
- return "", errors.Wrapf(err, "failed to parse %s as URL", refURL)
+ return "", fmt.Errorf("failed to parse %s as URL: %w", refURL, err)
}
if parsedRefURL.IsAbs() {
@@ -328,7 +251,7 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) {
parsedBaseURL, err := url.Parse(baseURL)
if err != nil {
- return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL)
+ return "", fmt.Errorf("failed to parse %s as URL: %w", baseURL, err)
}
// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
@@ -343,7 +266,8 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) {
func (e *Entry) String() string {
buf, err := json.Marshal(e)
if err != nil {
- log.Panic(err)
+ slog.Error("failed to marshal entry", slog.Any("error", err))
+ panic(err)
}
return string(buf)
}
diff --git a/pkg/repo/chartrepo_test.go b/pkg/repo/v1/chartrepo_test.go
similarity index 66%
rename from pkg/repo/chartrepo_test.go
rename to pkg/repo/v1/chartrepo_test.go
index 42d00e0ee..05e034dd8 100644
--- a/pkg/repo/chartrepo_test.go
+++ b/pkg/repo/v1/chartrepo_test.go
@@ -18,11 +18,10 @@ package repo
import (
"bytes"
+ "errors"
"net/http"
"net/http/httptest"
"os"
- "path/filepath"
- "reflect"
"runtime"
"strings"
"testing"
@@ -30,87 +29,10 @@ import (
"sigs.k8s.io/yaml"
- chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/cli"
"helm.sh/helm/v4/pkg/getter"
)
-const (
- testRepository = "testdata/repository"
- testURL = "http://example-charts.com"
-)
-
-func TestLoadChartRepository(t *testing.T) {
- r, err := NewChartRepository(&Entry{
- Name: testRepository,
- URL: testURL,
- }, getter.All(&cli.EnvSettings{}))
- if err != nil {
- t.Errorf("Problem creating chart repository from %s: %v", testRepository, err)
- }
-
- if err := r.Load(); err != nil {
- t.Errorf("Problem loading chart repository from %s: %v", testRepository, err)
- }
-
- paths := []string{
- filepath.Join(testRepository, "frobnitz-1.2.3.tgz"),
- filepath.Join(testRepository, "sprocket-1.1.0.tgz"),
- filepath.Join(testRepository, "sprocket-1.2.0.tgz"),
- filepath.Join(testRepository, "universe/zarthal-1.0.0.tgz"),
- }
-
- if r.Config.Name != testRepository {
- t.Errorf("Expected %s as Name but got %s", testRepository, r.Config.Name)
- }
-
- if !reflect.DeepEqual(r.ChartPaths, paths) {
- t.Errorf("Expected %#v but got %#v\n", paths, r.ChartPaths)
- }
-
- if r.Config.URL != testURL {
- t.Errorf("Expected url for chart repository to be %s but got %s", testURL, r.Config.URL)
- }
-}
-
-func TestIndex(t *testing.T) {
- r, err := NewChartRepository(&Entry{
- Name: testRepository,
- URL: testURL,
- }, getter.All(&cli.EnvSettings{}))
- if err != nil {
- t.Errorf("Problem creating chart repository from %s: %v", testRepository, err)
- }
-
- if err := r.Load(); err != nil {
- t.Errorf("Problem loading chart repository from %s: %v", testRepository, err)
- }
-
- err = r.Index()
- if err != nil {
- t.Errorf("Error performing index: %v\n", err)
- }
-
- tempIndexPath := filepath.Join(testRepository, indexPath)
- actual, err := LoadIndexFile(tempIndexPath)
- defer os.Remove(tempIndexPath) // clean up
- if err != nil {
- t.Errorf("Error loading index file %v", err)
- }
- verifyIndex(t, actual)
-
- // Re-index and test again.
- err = r.Index()
- if err != nil {
- t.Errorf("Error performing re-index: %s\n", err)
- }
- second, err := LoadIndexFile(tempIndexPath)
- if err != nil {
- t.Errorf("Error re-loading index file %v", err)
- }
- verifyIndex(t, second)
-}
-
type CustomGetter struct {
repoUrls []string
}
@@ -148,7 +70,7 @@ func TestIndexCustomSchemeDownload(t *testing.T) {
}
repo.CachePath = t.TempDir()
- tempIndexFile, err := os.CreateTemp("", "test-repo")
+ tempIndexFile, err := os.CreateTemp(t.TempDir(), "test-repo")
if err != nil {
t.Fatalf("Failed to create temp index file: %v", err)
}
@@ -169,97 +91,6 @@ func TestIndexCustomSchemeDownload(t *testing.T) {
}
}
-func verifyIndex(t *testing.T, actual *IndexFile) {
- var empty time.Time
- if actual.Generated.Equal(empty) {
- t.Errorf("Generated should be greater than 0: %s", actual.Generated)
- }
-
- if actual.APIVersion != APIVersionV1 {
- t.Error("Expected v1 API")
- }
-
- entries := actual.Entries
- if numEntries := len(entries); numEntries != 3 {
- t.Errorf("Expected 3 charts to be listed in index file but got %v", numEntries)
- }
-
- expects := map[string]ChartVersions{
- "frobnitz": {
- {
- Metadata: &chart.Metadata{
- Name: "frobnitz",
- Version: "1.2.3",
- },
- },
- },
- "sprocket": {
- {
- Metadata: &chart.Metadata{
- Name: "sprocket",
- Version: "1.2.0",
- },
- },
- {
- Metadata: &chart.Metadata{
- Name: "sprocket",
- Version: "1.1.0",
- },
- },
- },
- "zarthal": {
- {
- Metadata: &chart.Metadata{
- Name: "zarthal",
- Version: "1.0.0",
- },
- },
- },
- }
-
- for name, versions := range expects {
- got, ok := entries[name]
- if !ok {
- t.Errorf("Could not find %q entry", name)
- continue
- }
- if len(versions) != len(got) {
- t.Errorf("Expected %d versions, got %d", len(versions), len(got))
- continue
- }
- for i, e := range versions {
- g := got[i]
- if e.Name != g.Name {
- t.Errorf("Expected %q, got %q", e.Name, g.Name)
- }
- if e.Version != g.Version {
- t.Errorf("Expected %q, got %q", e.Version, g.Version)
- }
- if len(g.Keywords) != 3 {
- t.Error("Expected 3 keywords.")
- }
- if len(g.Maintainers) != 2 {
- t.Error("Expected 2 maintainers.")
- }
- if g.Created.Equal(empty) {
- t.Error("Expected created to be non-empty")
- }
- if g.Description == "" {
- t.Error("Expected description to be non-empty")
- }
- if g.Home == "" {
- t.Error("Expected home to be non-empty")
- }
- if g.Digest == "" {
- t.Error("Expected digest to be non-empty")
- }
- if len(g.URLs) != 1 {
- t.Error("Expected exactly 1 URL")
- }
- }
- }
-}
-
// startLocalServerForTests Start the local helm server
func startLocalServerForTests(handler http.Handler) (*httptest.Server, error) {
if handler == nil {
@@ -372,6 +203,9 @@ func TestErrorFindChartInRepoURL(t *testing.T) {
} else if err.Error() != `chart "nginx1" not found in `+srv.URL+` repository` {
t.Errorf("Expected error for chart not found, but got a different error (%v)", err)
}
+ if !errors.Is(err, ChartNotFoundError{}) {
+ t.Errorf("error is not of correct error type structure")
+ }
if _, err = FindChartInRepoURL(srv.URL, "nginx1", g, WithChartVersion("0.1.0")); err == nil {
t.Errorf("Expected error for chart not found, but did not get any errors")
@@ -390,11 +224,15 @@ func TestResolveReferenceURL(t *testing.T) {
for _, tt := range []struct {
baseURL, refURL, chartURL string
}{
+ {"http://localhost:8123/", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
{"http://localhost:8123/charts/", "nginx-0.2.0.tgz", "http://localhost:8123/charts/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts/", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
{"http://localhost:8123/charts-with-no-trailing-slash", "nginx-0.2.0.tgz", "http://localhost:8123/charts-with-no-trailing-slash/nginx-0.2.0.tgz"},
{"http://localhost:8123", "https://charts.helm.sh/stable/nginx-0.2.0.tgz", "https://charts.helm.sh/stable/nginx-0.2.0.tgz"},
{"http://localhost:8123/charts%2fwith%2fescaped%2fslash", "nginx-0.2.0.tgz", "http://localhost:8123/charts%2fwith%2fescaped%2fslash/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts%2fwith%2fescaped%2fslash", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
{"http://localhost:8123/charts?with=queryparameter", "nginx-0.2.0.tgz", "http://localhost:8123/charts/nginx-0.2.0.tgz?with=queryparameter"},
+ {"http://localhost:8123/charts?with=queryparameter", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz?with=queryparameter"},
} {
chartURL, err := ResolveReferenceURL(tt.baseURL, tt.refURL)
if err != nil {
diff --git a/pkg/repo/doc.go b/pkg/repo/v1/doc.go
similarity index 100%
rename from pkg/repo/doc.go
rename to pkg/repo/v1/doc.go
diff --git a/pkg/repo/v1/error.go b/pkg/repo/v1/error.go
new file mode 100644
index 000000000..16264ed26
--- /dev/null
+++ b/pkg/repo/v1/error.go
@@ -0,0 +1,35 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "fmt"
+)
+
+type ChartNotFoundError struct {
+ RepoURL string
+ Chart string
+}
+
+func (e ChartNotFoundError) Error() string {
+ return fmt.Sprintf("%s not found in %s repository", e.Chart, e.RepoURL)
+}
+
+func (e ChartNotFoundError) Is(err error) bool {
+ _, ok := err.(ChartNotFoundError)
+ return ok
+}
diff --git a/pkg/repo/index.go b/pkg/repo/v1/index.go
similarity index 95%
rename from pkg/repo/index.go
rename to pkg/repo/v1/index.go
index 4f0266ca6..4de8bb463 100644
--- a/pkg/repo/index.go
+++ b/pkg/repo/v1/index.go
@@ -19,6 +19,8 @@ package repo
import (
"bytes"
"encoding/json"
+ "errors"
+ "fmt"
"log/slog"
"os"
"path"
@@ -28,7 +30,6 @@ import (
"time"
"github.com/Masterminds/semver/v3"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
"helm.sh/helm/v4/internal/fileutil"
@@ -38,8 +39,6 @@ import (
"helm.sh/helm/v4/pkg/provenance"
)
-var indexPath = "index.yaml"
-
// APIVersionV1 is the v1 API version for index and repository files.
const APIVersionV1 = "v1"
@@ -110,7 +109,7 @@ func LoadIndexFile(path string) (*IndexFile, error) {
}
i, err := loadIndex(b, path)
if err != nil {
- return nil, errors.Wrapf(err, "error loading %s", path)
+ return nil, fmt.Errorf("error loading %s: %w", path, err)
}
return i, nil
}
@@ -126,7 +125,7 @@ func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string)
md.APIVersion = chart.APIVersionV1
}
if err := md.Validate(); err != nil {
- return errors.Wrapf(err, "validate failed for %s", filename)
+ return fmt.Errorf("validate failed for %s: %w", filename, err)
}
u := filename
@@ -219,7 +218,7 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
return ver, nil
}
}
- return nil, errors.Errorf("no chart version found for %s-%s", name, version)
+ return nil, fmt.Errorf("no chart version found for %s-%s", name, version)
}
// WriteFile writes an index file to the given destination path.
@@ -332,7 +331,7 @@ func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
return index, err
}
if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil {
- return index, errors.Wrapf(err, "failed adding to %s to index", fname)
+ return index, fmt.Errorf("failed adding to %s to index: %w", fname, err)
}
}
return index, nil
@@ -356,7 +355,8 @@ func loadIndex(data []byte, source string) (*IndexFile, error) {
for name, cvs := range i.Entries {
for idx := len(cvs) - 1; idx >= 0; idx-- {
if cvs[idx] == nil {
- slog.Warn("skipping loading invalid entry for chart %q from %s: empty entry", name, source)
+ slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q from %s: empty entry", name, source))
+ cvs = append(cvs[:idx], cvs[idx+1:]...)
continue
}
// When metadata section missing, initialize with no data
@@ -367,7 +367,7 @@ func loadIndex(data []byte, source string) (*IndexFile, error) {
cvs[idx].APIVersion = chart.APIVersionV1
}
if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil {
- slog.Warn("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)
+ slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err))
cvs = append(cvs[:idx], cvs[idx+1:]...)
}
}
diff --git a/pkg/repo/index_test.go b/pkg/repo/v1/index_test.go
similarity index 98%
rename from pkg/repo/index_test.go
rename to pkg/repo/v1/index_test.go
index f50c7e65e..a8aadadec 100644
--- a/pkg/repo/index_test.go
+++ b/pkg/repo/v1/index_test.go
@@ -68,6 +68,7 @@ entries:
grafana:
- apiVersion: v2
name: grafana
+ - null
foo:
-
bar:
@@ -123,17 +124,17 @@ func TestIndexFile(t *testing.T) {
}
cv, err := i.Get("setter", "0.1.9")
- if err == nil && !strings.Contains(cv.Metadata.Version, "0.1.9") {
- t.Errorf("Unexpected version: %s", cv.Metadata.Version)
+ if err == nil && !strings.Contains(cv.Version, "0.1.9") {
+ t.Errorf("Unexpected version: %s", cv.Version)
}
cv, err = i.Get("setter", "0.1.9+alpha")
- if err != nil || cv.Metadata.Version != "0.1.9+alpha" {
+ if err != nil || cv.Version != "0.1.9+alpha" {
t.Errorf("Expected version: 0.1.9+alpha")
}
cv, err = i.Get("setter", "0.1.8")
- if err != nil || cv.Metadata.Version != "0.1.8" {
+ if err != nil || cv.Version != "0.1.8" {
t.Errorf("Expected version: 0.1.8")
}
}
@@ -159,7 +160,6 @@ func TestLoadIndex(t *testing.T) {
}
for _, tc := range tests {
- tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
i, err := LoadIndexFile(tc.Filename)
@@ -352,6 +352,7 @@ func TestDownloadIndexFile(t *testing.T) {
}
func verifyLocalIndex(t *testing.T, i *IndexFile) {
+ t.Helper()
numEntries := len(i.Entries)
if numEntries != 3 {
t.Errorf("Expected 3 entries in index file but got %d", numEntries)
@@ -450,6 +451,7 @@ func verifyLocalIndex(t *testing.T, i *IndexFile) {
}
func verifyLocalChartsFile(t *testing.T, chartsContent []byte, indexContent *IndexFile) {
+ t.Helper()
var expected, reald []string
for chart := range indexContent.Entries {
expected = append(expected, chart)
diff --git a/pkg/repo/repo.go b/pkg/repo/v1/repo.go
similarity index 94%
rename from pkg/repo/repo.go
rename to pkg/repo/v1/repo.go
index 203dfb4de..38d2b0ca1 100644
--- a/pkg/repo/repo.go
+++ b/pkg/repo/v1/repo.go
@@ -14,14 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package repo // import "helm.sh/helm/v4/pkg/repo"
+package repo // import "helm.sh/helm/v4/pkg/repo/v1"
import (
+ "fmt"
"os"
"path/filepath"
"time"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
)
@@ -48,7 +48,7 @@ func LoadFile(path string) (*File, error) {
r := new(File)
b, err := os.ReadFile(path)
if err != nil {
- return r, errors.Wrapf(err, "couldn't load repositories file (%s)", path)
+ return r, fmt.Errorf("couldn't load repositories file (%s): %w", path, err)
}
err = yaml.Unmarshal(b, r)
diff --git a/pkg/repo/repo_test.go b/pkg/repo/v1/repo_test.go
similarity index 99%
rename from pkg/repo/repo_test.go
rename to pkg/repo/v1/repo_test.go
index c2087ebbe..bdaa61eda 100644
--- a/pkg/repo/repo_test.go
+++ b/pkg/repo/v1/repo_test.go
@@ -197,7 +197,7 @@ func TestWriteFile(t *testing.T) {
},
)
- file, err := os.CreateTemp("", "helm-repo")
+ file, err := os.CreateTemp(t.TempDir(), "helm-repo")
if err != nil {
t.Errorf("failed to create test-file (%v)", err)
}
diff --git a/pkg/repo/repotest/doc.go b/pkg/repo/v1/repotest/doc.go
similarity index 100%
rename from pkg/repo/repotest/doc.go
rename to pkg/repo/v1/repotest/doc.go
diff --git a/pkg/repo/repotest/server.go b/pkg/repo/v1/repotest/server.go
similarity index 94%
rename from pkg/repo/repotest/server.go
rename to pkg/repo/v1/repotest/server.go
index 709a6f5fd..12b96de5a 100644
--- a/pkg/repo/repotest/server.go
+++ b/pkg/repo/v1/repotest/server.go
@@ -16,9 +16,9 @@ limitations under the License.
package repotest
import (
- "context"
"crypto/tls"
"fmt"
+ "net"
"net/http"
"net/http/httptest"
"os"
@@ -30,7 +30,6 @@ import (
"github.com/distribution/distribution/v3/registry"
_ "github.com/distribution/distribution/v3/registry/auth/htpasswd" // used for docker test registry
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" // used for docker test registry
- "github.com/phayes/freeport"
"golang.org/x/crypto/bcrypt"
"sigs.k8s.io/yaml"
@@ -38,10 +37,11 @@ import (
"helm.sh/helm/v4/pkg/chart/v2/loader"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
ociRegistry "helm.sh/helm/v4/pkg/registry"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
func BasicAuthMiddleware(t *testing.T) http.HandlerFunc {
+ t.Helper()
return http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != "username" || password != "password" {
@@ -89,11 +89,8 @@ type Server struct {
//
// The temp dir will be removed by testing package automatically when test finished.
func NewTempServer(t *testing.T, options ...ServerOption) *Server {
-
- docrootTempDir, err := os.MkdirTemp("", "helm-repotest-")
- if err != nil {
- t.Fatal(err)
- }
+ t.Helper()
+ docrootTempDir := t.TempDir()
srv := newServer(t, docrootTempDir, options...)
@@ -110,6 +107,7 @@ func NewTempServer(t *testing.T, options ...ServerOption) *Server {
// Create the server, but don't yet start it
func newServer(t *testing.T, docroot string, options ...ServerOption) *Server {
+ t.Helper()
absdocroot, err := filepath.Abs(docroot)
if err != nil {
t.Fatal(err)
@@ -162,6 +160,7 @@ func WithDependingChart(c *chart.Chart) OCIServerOpt {
}
func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) {
+ t.Helper()
testHtpasswdFileBasename := "authtest.htpasswd"
testUsername, testPassword := "username", "password"
@@ -170,19 +169,21 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) {
t.Fatal("error generating bcrypt password for test htpasswd file")
}
htpasswdPath := filepath.Join(dir, testHtpasswdFileBasename)
- err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0644)
+ err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0o644)
if err != nil {
t.Fatalf("error creating test htpasswd file")
}
// Registry config
config := &configuration.Configuration{}
- port, err := freeport.GetFreePort()
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("error finding free port for test registry")
}
+ defer ln.Close()
- config.HTTP.Addr = fmt.Sprintf("127.0.0.1:%d", port)
+ port := ln.Addr().(*net.TCPAddr).Port
+ config.HTTP.Addr = ln.Addr().String()
config.HTTP.DrainTimeout = time.Duration(10) * time.Second
config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
config.Auth = configuration.Auth{
@@ -194,7 +195,7 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) {
registryURL := fmt.Sprintf("localhost:%d", port)
- r, err := registry.NewRegistry(context.Background(), config)
+ r, err := registry.NewRegistry(t.Context(), config)
if err != nil {
t.Fatal(err)
}
@@ -209,6 +210,7 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) {
}
func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) {
+ t.Helper()
cfg := &OCIServerRunConfig{}
for _, fn := range opts {
fn(cfg)
@@ -327,7 +329,7 @@ func (s *Server) CopyCharts(origin string) ([]string, error) {
if err != nil {
return []string{}, err
}
- if err := os.WriteFile(newname, data, 0644); err != nil {
+ if err := os.WriteFile(newname, data, 0o644); err != nil {
return []string{}, err
}
copied[i] = newname
@@ -351,7 +353,7 @@ func (s *Server) CreateIndex() error {
}
ifile := filepath.Join(s.docroot, "index.yaml")
- return os.WriteFile(ifile, d, 0644)
+ return os.WriteFile(ifile, d, 0o644)
}
func (s *Server) start() {
@@ -403,5 +405,5 @@ func setTestingRepository(url, fname string) error {
Name: "test",
URL: url,
})
- return r.WriteFile(fname, 0640)
+ return r.WriteFile(fname, 0o640)
}
diff --git a/pkg/repo/repotest/server_test.go b/pkg/repo/v1/repotest/server_test.go
similarity index 92%
rename from pkg/repo/repotest/server_test.go
rename to pkg/repo/v1/repotest/server_test.go
index cf68e5110..f0e374fc0 100644
--- a/pkg/repo/repotest/server_test.go
+++ b/pkg/repo/v1/repotest/server_test.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/yaml"
"helm.sh/helm/v4/internal/test/ensure"
- "helm.sh/helm/v4/pkg/repo"
+ "helm.sh/helm/v4/pkg/repo/v1"
)
// Young'n, in these here parts, we test our tests.
@@ -92,7 +92,7 @@ func TestServer(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if res.StatusCode != 404 {
+ if res.StatusCode != http.StatusNotFound {
t.Fatalf("Expected 404, got %d", res.StatusCode)
}
}
@@ -113,7 +113,7 @@ func TestNewTempServer(t *testing.T) {
"tls": {
options: []ServerOption{
WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"),
- WithTLSConfig(MakeTestTLSConfig(t, "../../../testdata")),
+ WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")),
},
},
}
@@ -140,7 +140,7 @@ func TestNewTempServer(t *testing.T) {
res.Body.Close()
- if res.StatusCode != 200 {
+ if res.StatusCode != http.StatusOK {
t.Errorf("Expected 200, got %d", res.StatusCode)
}
@@ -153,7 +153,7 @@ func TestNewTempServer(t *testing.T) {
}
res.Body.Close()
- if res.StatusCode != 200 {
+ if res.StatusCode != http.StatusOK {
t.Errorf("Expected 200, got %d", res.StatusCode)
}
}
@@ -198,7 +198,7 @@ func TestNewTempServer(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if res.StatusCode != 404 {
+ if res.StatusCode != http.StatusNotFound {
t.Fatalf("Expected 404, got %d", res.StatusCode)
}
})
@@ -212,7 +212,7 @@ func TestNewTempServer_TLS(t *testing.T) {
srv := NewTempServer(
t,
WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"),
- WithTLSConfig(MakeTestTLSConfig(t, "../../../testdata")),
+ WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")),
)
defer srv.Stop()
diff --git a/pkg/repo/repotest/testdata/examplechart-0.1.0.tgz b/pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz
similarity index 100%
rename from pkg/repo/repotest/testdata/examplechart-0.1.0.tgz
rename to pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz
diff --git a/pkg/repo/repotest/testdata/examplechart/.helmignore b/pkg/repo/v1/repotest/testdata/examplechart/.helmignore
similarity index 100%
rename from pkg/repo/repotest/testdata/examplechart/.helmignore
rename to pkg/repo/v1/repotest/testdata/examplechart/.helmignore
diff --git a/pkg/repo/repotest/testdata/examplechart/Chart.yaml b/pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml
similarity index 100%
rename from pkg/repo/repotest/testdata/examplechart/Chart.yaml
rename to pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml
diff --git a/pkg/repo/repotest/testdata/examplechart/values.yaml b/pkg/repo/v1/repotest/testdata/examplechart/values.yaml
similarity index 100%
rename from pkg/repo/repotest/testdata/examplechart/values.yaml
rename to pkg/repo/v1/repotest/testdata/examplechart/values.yaml
diff --git a/pkg/repo/repotest/tlsconfig.go b/pkg/repo/v1/repotest/tlsconfig.go
similarity index 98%
rename from pkg/repo/repotest/tlsconfig.go
rename to pkg/repo/v1/repotest/tlsconfig.go
index 3914a4d3f..3ea7338ff 100644
--- a/pkg/repo/repotest/tlsconfig.go
+++ b/pkg/repo/v1/repotest/tlsconfig.go
@@ -26,6 +26,7 @@ import (
)
func MakeTestTLSConfig(t *testing.T, path string) *tls.Config {
+ t.Helper()
ca, pub, priv := filepath.Join(path, "rootca.crt"), filepath.Join(path, "crt.pem"), filepath.Join(path, "key.pem")
insecure := false
diff --git a/pkg/repo/testdata/chartmuseum-index.yaml b/pkg/repo/v1/testdata/chartmuseum-index.yaml
similarity index 100%
rename from pkg/repo/testdata/chartmuseum-index.yaml
rename to pkg/repo/v1/testdata/chartmuseum-index.yaml
diff --git a/pkg/repo/testdata/local-index-annotations.yaml b/pkg/repo/v1/testdata/local-index-annotations.yaml
similarity index 100%
rename from pkg/repo/testdata/local-index-annotations.yaml
rename to pkg/repo/v1/testdata/local-index-annotations.yaml
diff --git a/pkg/repo/testdata/local-index-unordered.yaml b/pkg/repo/v1/testdata/local-index-unordered.yaml
similarity index 100%
rename from pkg/repo/testdata/local-index-unordered.yaml
rename to pkg/repo/v1/testdata/local-index-unordered.yaml
diff --git a/pkg/repo/testdata/local-index.json b/pkg/repo/v1/testdata/local-index.json
similarity index 100%
rename from pkg/repo/testdata/local-index.json
rename to pkg/repo/v1/testdata/local-index.json
diff --git a/pkg/repo/testdata/local-index.yaml b/pkg/repo/v1/testdata/local-index.yaml
similarity index 100%
rename from pkg/repo/testdata/local-index.yaml
rename to pkg/repo/v1/testdata/local-index.yaml
diff --git a/pkg/repo/testdata/old-repositories.yaml b/pkg/repo/v1/testdata/old-repositories.yaml
similarity index 100%
rename from pkg/repo/testdata/old-repositories.yaml
rename to pkg/repo/v1/testdata/old-repositories.yaml
diff --git a/pkg/repo/testdata/repositories.yaml b/pkg/repo/v1/testdata/repositories.yaml
similarity index 100%
rename from pkg/repo/testdata/repositories.yaml
rename to pkg/repo/v1/testdata/repositories.yaml
diff --git a/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz b/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..8731dce02
Binary files /dev/null and b/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz differ
diff --git a/pkg/repo/testdata/repository/sprocket-1.1.0.tgz b/pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz
similarity index 100%
rename from pkg/repo/testdata/repository/sprocket-1.1.0.tgz
rename to pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz
diff --git a/pkg/repo/testdata/repository/sprocket-1.2.0.tgz b/pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz
similarity index 100%
rename from pkg/repo/testdata/repository/sprocket-1.2.0.tgz
rename to pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz
diff --git a/pkg/repo/testdata/repository/universe/zarthal-1.0.0.tgz b/pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz
similarity index 100%
rename from pkg/repo/testdata/repository/universe/zarthal-1.0.0.tgz
rename to pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz
diff --git a/pkg/repo/testdata/server/index.yaml b/pkg/repo/v1/testdata/server/index.yaml
similarity index 100%
rename from pkg/repo/testdata/server/index.yaml
rename to pkg/repo/v1/testdata/server/index.yaml
diff --git a/pkg/repo/testdata/server/test.txt b/pkg/repo/v1/testdata/server/test.txt
similarity index 100%
rename from pkg/repo/testdata/server/test.txt
rename to pkg/repo/v1/testdata/server/test.txt
diff --git a/pkg/storage/driver/cfgmaps.go b/pkg/storage/driver/cfgmaps.go
index 2b84b7f82..de097f294 100644
--- a/pkg/storage/driver/cfgmaps.go
+++ b/pkg/storage/driver/cfgmaps.go
@@ -19,11 +19,11 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"context"
"fmt"
+ "log/slog"
"strconv"
"strings"
"time"
- "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,7 +43,6 @@ const ConfigMapsDriverName = "ConfigMap"
// ConfigMapsInterface.
type ConfigMaps struct {
impl corev1.ConfigMapInterface
- Log func(string, ...interface{})
}
// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
@@ -51,7 +50,6 @@ type ConfigMaps struct {
func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
return &ConfigMaps{
impl: impl,
- Log: func(_ string, _ ...interface{}) {},
}
}
@@ -70,16 +68,16 @@ func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
return nil, ErrReleaseNotFound
}
- cfgmaps.Log("get: failed to get %q: %s", key, err)
+ slog.Debug("failed to get release", "key", key, slog.Any("error", err))
return nil, err
}
// found the configmap, decode the base64 data string
r, err := decodeRelease(obj.Data["release"])
if err != nil {
- cfgmaps.Log("get: failed to decode data %q: %s", key, err)
+ slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err
}
- r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
+ r.Labels = filterSystemLabels(obj.Labels)
// return the release object
return r, nil
}
@@ -93,7 +91,7 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil {
- cfgmaps.Log("list: failed to list: %s", err)
+ slog.Debug("failed to list releases", slog.Any("error", err))
return nil, err
}
@@ -104,11 +102,11 @@ func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Releas
for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"])
if err != nil {
- cfgmaps.Log("list: failed to decode release: %v: %s", item, err)
+ slog.Debug("failed to decode release", "item", item, slog.Any("error", err))
continue
}
- rls.Labels = item.ObjectMeta.Labels
+ rls.Labels = item.Labels
if filter(rls) {
results = append(results, rls)
@@ -123,7 +121,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
ls := kblabels.Set{}
for k, v := range labels {
if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
- return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ return nil, fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
}
ls[k] = v
}
@@ -132,7 +130,7 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
list, err := cfgmaps.impl.List(context.Background(), opts)
if err != nil {
- cfgmaps.Log("query: failed to query with labels: %s", err)
+ slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err
}
@@ -144,10 +142,10 @@ func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, err
for _, item := range list.Items {
rls, err := decodeRelease(item.Data["release"])
if err != nil {
- cfgmaps.Log("query: failed to decode release: %s", err)
+ slog.Debug("failed to decode release", slog.Any("error", err))
continue
}
- rls.Labels = item.ObjectMeta.Labels
+ rls.Labels = item.Labels
results = append(results, rls)
}
return results, nil
@@ -166,7 +164,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
// create a new configmap to hold the release
obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil {
- cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err)
+ slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err
}
// push the configmap object out into the kubiverse
@@ -175,7 +173,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
return ErrReleaseExists
}
- cfgmaps.Log("create: failed to create: %s", err)
+ slog.Debug("failed to create release", slog.Any("error", err))
return err
}
return nil
@@ -194,13 +192,13 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
// create a new configmap object to hold the release
obj, err := newConfigMapsObject(key, rls, lbs)
if err != nil {
- cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err)
+ slog.Debug("failed to encode release", "name", rls.Name, slog.Any("error", err))
return err
}
// push the configmap object out into the kubiverse
_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
if err != nil {
- cfgmaps.Log("update: failed to update: %s", err)
+ slog.Debug("failed to update release", slog.Any("error", err))
return err
}
return nil
diff --git a/pkg/storage/driver/cfgmaps_test.go b/pkg/storage/driver/cfgmaps_test.go
index 8ba6832fa..a563eb7d9 100644
--- a/pkg/storage/driver/cfgmaps_test.go
+++ b/pkg/storage/driver/cfgmaps_test.go
@@ -16,6 +16,7 @@ package driver
import (
"encoding/base64"
"encoding/json"
+ "errors"
"reflect"
"testing"
@@ -242,10 +243,8 @@ func TestConfigMapDelete(t *testing.T) {
if !reflect.DeepEqual(rel, rls) {
t.Errorf("Expected {%v}, got {%v}", rel, rls)
}
-
- // fetch the deleted release
_, err = cfgmaps.Get(key)
- if !reflect.DeepEqual(ErrReleaseNotFound, err) {
+ if !errors.Is(err, ErrReleaseNotFound) {
t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
}
}
diff --git a/pkg/storage/driver/driver.go b/pkg/storage/driver/driver.go
index 661c32e52..4f9d63928 100644
--- a/pkg/storage/driver/driver.go
+++ b/pkg/storage/driver/driver.go
@@ -17,10 +17,9 @@ limitations under the License.
package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
+ "errors"
"fmt"
- "github.com/pkg/errors"
-
rspb "helm.sh/helm/v4/pkg/release/v1"
)
diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go
index 199da6505..7dba5fea2 100644
--- a/pkg/storage/driver/mock_test.go
+++ b/pkg/storage/driver/mock_test.go
@@ -52,6 +52,7 @@ func testKey(name string, vers int) string {
}
func tsFixtureMemory(t *testing.T) *Memory {
+ t.Helper()
hs := []*rspb.Release{
// rls-a
releaseStub("rls-a", 4, "default", rspb.StatusDeployed),
@@ -83,6 +84,7 @@ func tsFixtureMemory(t *testing.T) *Memory {
// newTestFixtureCfgMaps initializes a MockConfigMapsInterface.
// ConfigMaps are created for each release provided.
func newTestFixtureCfgMaps(t *testing.T, releases ...*rspb.Release) *ConfigMaps {
+ t.Helper()
var mock MockConfigMapsInterface
mock.Init(t, releases...)
@@ -98,6 +100,7 @@ type MockConfigMapsInterface struct {
// Init initializes the MockConfigMapsInterface with the set of releases.
func (mock *MockConfigMapsInterface) Init(t *testing.T, releases ...*rspb.Release) {
+ t.Helper()
mock.objects = map[string]*v1.ConfigMap{}
for _, rls := range releases {
@@ -130,7 +133,7 @@ func (mock *MockConfigMapsInterface) List(_ context.Context, opts metav1.ListOpt
}
for _, cfgmap := range mock.objects {
- if labelSelector.Matches(kblabels.Set(cfgmap.ObjectMeta.Labels)) {
+ if labelSelector.Matches(kblabels.Set(cfgmap.Labels)) {
list.Items = append(list.Items, *cfgmap)
}
}
@@ -139,7 +142,7 @@ func (mock *MockConfigMapsInterface) List(_ context.Context, opts metav1.ListOpt
// Create creates a new ConfigMap.
func (mock *MockConfigMapsInterface) Create(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.CreateOptions) (*v1.ConfigMap, error) {
- name := cfgmap.ObjectMeta.Name
+ name := cfgmap.Name
if object, ok := mock.objects[name]; ok {
return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name)
}
@@ -149,7 +152,7 @@ func (mock *MockConfigMapsInterface) Create(_ context.Context, cfgmap *v1.Config
// Update updates a ConfigMap.
func (mock *MockConfigMapsInterface) Update(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.UpdateOptions) (*v1.ConfigMap, error) {
- name := cfgmap.ObjectMeta.Name
+ name := cfgmap.Name
if _, ok := mock.objects[name]; !ok {
return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
}
@@ -166,9 +169,10 @@ func (mock *MockConfigMapsInterface) Delete(_ context.Context, name string, _ me
return nil
}
-// newTestFixture initializes a MockSecretsInterface.
+// newTestFixtureSecrets initializes a MockSecretsInterface.
// Secrets are created for each release provided.
func newTestFixtureSecrets(t *testing.T, releases ...*rspb.Release) *Secrets {
+ t.Helper()
var mock MockSecretsInterface
mock.Init(t, releases...)
@@ -184,6 +188,7 @@ type MockSecretsInterface struct {
// Init initializes the MockSecretsInterface with the set of releases.
func (mock *MockSecretsInterface) Init(t *testing.T, releases ...*rspb.Release) {
+ t.Helper()
mock.objects = map[string]*v1.Secret{}
for _, rls := range releases {
@@ -216,7 +221,7 @@ func (mock *MockSecretsInterface) List(_ context.Context, opts metav1.ListOption
}
for _, secret := range mock.objects {
- if labelSelector.Matches(kblabels.Set(secret.ObjectMeta.Labels)) {
+ if labelSelector.Matches(kblabels.Set(secret.Labels)) {
list.Items = append(list.Items, *secret)
}
}
@@ -225,7 +230,7 @@ func (mock *MockSecretsInterface) List(_ context.Context, opts metav1.ListOption
// Create creates a new Secret.
func (mock *MockSecretsInterface) Create(_ context.Context, secret *v1.Secret, _ metav1.CreateOptions) (*v1.Secret, error) {
- name := secret.ObjectMeta.Name
+ name := secret.Name
if object, ok := mock.objects[name]; ok {
return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name)
}
@@ -235,7 +240,7 @@ func (mock *MockSecretsInterface) Create(_ context.Context, secret *v1.Secret, _
// Update updates a Secret.
func (mock *MockSecretsInterface) Update(_ context.Context, secret *v1.Secret, _ metav1.UpdateOptions) (*v1.Secret, error) {
- name := secret.ObjectMeta.Name
+ name := secret.Name
if _, ok := mock.objects[name]; !ok {
return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
}
@@ -254,6 +259,7 @@ func (mock *MockSecretsInterface) Delete(_ context.Context, name string, _ metav
// newTestFixtureSQL mocks the SQL database (for testing purposes)
func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) {
+ t.Helper()
sqlDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error when opening stub database connection: %v", err)
@@ -262,7 +268,6 @@ func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock)
sqlxDB := sqlx.NewDb(sqlDB, "sqlmock")
return &SQL{
db: sqlxDB,
- Log: func(_ string, _ ...interface{}) {},
namespace: "default",
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
}, mock
diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go
index 2ab128c6b..23a8f5cab 100644
--- a/pkg/storage/driver/secrets.go
+++ b/pkg/storage/driver/secrets.go
@@ -19,11 +19,11 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"context"
"fmt"
+ "log/slog"
"strconv"
"strings"
"time"
- "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,7 +43,6 @@ const SecretsDriverName = "Secret"
// SecretsInterface.
type Secrets struct {
impl corev1.SecretInterface
- Log func(string, ...interface{})
}
// NewSecrets initializes a new Secrets wrapping an implementation of
@@ -51,7 +50,6 @@ type Secrets struct {
func NewSecrets(impl corev1.SecretInterface) *Secrets {
return &Secrets{
impl: impl,
- Log: func(_ string, _ ...interface{}) {},
}
}
@@ -69,12 +67,15 @@ func (secrets *Secrets) Get(key string) (*rspb.Release, error) {
if apierrors.IsNotFound(err) {
return nil, ErrReleaseNotFound
}
- return nil, errors.Wrapf(err, "get: failed to get %q", key)
+ return nil, fmt.Errorf("get: failed to get %q: %w", key, err)
}
// found the secret, decode the base64 data string
r, err := decodeRelease(string(obj.Data["release"]))
- r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
- return r, errors.Wrapf(err, "get: failed to decode data %q", key)
+ if err != nil {
+ return r, fmt.Errorf("get: failed to decode data %q: %w", key, err)
+ }
+ r.Labels = filterSystemLabels(obj.Labels)
+ return r, nil
}
// List fetches all releases and returns the list releases such
@@ -86,7 +87,7 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release,
list, err := secrets.impl.List(context.Background(), opts)
if err != nil {
- return nil, errors.Wrap(err, "list: failed to list")
+ return nil, fmt.Errorf("list: failed to list: %w", err)
}
var results []*rspb.Release
@@ -96,11 +97,11 @@ func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release,
for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"]))
if err != nil {
- secrets.Log("list: failed to decode release: %v: %s", item, err)
+ slog.Debug("list failed to decode release", "key", item.Name, slog.Any("error", err))
continue
}
- rls.Labels = item.ObjectMeta.Labels
+ rls.Labels = item.Labels
if filter(rls) {
results = append(results, rls)
@@ -115,7 +116,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
ls := kblabels.Set{}
for k, v := range labels {
if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
- return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ return nil, fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
}
ls[k] = v
}
@@ -124,7 +125,7 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
list, err := secrets.impl.List(context.Background(), opts)
if err != nil {
- return nil, errors.Wrap(err, "query: failed to query with labels")
+ return nil, fmt.Errorf("query: failed to query with labels: %w", err)
}
if len(list.Items) == 0 {
@@ -135,10 +136,10 @@ func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error)
for _, item := range list.Items {
rls, err := decodeRelease(string(item.Data["release"]))
if err != nil {
- secrets.Log("query: failed to decode release: %s", err)
+ slog.Debug("failed to decode release", "key", item.Name, slog.Any("error", err))
continue
}
- rls.Labels = item.ObjectMeta.Labels
+ rls.Labels = item.Labels
results = append(results, rls)
}
return results, nil
@@ -157,7 +158,7 @@ func (secrets *Secrets) Create(key string, rls *rspb.Release) error {
// create a new secret to hold the release
obj, err := newSecretsObject(key, rls, lbs)
if err != nil {
- return errors.Wrapf(err, "create: failed to encode release %q", rls.Name)
+ return fmt.Errorf("create: failed to encode release %q: %w", rls.Name, err)
}
// push the secret object out into the kubiverse
if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
@@ -165,7 +166,7 @@ func (secrets *Secrets) Create(key string, rls *rspb.Release) error {
return ErrReleaseExists
}
- return errors.Wrap(err, "create: failed to create")
+ return fmt.Errorf("create: failed to create: %w", err)
}
return nil
}
@@ -183,11 +184,14 @@ func (secrets *Secrets) Update(key string, rls *rspb.Release) error {
// create a new secret object to hold the release
obj, err := newSecretsObject(key, rls, lbs)
if err != nil {
- return errors.Wrapf(err, "update: failed to encode release %q", rls.Name)
+ return fmt.Errorf("update: failed to encode release %q: %w", rls.Name, err)
}
// push the secret object out into the kubiverse
_, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
- return errors.Wrap(err, "update: failed to update")
+ if err != nil {
+ return fmt.Errorf("update: failed to update: %w", err)
+ }
+ return nil
}
// Delete deletes the Secret holding the release named by key.
@@ -198,7 +202,10 @@ func (secrets *Secrets) Delete(key string) (rls *rspb.Release, err error) {
}
// delete the release
err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{})
- return rls, err
+ if err != nil {
+ return nil, err
+ }
+ return rls, nil
}
// newSecretsObject constructs a kubernetes Secret object
diff --git a/pkg/storage/driver/secrets_test.go b/pkg/storage/driver/secrets_test.go
index 7affc81ab..9e45bae67 100644
--- a/pkg/storage/driver/secrets_test.go
+++ b/pkg/storage/driver/secrets_test.go
@@ -16,6 +16,7 @@ package driver
import (
"encoding/base64"
"encoding/json"
+ "errors"
"reflect"
"testing"
@@ -242,10 +243,8 @@ func TestSecretDelete(t *testing.T) {
if !reflect.DeepEqual(rel, rls) {
t.Errorf("Expected {%v}, got {%v}", rel, rls)
}
-
- // fetch the deleted release
_, err = secrets.Get(key)
- if !reflect.DeepEqual(ErrReleaseNotFound, err) {
+ if !errors.Is(err, ErrReleaseNotFound) {
t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
}
}
diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go
index 12bdd3ff4..46f6c6b2e 100644
--- a/pkg/storage/driver/sql.go
+++ b/pkg/storage/driver/sql.go
@@ -18,6 +18,8 @@ package driver // import "helm.sh/helm/v4/pkg/storage/driver"
import (
"fmt"
+ "log/slog"
+ "maps"
"sort"
"strconv"
"time"
@@ -86,8 +88,6 @@ type SQL struct {
db *sqlx.DB
namespace string
statementBuilder sq.StatementBuilderType
-
- Log func(string, ...interface{})
}
// Name returns the name of the driver.
@@ -108,13 +108,13 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
migrate.SetDisableCreateTable(false)
if err != nil {
- s.Log("checkAlreadyApplied: failed to get migration records: %v", err)
+ slog.Debug("failed to get migration records", slog.Any("error", err))
return false
}
for _, record := range records {
if _, ok := migrationsIDs[record.Id]; ok {
- s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt)
+ slog.Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt)
delete(migrationsIDs, record.Id)
}
}
@@ -122,7 +122,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
// check if all migrations applied
if len(migrationsIDs) != 0 {
for id := range migrationsIDs {
- s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id)
+ slog.Debug("find unapplied migration", "id", id)
}
return false
}
@@ -276,7 +276,7 @@ type SQLReleaseCustomLabelWrapper struct {
}
// NewSQL initializes a new sql driver.
-func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
+func NewSQL(connectionString string, namespace string) (*SQL, error) {
db, err := sqlx.Connect(postgreSQLDialect, connectionString)
if err != nil {
return nil, err
@@ -284,7 +284,6 @@ func NewSQL(connectionString string, logger func(string, ...interface{}), namesp
driver := &SQL{
db: db,
- Log: logger,
statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
}
@@ -309,24 +308,24 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
query, args, err := qb.ToSql()
if err != nil {
- s.Log("failed to build query: %v", err)
+ slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
// Get will return an error if the result is empty
if err := s.db.Get(&record, query, args...); err != nil {
- s.Log("got SQL error when getting release %s: %v", key, err)
+ slog.Debug("got SQL error when getting release", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound
}
release, err := decodeRelease(record.Body)
if err != nil {
- s.Log("get: failed to decode data %q: %v", key, err)
+ slog.Debug("failed to decode data", "key", key, slog.Any("error", err))
return nil, err
}
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
- s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err)
+ slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err
}
@@ -347,13 +346,13 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
query, args, err := sb.ToSql()
if err != nil {
- s.Log("failed to build query: %v", err)
+ slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil {
- s.Log("list: failed to list: %v", err)
+ slog.Debug("failed to list", slog.Any("error", err))
return nil, err
}
@@ -361,17 +360,15 @@ func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
for _, record := range records {
release, err := decodeRelease(record.Body)
if err != nil {
- s.Log("list: failed to decode release: %v: %v", record, err)
+ slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue
}
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
- s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
+ slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err
}
- for k, v := range getReleaseSystemLabels(release) {
- release.Labels[k] = v
- }
+ maps.Copy(release.Labels, getReleaseSystemLabels(release))
if filter(release) {
releases = append(releases, release)
@@ -396,7 +393,7 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
if _, ok := labelMap[key]; ok {
sb = sb.Where(sq.Eq{key: labels[key]})
} else {
- s.Log("unknown label %s", key)
+ slog.Debug("unknown label", "key", key)
return nil, fmt.Errorf("unknown label %s", key)
}
}
@@ -409,13 +406,13 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
// Build our query
query, args, err := sb.ToSql()
if err != nil {
- s.Log("failed to build query: %v", err)
+ slog.Debug("failed to build query", slog.Any("error", err))
return nil, err
}
var records = []SQLReleaseWrapper{}
if err := s.db.Select(&records, query, args...); err != nil {
- s.Log("list: failed to query with labels: %v", err)
+ slog.Debug("failed to query with labels", slog.Any("error", err))
return nil, err
}
@@ -427,12 +424,12 @@ func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
for _, record := range records {
release, err := decodeRelease(record.Body)
if err != nil {
- s.Log("list: failed to decode release: %v: %v", record, err)
+ slog.Debug("failed to decode release", "record", record, slog.Any("error", err))
continue
}
if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
- s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
+ slog.Debug("failed to get release custom labels", "namespace", record.Namespace, "key", record.Key, slog.Any("error", err))
return nil, err
}
@@ -456,13 +453,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls)
if err != nil {
- s.Log("failed to encode release: %v", err)
+ slog.Debug("failed to encode release", slog.Any("error", err))
return err
}
transaction, err := s.db.Beginx()
if err != nil {
- s.Log("failed to start SQL transaction: %v", err)
+ slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return fmt.Errorf("error beginning transaction: %v", err)
}
@@ -491,7 +488,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
int(time.Now().Unix()),
).ToSql()
if err != nil {
- s.Log("failed to build insert query: %v", err)
+ slog.Debug("failed to build insert query", slog.Any("error", err))
return err
}
@@ -505,17 +502,17 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if buildErr != nil {
- s.Log("failed to build select query: %v", buildErr)
+ slog.Debug("failed to build select query", "error", buildErr)
return err
}
var record SQLReleaseWrapper
if err := transaction.Get(&record, selectQuery, args...); err == nil {
- s.Log("release %s already exists", key)
+ slog.Debug("release already exists", "key", key)
return ErrReleaseExists
}
- s.Log("failed to store release %s in SQL database: %v", key, err)
+ slog.Debug("failed to store release in SQL database", "key", key, slog.Any("error", err))
return err
}
@@ -538,13 +535,13 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
if err != nil {
defer transaction.Rollback()
- s.Log("failed to build insert query: %v", err)
+ slog.Debug("failed to build insert query", slog.Any("error", err))
return err
}
if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
defer transaction.Rollback()
- s.Log("failed to write Labels: %v", err)
+ slog.Debug("failed to write Labels", slog.Any("error", err))
return err
}
}
@@ -563,7 +560,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
body, err := encodeRelease(rls)
if err != nil {
- s.Log("failed to encode release: %v", err)
+ slog.Debug("failed to encode release", slog.Any("error", err))
return err
}
@@ -580,12 +577,12 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
ToSql()
if err != nil {
- s.Log("failed to build update query: %v", err)
+ slog.Debug("failed to build update query", slog.Any("error", err))
return err
}
if _, err := s.db.Exec(query, args...); err != nil {
- s.Log("failed to update release %s in SQL database: %v", key, err)
+ slog.Debug("failed to update release in SQL database", "key", key, slog.Any("error", err))
return err
}
@@ -596,7 +593,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
func (s *SQL) Delete(key string) (*rspb.Release, error) {
transaction, err := s.db.Beginx()
if err != nil {
- s.Log("failed to start SQL transaction: %v", err)
+ slog.Debug("failed to start SQL transaction", slog.Any("error", err))
return nil, fmt.Errorf("error beginning transaction: %v", err)
}
@@ -607,20 +604,20 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if err != nil {
- s.Log("failed to build select query: %v", err)
+ slog.Debug("failed to build select query", slog.Any("error", err))
return nil, err
}
var record SQLReleaseWrapper
err = transaction.Get(&record, selectQuery, args...)
if err != nil {
- s.Log("release %s not found: %v", key, err)
+ slog.Debug("release not found", "key", key, slog.Any("error", err))
return nil, ErrReleaseNotFound
}
release, err := decodeRelease(record.Body)
if err != nil {
- s.Log("failed to decode release %s: %v", key, err)
+ slog.Debug("failed to decode release", "key", key, slog.Any("error", err))
transaction.Rollback()
return nil, err
}
@@ -632,18 +629,18 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
ToSql()
if err != nil {
- s.Log("failed to build delete query: %v", err)
+ slog.Debug("failed to build delete query", slog.Any("error", err))
return nil, err
}
_, err = transaction.Exec(deleteQuery, args...)
if err != nil {
- s.Log("failed perform delete query: %v", err)
+ slog.Debug("failed perform delete query", slog.Any("error", err))
return release, err
}
if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
- s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err)
+ slog.Debug("failed to get release custom labels", "namespace", s.namespace, "key", key, slog.Any("error", err))
return nil, err
}
@@ -654,7 +651,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
ToSql()
if err != nil {
- s.Log("failed to build delete Labels query: %v", err)
+ slog.Debug("failed to build delete Labels query", slog.Any("error", err))
return nil, err
}
_, err = transaction.Exec(deleteCustomLabelsQuery, args...)
diff --git a/pkg/storage/driver/util.go b/pkg/storage/driver/util.go
index 0abbe41b2..ca8e23cc2 100644
--- a/pkg/storage/driver/util.go
+++ b/pkg/storage/driver/util.go
@@ -22,6 +22,7 @@ import (
"encoding/base64"
"encoding/json"
"io"
+ "slices"
rspb "helm.sh/helm/v4/pkg/release/v1"
)
@@ -88,12 +89,7 @@ func decodeRelease(data string) (*rspb.Release, error) {
// Checks if label is system
func isSystemLabel(key string) bool {
- for _, v := range GetSystemLabels() {
- if key == v {
- return true
- }
- }
- return false
+ return slices.Contains(GetSystemLabels(), key)
}
// Removes system labels from labels map
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index 5e8718ea0..f086309bb 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -17,13 +17,13 @@ limitations under the License.
package storage // import "helm.sh/helm/v4/pkg/storage"
import (
+ "errors"
"fmt"
+ "log/slog"
"strings"
- "github.com/pkg/errors"
-
- relutil "helm.sh/helm/v4/pkg/release/util"
rspb "helm.sh/helm/v4/pkg/release/v1"
+ relutil "helm.sh/helm/v4/pkg/release/v1/util"
"helm.sh/helm/v4/pkg/storage/driver"
)
@@ -42,15 +42,13 @@ type Storage struct {
// be retained, including the most recent release. Values of 0 or less are
// ignored (meaning no limits are imposed).
MaxHistory int
-
- Log func(string, ...interface{})
}
// Get retrieves the release from storage. An error is returned
// if the storage driver failed to fetch the release, or the
// release identified by the key, version pair does not exist.
func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
- s.Log("getting release %q", makeKey(name, version))
+ slog.Debug("getting release", "key", makeKey(name, version))
return s.Driver.Get(makeKey(name, version))
}
@@ -58,7 +56,7 @@ func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
// error is returned if the storage driver fails to store the
// release, or a release with an identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error {
- s.Log("creating release %q", makeKey(rls.Name, rls.Version))
+ slog.Debug("creating release", "key", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 {
// Want to make space for one more release.
if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
@@ -73,7 +71,7 @@ func (s *Storage) Create(rls *rspb.Release) error {
// storage backend fails to update the release or if the release
// does not exist.
func (s *Storage) Update(rls *rspb.Release) error {
- s.Log("updating release %q", makeKey(rls.Name, rls.Version))
+ slog.Debug("updating release", "key", makeKey(rls.Name, rls.Version))
return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
}
@@ -81,22 +79,22 @@ func (s *Storage) Update(rls *rspb.Release) error {
// the storage backend fails to delete the release or if the release
// does not exist.
func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
- s.Log("deleting release %q", makeKey(name, version))
+ slog.Debug("deleting release", "key", makeKey(name, version))
return s.Driver.Delete(makeKey(name, version))
}
// ListReleases returns all releases from storage. An error is returned if the
// storage backend fails to retrieve the releases.
func (s *Storage) ListReleases() ([]*rspb.Release, error) {
- s.Log("listing all releases in storage")
- return s.Driver.List(func(_ *rspb.Release) bool { return true })
+ slog.Debug("listing all releases in storage")
+ return s.List(func(_ *rspb.Release) bool { return true })
}
// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
// if the storage backend fails to retrieve the releases.
func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
- s.Log("listing uninstalled releases in storage")
- return s.Driver.List(func(rls *rspb.Release) bool {
+ slog.Debug("listing uninstalled releases in storage")
+ return s.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
})
}
@@ -104,8 +102,8 @@ func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
// ListDeployed returns all releases with Status == DEPLOYED. An error is returned
// if the storage backend fails to retrieve the releases.
func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
- s.Log("listing all deployed releases in storage")
- return s.Driver.List(func(rls *rspb.Release) bool {
+ slog.Debug("listing all deployed releases in storage")
+ return s.List(func(rls *rspb.Release) bool {
return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
})
}
@@ -132,9 +130,9 @@ func (s *Storage) Deployed(name string) (*rspb.Release, error) {
// DeployedAll returns all deployed releases with the provided name, or
// returns driver.NewErrNoDeployedReleases if not found.
func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
- s.Log("getting deployed releases from %q history", name)
+ slog.Debug("getting deployed releases", "name", name)
- ls, err := s.Driver.Query(map[string]string{
+ ls, err := s.Query(map[string]string{
"name": name,
"owner": "helm",
"status": "deployed",
@@ -151,9 +149,9 @@ func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
// History returns the revision history for the release with the provided name, or
// returns driver.ErrReleaseNotFound if no such release name exists.
func (s *Storage) History(name string) ([]*rspb.Release, error) {
- s.Log("getting release history for %q", name)
+ slog.Debug("getting release history", "name", name)
- return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
+ return s.Query(map[string]string{"name": name, "owner": "helm"})
}
// removeLeastRecent removes items from history until the length number of releases
@@ -206,14 +204,14 @@ func (s *Storage) removeLeastRecent(name string, maximum int) error {
}
}
- s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
+ slog.Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
switch c := len(errs); c {
case 0:
return nil
case 1:
return errs[0]
default:
- return errors.Errorf("encountered %d deletion errors. First is: %s", c, errs[0])
+ return fmt.Errorf("encountered %d deletion errors. First is: %w", c, errs[0])
}
}
@@ -221,7 +219,7 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
key := makeKey(name, version)
_, err := s.Delete(name, version)
if err != nil {
- s.Log("error pruning %s from release history: %s", key, err)
+ slog.Debug("error pruning release", "key", key, slog.Any("error", err))
return err
}
return nil
@@ -229,13 +227,13 @@ func (s *Storage) deleteReleaseVersion(name string, version int) error {
// Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) {
- s.Log("getting last revision of %q", name)
+ slog.Debug("getting last revision", "name", name)
h, err := s.History(name)
if err != nil {
return nil, err
}
if len(h) == 0 {
- return nil, errors.Errorf("no revision for release %q", name)
+ return nil, fmt.Errorf("no revision for release %q", name)
}
relutil.Reverse(h, relutil.SortByRevision)
@@ -261,6 +259,5 @@ func Init(d driver.Driver) *Storage {
}
return &Storage{
Driver: d,
- Log: func(_ string, _ ...interface{}) {},
}
}
diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go
index 056b7f5f5..d3025eca3 100644
--- a/pkg/storage/storage_test.go
+++ b/pkg/storage/storage_test.go
@@ -17,12 +17,11 @@ limitations under the License.
package storage // import "helm.sh/helm/v4/pkg/storage"
import (
+ "errors"
"fmt"
"reflect"
"testing"
- "github.com/pkg/errors"
-
rspb "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage/driver"
)
@@ -312,7 +311,6 @@ func (d *MaxHistoryMockDriver) Name() string {
func TestMaxHistoryErrorHandling(t *testing.T) {
//func TestStorageRemoveLeastRecentWithError(t *testing.T) {
storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
- storage.Log = t.Logf
storage.MaxHistory = 1
@@ -338,7 +336,6 @@ func TestMaxHistoryErrorHandling(t *testing.T) {
func TestStorageRemoveLeastRecent(t *testing.T) {
storage := Init(driver.NewMemory())
- storage.Log = t.Logf
// Make sure that specifying this at the outset doesn't cause any bugs.
storage.MaxHistory = 10
@@ -395,7 +392,6 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
func TestStorageDoNotDeleteDeployed(t *testing.T) {
storage := Init(driver.NewMemory())
- storage.Log = t.Logf
storage.MaxHistory = 3
const name = "angry-bird"
@@ -476,7 +472,7 @@ func TestStorageLast(t *testing.T) {
}
}
-// TestUpgradeInitiallyFailedRelease tests a case when there are no deployed release yet, but history limit has been
+// TestUpgradeInitiallyFailedReleaseWithHistoryLimit tests a case when there are no deployed release yet, but history limit has been
// reached: the has-no-deployed-releases error should not occur in such case.
func TestUpgradeInitiallyFailedReleaseWithHistoryLimit(t *testing.T) {
storage := Init(driver.NewMemory())
diff --git a/pkg/strvals/literal_parser.go b/pkg/strvals/literal_parser.go
index f75655811..d34e5e854 100644
--- a/pkg/strvals/literal_parser.go
+++ b/pkg/strvals/literal_parser.go
@@ -20,8 +20,6 @@ import (
"fmt"
"io"
"strconv"
-
- "github.com/pkg/errors"
)
// ParseLiteral parses a set line interpreting the value as a literal string.
@@ -102,7 +100,7 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r
if len(key) == 0 {
return err
}
- return errors.Errorf("key %q has no value", string(key))
+ return fmt.Errorf("key %q has no value", string(key))
case lastRune == '=':
// found end of key: swallow the '=' and get the value
@@ -129,7 +127,7 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r
// recurse on sub-tree with remaining data
err := t.key(inner, nestedNameLevel)
if err == nil && len(inner) == 0 {
- return errors.Errorf("key map %q has no value", string(key))
+ return fmt.Errorf("key map %q has no value", string(key))
}
if len(inner) != 0 {
set(data, string(key), inner)
@@ -140,7 +138,7 @@ func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (r
// We are in a list index context, so we need to set an index.
i, err := t.keyIndex()
if err != nil {
- return errors.Wrap(err, "error parsing index")
+ return fmt.Errorf("error parsing index: %w", err)
}
kk := string(key)
@@ -178,7 +176,7 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([]
switch key, lastRune, err := runesUntilLiteral(t.sc, stop); {
case len(key) > 0:
- return list, errors.Errorf("unexpected data at end of array index: %q", key)
+ return list, fmt.Errorf("unexpected data at end of array index: %q", key)
case err != nil:
return list, err
@@ -214,7 +212,7 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([]
// now we have a nested list. Read the index and handle.
nextI, err := t.keyIndex()
if err != nil {
- return list, errors.Wrap(err, "error parsing index")
+ return list, fmt.Errorf("error parsing index: %w", err)
}
var crtList []interface{}
if len(list) > i {
@@ -233,7 +231,7 @@ func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([]
return setIndex(list, i, list2)
default:
- return nil, errors.Errorf("parse error: unexpected token %v", lastRune)
+ return nil, fmt.Errorf("parse error: unexpected token %v", lastRune)
}
}
diff --git a/pkg/strvals/parser.go b/pkg/strvals/parser.go
index a0e8d66d1..c65e98c84 100644
--- a/pkg/strvals/parser.go
+++ b/pkg/strvals/parser.go
@@ -18,13 +18,13 @@ package strvals
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"strconv"
"strings"
"unicode"
- "github.com/pkg/errors"
"sigs.k8s.io/yaml"
)
@@ -189,14 +189,14 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e
if len(k) == 0 {
return err
}
- return errors.Errorf("key %q has no value", string(k))
+ return fmt.Errorf("key %q has no value", string(k))
//set(data, string(k), "")
//return err
case last == '[':
// We are in a list index context, so we need to set an index.
i, err := t.keyIndex()
if err != nil {
- return errors.Wrap(err, "error parsing index")
+ return fmt.Errorf("error parsing index: %w", err)
}
kk := string(k)
// Find or create target list
@@ -261,7 +261,7 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e
case last == ',':
// No value given. Set the value to empty string. Return error.
set(data, string(k), "")
- return errors.Errorf("key %q has no value (cannot end with ,)", string(k))
+ return fmt.Errorf("key %q has no value (cannot end with ,)", string(k))
case last == '.':
// Check value name is within the maximum nested name level
nestedNameLevel++
@@ -278,7 +278,7 @@ func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr e
// Recurse
e := t.key(inner, nestedNameLevel)
if e == nil && len(inner) == 0 {
- return errors.Errorf("key map %q has no value", string(k))
+ return fmt.Errorf("key map %q has no value", string(k))
}
if len(inner) != 0 {
set(data, string(k), inner)
@@ -332,6 +332,7 @@ func (t *parser) keyIndex() (int, error) {
return strconv.Atoi(string(v))
}
+
func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interface{}, error) {
if i < 0 {
return list, fmt.Errorf("negative %d index not allowed", i)
@@ -339,7 +340,7 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa
stop := runeSet([]rune{'[', '.', '='})
switch k, last, err := runesUntil(t.sc, stop); {
case len(k) > 0:
- return list, errors.Errorf("unexpected data at end of array index: %q", k)
+ return list, fmt.Errorf("unexpected data at end of array index: %q", k)
case err != nil:
return list, err
case last == '=':
@@ -394,7 +395,7 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa
// now we have a nested list. Read the index and handle.
nextI, err := t.keyIndex()
if err != nil {
- return list, errors.Wrap(err, "error parsing index")
+ return list, fmt.Errorf("error parsing index: %w", err)
}
var crtList []interface{}
if len(list) > i {
@@ -430,7 +431,7 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa
}
return setIndex(list, i, inner)
default:
- return nil, errors.Errorf("parse error: unexpected token %v", last)
+ return nil, fmt.Errorf("parse error: unexpected token %v", last)
}
}
diff --git a/pkg/time/time.go b/pkg/time/time.go
index 13b1211e6..16973b455 100644
--- a/pkg/time/time.go
+++ b/pkg/time/time.go
@@ -41,7 +41,7 @@ func Now() Time {
}
func (t Time) MarshalJSON() ([]byte, error) {
- if t.Time.IsZero() {
+ if t.IsZero() {
return []byte(emptyString), nil
}
@@ -65,6 +65,7 @@ func Parse(layout, value string) (Time, error) {
t, err := time.Parse(layout, value)
return Time{Time: t}, err
}
+
func ParseInLocation(layout, value string, loc *time.Location) (Time, error) {
t, err := time.ParseInLocation(layout, value, loc)
return Time{Time: t}, err
diff --git a/pkg/time/time_test.go b/pkg/time/time_test.go
index 20f0f8e29..342ca4a10 100644
--- a/pkg/time/time_test.go
+++ b/pkg/time/time_test.go
@@ -20,64 +20,134 @@ import (
"encoding/json"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var (
- testingTime, _ = Parse(time.RFC3339, "1977-09-02T22:04:05Z")
- testingTimeString = `"1977-09-02T22:04:05Z"`
+ timeParseString = `"1977-09-02T22:04:05Z"`
+ timeString = "1977-09-02 22:04:05 +0000 UTC"
)
-func TestNonZeroValueMarshal(t *testing.T) {
+func givenTime(t *testing.T) Time {
+ t.Helper()
+ result, err := Parse(time.RFC3339, "1977-09-02T22:04:05Z")
+ require.NoError(t, err)
+ return result
+}
+
+func TestDate(t *testing.T) {
+ testingTime := givenTime(t)
+ got := Date(1977, 9, 2, 22, 04, 05, 0, time.UTC)
+ assert.Equal(t, timeString, got.String())
+ assert.True(t, testingTime.Equal(got))
+ assert.True(t, got.Equal(testingTime))
+}
+
+func TestNow(t *testing.T) {
+ testingTime := givenTime(t)
+ got := Now()
+ assert.True(t, testingTime.Before(got))
+ assert.True(t, got.After(testingTime))
+}
+
+func TestTime_Add(t *testing.T) {
+ testingTime := givenTime(t)
+ got := testingTime.Add(time.Hour)
+ assert.Equal(t, timeString, testingTime.String())
+ assert.Equal(t, "1977-09-02 23:04:05 +0000 UTC", got.String())
+}
+
+func TestTime_AddDate(t *testing.T) {
+ testingTime := givenTime(t)
+ got := testingTime.AddDate(1, 1, 1)
+ assert.Equal(t, "1978-10-03 22:04:05 +0000 UTC", got.String())
+}
+
+func TestTime_In(t *testing.T) {
+ testingTime := givenTime(t)
+ edt, err := time.LoadLocation("America/New_York")
+ assert.NoError(t, err)
+ got := testingTime.In(edt)
+ assert.Equal(t, "America/New_York", got.Location().String())
+}
+
+func TestTime_MarshalJSONNonZero(t *testing.T) {
+ testingTime := givenTime(t)
res, err := json.Marshal(testingTime)
- if err != nil {
- t.Fatal(err)
- }
- if testingTimeString != string(res) {
- t.Errorf("expected a marshaled value of %s, got %s", testingTimeString, res)
- }
+ assert.NoError(t, err)
+ assert.Equal(t, timeParseString, string(res))
}
-func TestZeroValueMarshal(t *testing.T) {
+func TestTime_MarshalJSONZeroValue(t *testing.T) {
res, err := json.Marshal(Time{})
- if err != nil {
- t.Fatal(err)
- }
- if string(res) != emptyString {
- t.Errorf("expected zero value to marshal to empty string, got %s", res)
- }
+ assert.NoError(t, err)
+ assert.Equal(t, `""`, string(res))
}
-func TestNonZeroValueUnmarshal(t *testing.T) {
+func TestTime_Round(t *testing.T) {
+ testingTime := givenTime(t)
+ got := testingTime.Round(time.Hour)
+ assert.Equal(t, timeString, testingTime.String())
+ assert.Equal(t, "1977-09-02 22:00:00 +0000 UTC", got.String())
+}
+
+func TestTime_Sub(t *testing.T) {
+ testingTime := givenTime(t)
+ before, err := Parse(time.RFC3339, "1977-09-01T22:04:05Z")
+ require.NoError(t, err)
+ got := testingTime.Sub(before)
+ assert.Equal(t, "24h0m0s", got.String())
+}
+
+func TestTime_Truncate(t *testing.T) {
+ testingTime := givenTime(t)
+ got := testingTime.Truncate(time.Hour)
+ assert.Equal(t, timeString, testingTime.String())
+ assert.Equal(t, "1977-09-02 22:00:00 +0000 UTC", got.String())
+}
+
+func TestTime_UTC(t *testing.T) {
+ edtTime, err := Parse(time.RFC3339, "1977-09-03T05:04:05+07:00")
+ require.NoError(t, err)
+ got := edtTime.UTC()
+ assert.Equal(t, timeString, got.String())
+}
+
+func TestTime_UnmarshalJSONNonZeroValue(t *testing.T) {
+ testingTime := givenTime(t)
var myTime Time
- err := json.Unmarshal([]byte(testingTimeString), &myTime)
- if err != nil {
- t.Fatal(err)
- }
- if !myTime.Equal(testingTime) {
- t.Errorf("expected time to be equal to %v, got %v", testingTime, myTime)
- }
+ err := json.Unmarshal([]byte(timeParseString), &myTime)
+ assert.NoError(t, err)
+ assert.True(t, testingTime.Equal(myTime))
}
-func TestEmptyStringUnmarshal(t *testing.T) {
+func TestTime_UnmarshalJSONEmptyString(t *testing.T) {
var myTime Time
err := json.Unmarshal([]byte(emptyString), &myTime)
- if err != nil {
- t.Fatal(err)
- }
- if !myTime.IsZero() {
- t.Errorf("expected time to be equal to zero value, got %v", myTime)
- }
+ assert.NoError(t, err)
+ assert.True(t, myTime.IsZero())
+}
+
+func TestTime_UnmarshalJSONNullString(t *testing.T) {
+ var myTime Time
+ err := json.Unmarshal([]byte("null"), &myTime)
+ assert.NoError(t, err)
+ assert.True(t, myTime.IsZero())
}
-func TestZeroValueUnmarshal(t *testing.T) {
+func TestTime_UnmarshalJSONZeroValue(t *testing.T) {
// This test ensures that we can unmarshal any time value that was output
// with the current go default value of "0001-01-01T00:00:00Z"
var myTime Time
err := json.Unmarshal([]byte(`"0001-01-01T00:00:00Z"`), &myTime)
- if err != nil {
- t.Fatal(err)
- }
- if !myTime.IsZero() {
- t.Errorf("expected time to be equal to zero value, got %v", myTime)
- }
+ assert.NoError(t, err)
+ assert.True(t, myTime.IsZero())
+}
+
+func TestUnix(t *testing.T) {
+ got := Unix(242085845, 0)
+ assert.Equal(t, int64(242085845), got.Unix())
+ assert.Equal(t, timeString, got.UTC().String())
}
diff --git a/pkg/uploader/chart_uploader.go b/pkg/uploader/chart_uploader.go
index 41dfd4455..b3d612e38 100644
--- a/pkg/uploader/chart_uploader.go
+++ b/pkg/uploader/chart_uploader.go
@@ -20,8 +20,6 @@ import (
"io"
"net/url"
- "github.com/pkg/errors"
-
"helm.sh/helm/v4/pkg/pusher"
"helm.sh/helm/v4/pkg/registry"
)
@@ -42,7 +40,7 @@ type ChartUploader struct {
func (c *ChartUploader) UploadTo(ref, remote string) error {
u, err := url.Parse(remote)
if err != nil {
- return errors.Errorf("invalid chart URL format: %s", remote)
+ return fmt.Errorf("invalid chart URL format: %s", remote)
}
if u.Scheme == "" {
diff --git a/scripts/coverage.sh b/scripts/coverage.sh
index 2164d94da..487d4eeee 100755
--- a/scripts/coverage.sh
+++ b/scripts/coverage.sh
@@ -19,9 +19,10 @@ set -euo pipefail
covermode=${COVERMODE:-atomic}
coverdir=$(mktemp -d /tmp/coverage.XXXXXXXXXX)
profile="${coverdir}/cover.out"
+target="${1:-./...}" # by default the whole repository is tested
generate_cover_data() {
- for d in $(go list ./...) ; do
+ for d in $(go list "$target"); do
(
local output="${coverdir}/${d//\//-}.cover"
go test -coverprofile="${output}" -covermode="$covermode" "$d"
diff --git a/testdata/localhost-crt.pem b/testdata/localhost-crt.pem
new file mode 100644
index 000000000..70fa0a429
--- /dev/null
+++ b/testdata/localhost-crt.pem
@@ -0,0 +1,73 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number:
+ 7f:5e:fa:21:fa:ee:e4:6a:be:9b:c2:80:bf:ed:42:f3:2d:47:f5:d2
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=CO, L=Boulder, O=Helm, CN=helm.sh
+ Validity
+ Not Before: Nov 6 21:59:18 2023 GMT
+ Not After : Nov 3 21:59:18 2033 GMT
+ Subject: C=CA, ST=ON, L=Kitchener, O=Helm, CN=localhost
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:c8:89:55:0d:0b:f1:da:e6:c0:70:7d:d3:27:cd:
+ b8:a8:81:8b:7c:a4:89:e5:d1:b1:78:01:1d:df:44:
+ 88:0b:fc:d6:81:35:3d:d1:3b:5e:8f:bb:93:b3:7e:
+ 28:db:ed:ff:a0:13:3a:70:a3:fe:94:6b:0b:fe:fb:
+ 63:00:b0:cb:dc:81:cd:80:dc:d0:2f:bf:b2:4f:9a:
+ 81:d4:22:dc:97:c8:8f:27:86:59:91:fa:92:05:75:
+ c4:cc:6b:f5:a9:6b:74:1e:f5:db:a9:f8:bf:8c:a2:
+ 25:fd:a0:cc:79:f4:25:57:74:a9:23:9b:e2:b7:22:
+ 7a:14:7a:3d:ea:f1:7e:32:6b:57:6c:2e:c6:4f:75:
+ 54:f9:6b:54:d2:ca:eb:54:1c:af:39:15:9b:d0:7c:
+ 0f:f8:55:51:04:ea:da:fa:7b:8b:63:0f:ac:39:b1:
+ f6:4b:8e:4e:f6:ea:e9:7b:e6:ba:5e:5a:8e:91:ef:
+ dc:b1:7d:52:3f:73:83:52:46:83:48:49:ff:f2:2d:
+ ca:54:f2:36:bb:49:cc:59:99:c0:9e:cf:8e:78:55:
+ 6c:ed:7d:7e:83:b8:59:2c:7d:f8:1a:81:f0:7d:f5:
+ 27:f2:db:ae:d4:31:54:38:fe:47:b2:ee:16:20:0f:
+ f1:db:2d:28:bf:6f:38:eb:11:bb:9a:d4:b2:5a:3a:
+ 4a:7f
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Alternative Name:
+ DNS:localhost
+ Signature Algorithm: sha256WithRSAEncryption
+ 47:47:fe:29:ca:94:28:75:59:ba:ab:67:ab:c6:a6:0b:0a:f2:
+ 0f:26:d9:1d:35:db:68:a5:d8:f5:1f:d1:87:e7:a7:74:fd:c0:
+ 22:aa:c8:ec:6c:d3:ac:8a:0b:ed:59:3a:a0:12:77:7c:53:74:
+ fd:30:59:34:8f:a4:ef:5b:98:3f:ff:cf:89:87:ed:d3:7f:41:
+ 2f:b1:9a:12:71:bb:fe:3a:cf:77:16:32:bc:83:90:cc:52:2f:
+ 3b:f4:ae:db:b1:bb:f0:dd:30:d4:03:17:5e:47:b7:06:86:7a:
+ 16:b1:72:2f:80:5d:d4:c0:f9:6c:91:df:5a:c5:15:86:66:68:
+ c8:90:8e:f1:a2:bb:40:0f:ef:26:1b:02:c4:42:de:8c:69:ec:
+ ad:27:d0:bc:da:7c:76:33:86:de:b7:c4:04:64:e6:f6:dc:44:
+ 89:7b:b8:2f:c7:28:7a:4c:a6:01:ad:a5:17:64:3a:23:da:aa:
+ db:ce:3f:86:e9:92:dc:0d:c4:5a:b4:52:a8:8a:ee:3d:62:7d:
+ b1:c8:fa:ef:96:2b:ab:f1:e1:6d:6f:7d:1e:ce:bc:7a:d0:92:
+ 02:1b:c8:55:36:77:bf:d4:42:d3:fc:57:ca:b7:cc:95:be:ce:
+ f8:6e:b2:28:ca:4d:9a:00:7d:78:c8:56:04:2e:b3:ac:03:fa:
+ 05:d8:42:bd
+-----BEGIN CERTIFICATE-----
+MIIDRDCCAiygAwIBAgIUf176Ifru5Gq+m8KAv+1C8y1H9dIwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAwDgYDVQQHDAdCb3VsZGVy
+MQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNoMB4XDTIzMTEwNjIxNTkx
+OFoXDTMzMTEwMzIxNTkxOFowUTELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMRIw
+EAYDVQQHDAlLaXRjaGVuZXIxDTALBgNVBAoMBEhlbG0xEjAQBgNVBAMMCWxvY2Fs
+aG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMiJVQ0L8drmwHB9
+0yfNuKiBi3ykieXRsXgBHd9EiAv81oE1PdE7Xo+7k7N+KNvt/6ATOnCj/pRrC/77
+YwCwy9yBzYDc0C+/sk+agdQi3JfIjyeGWZH6kgV1xMxr9alrdB7126n4v4yiJf2g
+zHn0JVd0qSOb4rciehR6PerxfjJrV2wuxk91VPlrVNLK61QcrzkVm9B8D/hVUQTq
+2vp7i2MPrDmx9kuOTvbq6Xvmul5ajpHv3LF9Uj9zg1JGg0hJ//ItylTyNrtJzFmZ
+wJ7PjnhVbO19foO4WSx9+BqB8H31J/LbrtQxVDj+R7LuFiAP8dstKL9vOOsRu5rU
+slo6Sn8CAwEAAaMYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+CwUAA4IBAQBHR/4pypQodVm6q2erxqYLCvIPJtkdNdtopdj1H9GH56d0/cAiqsjs
+bNOsigvtWTqgEnd8U3T9MFk0j6TvW5g//8+Jh+3Tf0EvsZoScbv+Os93FjK8g5DM
+Ui879K7bsbvw3TDUAxdeR7cGhnoWsXIvgF3UwPlskd9axRWGZmjIkI7xortAD+8m
+GwLEQt6MaeytJ9C82nx2M4bet8QEZOb23ESJe7gvxyh6TKYBraUXZDoj2qrbzj+G
+6ZLcDcRatFKoiu49Yn2xyPrvliur8eFtb30ezrx60JICG8hVNne/1ELT/FfKt8yV
+vs74brIoyk2aAH14yFYELrOsA/oF2EK9
+-----END CERTIFICATE-----
diff --git a/testdata/openssl.conf b/testdata/openssl.conf
index 9b27e445b..be5ff04b7 100644
--- a/testdata/openssl.conf
+++ b/testdata/openssl.conf
@@ -40,3 +40,7 @@ subjectAltName = @alternate_names
[alternate_names]
DNS.1 = helm.sh
IP.1 = 127.0.0.1
+
+# # Used to generate localhost-crt.pem
+# [alternate_names]
+# DNS.1 = localhost