From f25e0d607362857144bd9bfd263213262e5f1f93 Mon Sep 17 00:00:00 2001 From: Daniel Pap Date: Sun, 10 Dec 2023 05:55:25 +0100 Subject: [PATCH 01/88] show crds command output separated by document separator Signed-off-by: Daniel Pap --- pkg/action/show.go | 6 +++--- pkg/action/show_test.go | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pkg/action/show.go b/pkg/action/show.go index 6ed855b83..46ba81ff6 100644 --- a/pkg/action/show.go +++ b/pkg/action/show.go @@ -139,10 +139,10 @@ func (s *Show) Run(chartpath string) (string, error) { if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll { crds := s.chart.CRDObjects() if len(crds) > 0 { - if s.OutputFormat == ShowAll && !bytes.HasPrefix(crds[0].File.Data, []byte("---")) { - fmt.Fprintln(&out, "---") - } for _, crd := range crds { + if !bytes.HasPrefix(crd.File.Data, []byte("---")) { + fmt.Fprintln(&out, "---") + } fmt.Fprintf(&out, "%s\n", string(crd.File.Data)) } } diff --git a/pkg/action/show_test.go b/pkg/action/show_test.go index 8b617ea85..ab6f464ec 100644 --- a/pkg/action/show_test.go +++ b/pkg/action/show_test.go @@ -32,6 +32,7 @@ func TestShow(t *testing.T) { {Name: "crds/ignoreme.txt", Data: []byte("error")}, {Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")}, {Name: "crds/bar.json", Data: []byte("---\nbar\n")}, + {Name: "crds/baz.yaml", Data: []byte("baz\n")}, }, Raw: []*chart.File{ {Name: "values.yaml", Data: []byte("VALUES\n")}, @@ -58,6 +59,9 @@ foo --- bar +--- +baz + ` if output != expect { t.Errorf("Expected\n%q\nGot\n%q\n", expect, output) @@ -102,6 +106,7 @@ func TestShowCRDs(t *testing.T) { {Name: "crds/ignoreme.txt", Data: []byte("error")}, {Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")}, {Name: "crds/bar.json", Data: []byte("---\nbar\n")}, + {Name: "crds/baz.yaml", Data: []byte("baz\n")}, }, } @@ -116,6 +121,9 @@ foo --- bar +--- +baz + ` if output != expect { t.Errorf("Expected\n%q\nGot\n%q\n", expect, output) From be88c963c4281984b2b2bd97d88b119f123d6a71 Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Fri, 14 Jun 2024 23:19:39 +0200 Subject: [PATCH 02/88] style(pkg/chartutil): add missing dots and indentation to defaultValues Pure cosmetics, add missing dots to a few comments and make indentation coherent between different parts of the defaultValues YAML. Signed-off-by: Leo R. Lundgren --- pkg/chartutil/create.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/chartutil/create.go b/pkg/chartutil/create.go index 50212f9d5..f9cdbc463 100644 --- a/pkg/chartutil/create.go +++ b/pkg/chartutil/create.go @@ -119,14 +119,14 @@ nameOverride: "" fullnameOverride: "" serviceAccount: - # Specifies whether a service account should be created + # Specifies whether a service account should be created. create: true # Automatically mount a ServiceAccount's API credentials? automount: true - # Annotations to add to the service account + # Annotations to add to the service account. annotations: {} # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template + # If not set and create is true, a name is generated using the fullname template. 
name: "" podAnnotations: {} @@ -159,9 +159,9 @@ ingress: - path: / pathType: ImplementationSpecific tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local + # - secretName: chart-example-tls + # hosts: + # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious @@ -193,16 +193,16 @@ autoscaling: # Additional volumes on the output Deployment definition. volumes: [] -# - name: foo -# secret: -# secretName: mysecret -# optional: false + # - name: foo + # secret: + # secretName: mysecret + # optional: false # Additional volumeMounts on the output Deployment definition. volumeMounts: [] -# - name: foo -# mountPath: "/etc/foo" -# readOnly: true + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true nodeSelector: {} From 5635ce585e53a10ae4e4daff96315b0153881707 Mon Sep 17 00:00:00 2001 From: satoru Date: Sun, 8 Jan 2023 10:37:17 +0800 Subject: [PATCH 03/88] Refactor, use sort.Slice to reduce boilerplate code Signed-off-by: satoru --- pkg/releaseutil/sorter.go | 43 ++++++++++----------------------------- 1 file changed, 11 insertions(+), 32 deletions(-) diff --git a/pkg/releaseutil/sorter.go b/pkg/releaseutil/sorter.go index 1a8aa78a6..1ea5ef30d 100644 --- a/pkg/releaseutil/sorter.go +++ b/pkg/releaseutil/sorter.go @@ -22,35 +22,6 @@ import ( rspb "helm.sh/helm/v3/pkg/release" ) -type list []*rspb.Release - -func (s list) Len() int { return len(s) } -func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// ByName sorts releases by name -type ByName struct{ list } - -// Less compares to releases -func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name } - -// ByDate sorts releases by date -type ByDate struct{ list } - -// Less compares to releases -func (s ByDate) Less(i, j int) bool { - ti := s.list[i].Info.LastDeployed.Unix() - tj := s.list[j].Info.LastDeployed.Unix() - return ti < tj -} - -// ByRevision sorts releases by revision number -type ByRevision struct{ list } - -// Less compares to releases -func (s ByRevision) Less(i, j int) bool { - return s.list[i].Version < s.list[j].Version -} - // Reverse reverses the list of releases sorted by the sort func. func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { sortFn(list) @@ -62,17 +33,25 @@ func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { // SortByName returns the list of releases sorted // in lexicographical order. func SortByName(list []*rspb.Release) { - sort.Sort(ByName{list}) + sort.Slice(list, func(i, j int) bool { + return list[i].Name < list[j].Name + }) } // SortByDate returns the list of releases sorted by a // release's last deployed time (in seconds). func SortByDate(list []*rspb.Release) { - sort.Sort(ByDate{list}) + sort.Slice(list, func(i, j int) bool { + ti := list[i].Info.LastDeployed.Unix() + tj := list[j].Info.LastDeployed.Unix() + return ti < tj + }) } // SortByRevision returns the list of releases sorted by a // release's revision number (release.Version). 
func SortByRevision(list []*rspb.Release) { - sort.Sort(ByRevision{list}) + sort.Slice(list, func(i, j int) bool { + return list[i].Version < list[j].Version + }) } From f55c462a79720b0ba56d94d9e28529f8c5b8c7d9 Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Thu, 19 Jun 2025 19:42:45 -0600 Subject: [PATCH 04/88] fix: force bearer oauth for everything Signed-off-by: Terry Howe --- pkg/registry/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 3ea68f181..339939c6f 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -134,6 +134,7 @@ func NewClient(options ...ClientOption) (*Client, error) { authorizer.Cache = auth.NewCache() } + authorizer.ForceAttemptOAuth2 = true client.authorizer = &authorizer } From 5f9cbe6f4afa78be51d9af8a3870d1523c0b4245 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 21 May 2025 11:06:42 -0700 Subject: [PATCH 05/88] fix: Port pluginCommand & command warning Signed-off-by: George Jenkins --- pkg/plugin/plugin.go | 12 ++++++------ pkg/plugin/plugin_test.go | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go index 9d79ab4fc..67676b103 100644 --- a/pkg/plugin/plugin.go +++ b/pkg/plugin/plugin.go @@ -83,7 +83,7 @@ type Metadata struct { PlatformCommand []PlatformCommand `json:"platformCommand"` // Command is the plugin command, as a single string. - // Providing a command will result in an error if PlatformCommand is also set. + // Providing a command will result in an deprecation warning if PlatformCommand is also set. // // The command will be passed through environment expansion, so env vars can // be present in this command. Unless IgnoreFlags is set, this will @@ -92,7 +92,7 @@ type Metadata struct { // Note that command is not executed in a shell. To do so, we suggest // pointing the command to a shell script. // - // DEPRECATED: Use PlatformCommand instead. Remove in Helm 4. + // DEPRECATED: Use PlatformCommand instead Command string `json:"command"` // IgnoreFlags ignores any flags passed in from Helm @@ -119,14 +119,14 @@ type Metadata struct { PlatformHooks PlatformHooks `json:"platformHooks"` // Hooks are commands that will run on plugin events, as a single string. - // Providing a hooks will result in an error if PlatformHooks is also set. + // Providing a command will result in an deprecation warning if PlatformHooks is also set. // // The command will be passed through environment expansion, so env vars can // be present in this command. // // Note that the command is executed in the sh shell. // - // DEPRECATED: Use PlatformHooks instead. Remove in Helm 4. 
+ // DEPRECATED: Use PlatformHooks instead Hooks Hooks // Downloaders field is used if the plugin supply downloader mechanism @@ -265,11 +265,11 @@ func validatePluginData(plug *Plugin, filepath string) error { plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage) if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 { - return fmt.Errorf("both platformCommand and command are set in %q", filepath) + fmt.Printf("WARNING: both 'platformCommand' and 'command' are set in %q (this will become an error in a future Helm version)\n", filepath) } if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 { - return fmt.Errorf("both platformHooks and hooks are set in %q", filepath) + fmt.Printf("WARNING: both 'platformHooks' and 'hooks' are set in %q (this will become an error in a future Helm version)\n", filepath) } // We could also validate SemVer, executable, and other fields should we so choose. diff --git a/pkg/plugin/plugin_test.go b/pkg/plugin/plugin_test.go index b96428f6b..20bd2f737 100644 --- a/pkg/plugin/plugin_test.go +++ b/pkg/plugin/plugin_test.go @@ -496,8 +496,8 @@ func TestValidatePluginData(t *testing.T) { {false, mockMissingMeta}, // Test if the metadata section missing {true, mockNoCommand}, // Test no command metadata works {true, mockLegacyCommand}, // Test legacy command metadata works - {false, mockWithCommand}, // Test platformCommand and command both set fails - {false, mockWithHooks}, // Test platformHooks and hooks both set fails + {true, mockWithCommand}, // Test platformCommand and command both set works + {true, mockWithHooks}, // Test platformHooks and hooks both set works } { err := validatePluginData(item.plug, fmt.Sprintf("test-%d", i)) if item.pass && err != nil { From 62ca98f521a616c1b600405aff00d068303c13e6 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Tue, 1 Jul 2025 08:29:29 -0700 Subject: [PATCH 06/88] fix up verbiage Signed-off-by: George Jenkins --- pkg/plugin/plugin.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go index 67676b103..a30bd06c4 100644 --- a/pkg/plugin/plugin.go +++ b/pkg/plugin/plugin.go @@ -83,7 +83,7 @@ type Metadata struct { PlatformCommand []PlatformCommand `json:"platformCommand"` // Command is the plugin command, as a single string. - // Providing a command will result in an deprecation warning if PlatformCommand is also set. + // Providing Command and PlatformCommand will result in a warning being emitted (PlatformCommand takes precedence). // // The command will be passed through environment expansion, so env vars can // be present in this command. Unless IgnoreFlags is set, this will @@ -119,7 +119,7 @@ type Metadata struct { PlatformHooks PlatformHooks `json:"platformHooks"` // Hooks are commands that will run on plugin events, as a single string. - // Providing a command will result in an deprecation warning if PlatformHooks is also set. + // Providing Hook and PlatformHooks will result in a warning being emitted (PlatformHooks takes precedence). // // The command will be passed through environment expansion, so env vars can // be present in this command. 
From de1bdf582035dc4079970e06ccdafd2b1e802263 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 2 Jul 2025 17:31:35 -0700 Subject: [PATCH 07/88] switch to slog Signed-off-by: George Jenkins --- pkg/plugin/plugin.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go index a30bd06c4..2c197f02e 100644 --- a/pkg/plugin/plugin.go +++ b/pkg/plugin/plugin.go @@ -17,6 +17,7 @@ package plugin // import "helm.sh/helm/v4/pkg/plugin" import ( "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -265,11 +266,11 @@ func validatePluginData(plug *Plugin, filepath string) error { plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage) if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 { - fmt.Printf("WARNING: both 'platformCommand' and 'command' are set in %q (this will become an error in a future Helm version)\n", filepath) + slog.Warn("both 'platformCommand' and 'command' are set (this will become an error in a future Helm version)", slog.String("filepath", filepath)) } if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 { - fmt.Printf("WARNING: both 'platformHooks' and 'hooks' are set in %q (this will become an error in a future Helm version)\n", filepath) + slog.Warn("both 'platformHooks' and 'hooks' are set (this will become an error in a future Helm version)", slog.String("filepath", filepath)) } // We could also validate SemVer, executable, and other fields should we so choose. From 08840f042c3555904720d947af3dbce524a31db0 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Sun, 12 Jan 2025 18:23:43 -0800 Subject: [PATCH 08/88] Rename 'atomic' -> 'rollback-on-failure' Signed-off-by: George Jenkins --- pkg/action/install.go | 41 +++++++++++++++++++------------------- pkg/action/install_test.go | 18 ++++++++--------- pkg/action/upgrade.go | 19 +++++++++--------- pkg/action/upgrade_test.go | 19 +++++++++--------- pkg/cmd/install.go | 3 ++- pkg/cmd/upgrade.go | 6 ++++-- 6 files changed, 56 insertions(+), 50 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index d9da2f14f..717247afd 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -78,21 +78,22 @@ type Install struct { DryRunOption string // HideSecret can be set to true when DryRun is enabled in order to hide // Kubernetes Secrets in the output. It cannot be used outside of DryRun. - HideSecret bool - DisableHooks bool - Replace bool - WaitStrategy kube.WaitStrategy - WaitForJobs bool - Devel bool - DependencyUpdate bool - Timeout time.Duration - Namespace string - ReleaseName string - GenerateName bool - NameTemplate string - Description string - OutputDir string - Atomic bool + HideSecret bool + DisableHooks bool + Replace bool + WaitStrategy kube.WaitStrategy + WaitForJobs bool + Devel bool + DependencyUpdate bool + Timeout time.Duration + Namespace string + ReleaseName string + GenerateName bool + NameTemplate string + Description string + OutputDir string + // RollbackOnFailure enables rolling back (uninstalling) the release on failure if set + RollbackOnFailure bool SkipCRDs bool SubNotes bool HideNotes bool @@ -293,9 +294,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma slog.Debug("API Version list given outside of client only mode, this list will be ignored") } - // Make sure if Atomic is set, that wait is set as well. This makes it so + // Make sure if RollbackOnFailure is set, that wait is set as well. 
This makes it so // the user doesn't have to specify both - if i.WaitStrategy == kube.HookOnlyStrategy && i.Atomic { + if i.WaitStrategy == kube.HookOnlyStrategy && i.RollbackOnFailure { i.WaitStrategy = kube.StatusWatcherStrategy } @@ -519,8 +520,8 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) - if i.Atomic { - slog.Debug("install failed, uninstalling release", "release", i.ReleaseName) + if i.RollbackOnFailure { + slog.Debug("install failed and rollback-on-failure is set, uninstalling release", "release", i.ReleaseName) uninstall := NewUninstall(i.cfg) uninstall.DisableHooks = i.DisableHooks uninstall.KeepHistory = false @@ -528,7 +529,7 @@ func (i *Install) failRelease(rel *release.Release, err error) (*release.Release if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil { return rel, fmt.Errorf("an error occurred while uninstalling the release. original install error: %w: %w", err, uninstallErr) } - return rel, fmt.Errorf("release %s failed, and has been uninstalled due to atomic being set: %w", i.ReleaseName, err) + return rel, fmt.Errorf("release %s failed, and has been uninstalled due to rollback-on-failure being set: %w", i.ReleaseName, err) } i.recordRelease(rel) // Ignore the error, since we have another error to deal with. return rel, err diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 1882f19e7..51baac7ab 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -590,16 +590,16 @@ func TestInstallRelease_WaitForJobs(t *testing.T) { is.Equal(res.Info.Status, release.StatusFailed) } -func TestInstallRelease_Atomic(t *testing.T) { +func TestInstallRelease_RollbackOnFailure(t *testing.T) { is := assert.New(t) - t.Run("atomic uninstall succeeds", func(t *testing.T) { + t.Run("rollback-on-failure uninstall succeeds", func(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "come-fail-away" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") instAction.cfg.KubeClient = failer - instAction.Atomic = true + instAction.RollbackOnFailure = true // disabling hooks to avoid an early fail when // WaitForDelete is called on the pre-delete hook execution instAction.DisableHooks = true @@ -608,7 +608,7 @@ func TestInstallRelease_Atomic(t *testing.T) { res, err := instAction.Run(buildChart(), vals) is.Error(err) is.Contains(err.Error(), "I timed out") - is.Contains(err.Error(), "atomic") + is.Contains(err.Error(), "rollback-on-failure") // Now make sure it isn't in storage anymore _, err = instAction.cfg.Releases.Get(res.Name, res.Version) @@ -616,14 +616,14 @@ func TestInstallRelease_Atomic(t *testing.T) { is.Equal(err, driver.ErrReleaseNotFound) }) - t.Run("atomic uninstall fails", func(t *testing.T) { + t.Run("rollback-on-failure uninstall fails", func(t *testing.T) { instAction := installAction(t) instAction.ReleaseName = "come-fail-away-with-me" failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitError = fmt.Errorf("I timed out") failer.DeleteError = fmt.Errorf("uninstall fail") instAction.cfg.KubeClient = failer - instAction.Atomic = true + instAction.RollbackOnFailure = true vals := map[string]interface{}{} _, err := instAction.Run(buildChart(), vals) @@ -633,7 +633,7 @@ func 
TestInstallRelease_Atomic(t *testing.T) { is.Contains(err.Error(), "an error occurred while uninstalling the release") }) } -func TestInstallRelease_Atomic_Interrupted(t *testing.T) { +func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) { is := assert.New(t) instAction := installAction(t) @@ -641,7 +641,7 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) { failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 10 * time.Second instAction.cfg.KubeClient = failer - instAction.Atomic = true + instAction.RollbackOnFailure = true vals := map[string]interface{}{} ctx, cancel := context.WithCancel(t.Context()) @@ -652,7 +652,7 @@ func TestInstallRelease_Atomic_Interrupted(t *testing.T) { res, err := instAction.RunWithContext(ctx, buildChart(), vals) is.Error(err) is.Contains(err.Error(), "context canceled") - is.Contains(err.Error(), "atomic") + is.Contains(err.Error(), "rollback-on-failure") is.Contains(err.Error(), "uninstalled") // Now make sure it isn't in storage anymore diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 271bc8aa9..566d42ab4 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -89,8 +89,8 @@ type Upgrade struct { ResetThenReuseValues bool // MaxHistory limits the maximum number of revisions saved per release MaxHistory int - // Atomic, if true, will roll back on failure. - Atomic bool + // RollbackOnFailure enables rolling back the upgraded release on failure + RollbackOnFailure bool // CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update. CleanupOnFail bool // SubNotes determines whether sub-notes are rendered in the chart. @@ -151,9 +151,9 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. return nil, err } - // Make sure if Atomic is set, that wait is set as well. This makes it so + // Make sure wait is set if RollbackOnFailure. This makes it so // the user doesn't have to specify both - if u.WaitStrategy == kube.HookOnlyStrategy && u.Atomic { + if u.WaitStrategy == kube.HookOnlyStrategy && u.RollbackOnFailure { u.WaitStrategy = kube.StatusWatcherStrategy } @@ -390,7 +390,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR } } -// Function used to lock the Mutex, this is important for the case when the atomic flag is set. +// Function used to lock the Mutex, this is important for the case when RollbackOnFailure is set. // In that case the upgrade will finish before the rollback is finished so it is necessary to wait for the rollback to finish. // The rollback will be trigger by the function failRelease func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) { @@ -408,7 +408,7 @@ func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c ch case <-ctx.Done(): err := ctx.Err() - // when the atomic flag is set the ongoing release finish first and doesn't give time for the rollback happens. + // when RollbackOnFailure is set, the ongoing release finish first and doesn't give time for the rollback happens. 
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err) case <-done: return @@ -495,8 +495,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e } slog.Debug("resource cleanup complete") } - if u.Atomic { - slog.Debug("upgrade failed and atomic is set, rolling back to last successful release") + + if u.RollbackOnFailure { + slog.Debug("Upgrade failed and rollback-on-failure is set, rolling back to previous successful release") // As a protection, get the last successful release before rollback. // If there are no successful releases, bail out @@ -530,7 +531,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e if rollErr := rollin.Run(rel.Name); rollErr != nil { return rel, fmt.Errorf("an error occurred while rolling back the release. original upgrade error: %w: %w", err, rollErr) } - return rel, fmt.Errorf("release %s failed, and has been rolled back due to atomic being set: %w", rel.Name, err) + return rel, fmt.Errorf("release %s failed, and has been rolled back due to rollback-on-failure being set: %w", rel.Name, err) } return rel, err diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index e20955560..8ec727671 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -141,11 +141,11 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) { is.Equal(res.Info.Status, release.StatusFailed) } -func TestUpgradeRelease_Atomic(t *testing.T) { +func TestUpgradeRelease_RollbackOnFailure(t *testing.T) { is := assert.New(t) req := require.New(t) - t.Run("atomic rollback succeeds", func(t *testing.T) { + t.Run("rollback-on-failure rollback succeeds", func(t *testing.T) { upAction := upgradeAction(t) rel := releaseStub() @@ -157,13 +157,13 @@ func TestUpgradeRelease_Atomic(t *testing.T) { // We can't make Update error because then the rollback won't work failer.WatchUntilReadyError = fmt.Errorf("arming key removed") upAction.cfg.KubeClient = failer - upAction.Atomic = true + upAction.RollbackOnFailure = true vals := map[string]interface{}{} res, err := upAction.Run(rel.Name, buildChart(), vals) req.Error(err) is.Contains(err.Error(), "arming key removed") - is.Contains(err.Error(), "atomic") + is.Contains(err.Error(), "rollback-on-failure") // Now make sure it is actually upgraded updatedRes, err := upAction.cfg.Releases.Get(res.Name, 3) @@ -172,7 +172,7 @@ func TestUpgradeRelease_Atomic(t *testing.T) { is.Equal(updatedRes.Info.Status, release.StatusDeployed) }) - t.Run("atomic uninstall fails", func(t *testing.T) { + t.Run("rollback-on-failure uninstall fails", func(t *testing.T) { upAction := upgradeAction(t) rel := releaseStub() rel.Name = "fallout" @@ -182,7 +182,7 @@ func TestUpgradeRelease_Atomic(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.UpdateError = fmt.Errorf("update fail") upAction.cfg.KubeClient = failer - upAction.Atomic = true + upAction.RollbackOnFailure = true vals := map[string]interface{}{} _, err := upAction.Run(rel.Name, buildChart(), vals) @@ -409,7 +409,8 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) { is.Equal(res.Info.Status, release.StatusFailed) } -func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) { +func TestUpgradeRelease_Interrupted_RollbackOnFailure(t *testing.T) { + is := assert.New(t) req := require.New(t) @@ -422,7 +423,7 @@ func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) { failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer.WaitDuration = 5 * time.Second 
upAction.cfg.KubeClient = failer - upAction.Atomic = true + upAction.RollbackOnFailure = true vals := map[string]interface{}{} ctx, cancel := context.WithCancel(t.Context()) @@ -431,7 +432,7 @@ func TestUpgradeRelease_Interrupted_Atomic(t *testing.T) { res, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals) req.Error(err) - is.Contains(err.Error(), "release interrupted-release failed, and has been rolled back due to atomic being set: context canceled") + is.Contains(err.Error(), "release interrupted-release failed, and has been rolled back due to rollback-on-failure being set: context canceled") // Now make sure it is actually upgraded updatedRes, err := upAction.cfg.Releases.Get(res.Name, 3) diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 3496a4bbd..33fc51584 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -203,7 +203,8 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored") f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema") - f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used") + f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback (uninstall) the installation upon failure. The --wait flag will be default to \"watcher\" if --rollback-on-failure is set") + f.MarkDeprecated("atomic", "use --rollback-on-failure instead") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation") diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index d4e7b4852..34d96626a 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -140,7 +140,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { instClient.WaitForJobs = client.WaitForJobs instClient.Devel = client.Devel instClient.Namespace = client.Namespace - instClient.Atomic = client.Atomic + instClient.RollbackOnFailure = client.RollbackOnFailure instClient.PostRenderer = client.PostRenderer instClient.DisableOpenAPIValidation = client.DisableOpenAPIValidation instClient.SubNotes = client.SubNotes @@ -277,7 +277,9 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. 
If '--reset-values' or '--reuse-values' is specified, this is ignored") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") - f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used") + f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback the upgrade to previous success release upon failure. The --wait flag will be defaulted to \"watcher\" if --rollback-on-failure is set") + f.BoolVar(&client.RollbackOnFailure, "atomic", false, "deprecated") + f.MarkDeprecated("atomic", "use --rollback-on-failure instead") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") From 0c64ad1c9740b11379d7941816e0d57e9874b29c Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Tue, 29 Jul 2025 13:18:29 -0400 Subject: [PATCH 09/88] fix Chart.yaml handling Signed-off-by: Matt Farina --- internal/chart/v3/util/dependencies.go | 5 +++-- pkg/chart/v2/util/dependencies.go | 5 +++-- pkg/lint/rules/chartfile.go | 3 +++ pkg/lint/rules/chartfile_test.go | 10 ++++++++++ 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/internal/chart/v3/util/dependencies.go b/internal/chart/v3/util/dependencies.go index bd5032ce4..129c46372 100644 --- a/internal/chart/v3/util/dependencies.go +++ b/internal/chart/v3/util/dependencies.go @@ -16,6 +16,7 @@ limitations under the License. package util import ( + "fmt" "log/slog" "strings" @@ -265,8 +266,8 @@ func processImportValues(c *chart.Chart, merge bool) error { for _, riv := range r.ImportValues { switch iv := riv.(type) { case map[string]interface{}: - child := iv["child"].(string) - parent := iv["parent"].(string) + child := fmt.Sprintf("%v", iv["child"]) + parent := fmt.Sprintf("%v", iv["parent"]) outiv = append(outiv, map[string]string{ "child": child, diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index f34144526..1a2aa1c95 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -16,6 +16,7 @@ limitations under the License. 
package util import ( + "fmt" "log/slog" "strings" @@ -265,8 +266,8 @@ func processImportValues(c *chart.Chart, merge bool) error { for _, riv := range r.ImportValues { switch iv := riv.(type) { case map[string]interface{}: - child := iv["child"].(string) - parent := iv["parent"].(string) + child := fmt.Sprintf("%v", iv["child"]) + parent := fmt.Sprintf("%v", iv["parent"]) outiv = append(outiv, map[string]string{ "child": child, diff --git a/pkg/lint/rules/chartfile.go b/pkg/lint/rules/chartfile.go index 724c3f2ea..103c28374 100644 --- a/pkg/lint/rules/chartfile.go +++ b/pkg/lint/rules/chartfile.go @@ -160,6 +160,9 @@ func validateChartVersion(cf *chart.Metadata) error { func validateChartMaintainer(cf *chart.Metadata) error { for _, maintainer := range cf.Maintainers { + if maintainer == nil { + return errors.New("a maintainer entry is empty") + } if maintainer.Name == "" { return errors.New("each maintainer requires a name") } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { diff --git a/pkg/lint/rules/chartfile_test.go b/pkg/lint/rules/chartfile_test.go index bbb14a5e8..1719a2011 100644 --- a/pkg/lint/rules/chartfile_test.go +++ b/pkg/lint/rules/chartfile_test.go @@ -142,6 +142,16 @@ func TestValidateChartMaintainer(t *testing.T) { t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error()) } } + + // Testing for an empty maintainer + badChart.Maintainers = []*chart.Maintainer{nil} + err := validateChartMaintainer(badChart) + if err == nil { + t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected") + } + if err.Error() != "a maintainer entry is empty" { + t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error()) + } } func TestValidateChartSources(t *testing.T) { From 69efc0d4fbcc143e0b196253f6e82808aaa57fc3 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Tue, 29 Jul 2025 15:37:57 -0400 Subject: [PATCH 10/88] Handle messy index files Signed-off-by: Matt Farina --- pkg/repo/index.go | 5 +++-- pkg/repo/index_test.go | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/repo/index.go b/pkg/repo/index.go index c26d7581c..4de8bb463 100644 --- a/pkg/repo/index.go +++ b/pkg/repo/index.go @@ -355,7 +355,8 @@ func loadIndex(data []byte, source string) (*IndexFile, error) { for name, cvs := range i.Entries { for idx := len(cvs) - 1; idx >= 0; idx-- { if cvs[idx] == nil { - slog.Warn("skipping loading invalid entry for chart %q from %s: empty entry", name, source) + slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q from %s: empty entry", name, source)) + cvs = append(cvs[:idx], cvs[idx+1:]...) continue } // When metadata section missing, initialize with no data @@ -366,7 +367,7 @@ func loadIndex(data []byte, source string) (*IndexFile, error) { cvs[idx].APIVersion = chart.APIVersionV1 } if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil { - slog.Warn("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err) + slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)) cvs = append(cvs[:idx], cvs[idx+1:]...) 
} } diff --git a/pkg/repo/index_test.go b/pkg/repo/index_test.go index d40719b12..7810d3ac0 100644 --- a/pkg/repo/index_test.go +++ b/pkg/repo/index_test.go @@ -68,6 +68,7 @@ entries: grafana: - apiVersion: v2 name: grafana + - null foo: - bar: From 5e6a411c1f2d0e75600aa0bef1b2f30cffb8ce83 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 12:22:01 +0100 Subject: [PATCH 11/88] fix: use username and password if provided Ref: #31114 Signed-off-by: Evans Mungai --- go.mod | 2 +- pkg/registry/client.go | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e2f8536a1..b0fef95bc 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0 github.com/evanphx/json-patch/v5 v5.9.11 + github.com/fatih/color v1.13.0 github.com/fluxcd/cli-utils v0.36.0-flux.14 github.com/foxcpp/go-mockdns v1.1.0 github.com/gobwas/glob v0.2.3 @@ -70,7 +71,6 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-errors/errors v1.5.1 // indirect diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 3ea68f181..042f06065 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -128,7 +128,13 @@ func NewClient(options ...ClientOption) (*Client, error) { } authorizer.SetUserAgent(version.GetUserAgent()) - authorizer.Credential = credentials.Credential(client.credentialsStore) + if client.username != "" && client.password != "" { + authorizer.Credential = func(_ context.Context, hostport string) (auth.Credential, error) { + return auth.Credential{Username: client.username, Password: client.password}, nil + } + } else { + authorizer.Credential = credentials.Credential(client.credentialsStore) + } if client.enableCache { authorizer.Cache = auth.NewCache() From 0dae3d6e886dd2007ca447c85582a7faabf72eb1 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 12:34:26 +0100 Subject: [PATCH 12/88] chore: check if go modules are tidy before build Signed-off-by: Evans Mungai --- .github/workflows/build-test.yml | 2 ++ Makefile | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 11a5c49ec..0c3ff6596 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -28,6 +28,8 @@ jobs: check-latest: true - name: Test source headers are present run: make test-source-headers + - name: Check if go mod is tidy + run: go mod tidy -diff - name: Run unit tests run: make test-coverage - name: Test build diff --git a/Makefile b/Makefile index 0785fdb2e..6624c12bb 100644 --- a/Makefile +++ b/Makefile @@ -246,3 +246,7 @@ info: @echo "Git Tag: ${GIT_TAG}" @echo "Git Commit: ${GIT_COMMIT}" @echo "Git Tree State: ${GIT_DIRTY}" + +.PHONY: tidy +tidy: + go mod tidy From 0b367e8404b5679737a7898d06b2a858e21aaf0a Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 12:47:35 +0100 Subject: [PATCH 13/88] Run go mod tidy Signed-off-by: Evans Mungai --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e2f8536a1..b0fef95bc 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 
github.com/distribution/distribution/v3 v3.0.0 github.com/evanphx/json-patch/v5 v5.9.11 + github.com/fatih/color v1.13.0 github.com/fluxcd/cli-utils v0.36.0-flux.14 github.com/foxcpp/go-mockdns v1.1.0 github.com/gobwas/glob v0.2.3 @@ -70,7 +71,6 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-errors/errors v1.5.1 // indirect From 9e1cbbebcb9c6fa5ff919133daf69197862b60a6 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 12:50:54 +0100 Subject: [PATCH 14/88] fix linting warning Signed-off-by: Evans Mungai --- pkg/registry/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 042f06065..c86215beb 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -129,7 +129,7 @@ func NewClient(options ...ClientOption) (*Client, error) { authorizer.SetUserAgent(version.GetUserAgent()) if client.username != "" && client.password != "" { - authorizer.Credential = func(_ context.Context, hostport string) (auth.Credential, error) { + authorizer.Credential = func(_ context.Context, _ string) (auth.Credential, error) { return auth.Credential{Username: client.username, Password: client.password}, nil } } else { From 5e86e43edadce10aa798f632050850b1f89680df Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 13:16:07 +0100 Subject: [PATCH 15/88] Add tests for pull command using OCI registry Signed-off-by: Evans Mungai --- pkg/cmd/pull_test.go | 184 ++++++++++++++++++++++++++++++++----------- 1 file changed, 138 insertions(+), 46 deletions(-) diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go index c30c94b49..b8e7eff82 100644 --- a/pkg/cmd/pull_test.go +++ b/pkg/cmd/pull_test.go @@ -256,6 +256,77 @@ func TestPullCmd(t *testing.T) { } } +// runPullTests is a helper function to run pull command tests with common logic +func runPullTests(t *testing.T, tests []struct { + name string + args string + existFile string + existDir string + wantError bool + wantErrorMsg string + expectFile string + expectDir bool +}, outdir string, additionalFlags string) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s %s", + tt.args, + outdir, + filepath.Join(outdir, "repositories.yaml"), + outdir, + filepath.Join(outdir, "config.json"), + additionalFlags, + ) + // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182 + if tt.existFile != "" { + file := filepath.Join(outdir, tt.existFile) + _, err := os.Create(file) + if err != nil { + t.Fatal(err) + } + } + if tt.existDir != "" { + file := filepath.Join(outdir, tt.existDir) + err := os.Mkdir(file, 0755) + if err != nil { + t.Fatal(err) + } + } + _, _, err := executeActionCommand(cmd) + if err != nil { + if tt.wantError { + if tt.wantErrorMsg != "" && tt.wantErrorMsg == err.Error() { + t.Fatalf("Actual error %s, not equal to expected error %s", err, tt.wantErrorMsg) + } + return + } + t.Fatalf("%q reported error: %s", tt.name, err) + } + + ef := filepath.Join(outdir, tt.expectFile) + fi, err := os.Stat(ef) + if err != nil { + t.Errorf("%q: expected a file at %s. 
%s", tt.name, ef, err) + } + if fi.IsDir() != tt.expectDir { + t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir) + } + }) + } +} + +// buildOCIURL is a helper function to build OCI URLs with credentials +func buildOCIURL(registryURL, chartName, version, username, password string) string { + baseURL := fmt.Sprintf("oci://%s/u/ocitestuser/%s", registryURL, chartName) + if version != "" { + baseURL += fmt.Sprintf(" --version %s", version) + } + if username != "" && password != "" { + baseURL += fmt.Sprintf(" --username %s --password %s", username, password) + } + return baseURL +} + func TestPullWithCredentialsCmd(t *testing.T) { srv := repotest.NewTempServer( t, @@ -311,52 +382,7 @@ func TestPullWithCredentialsCmd(t *testing.T) { }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - outdir := srv.Root() - cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s", - tt.args, - outdir, - filepath.Join(outdir, "repositories.yaml"), - outdir, - filepath.Join(outdir, "config.json"), - ) - // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182 - if tt.existFile != "" { - file := filepath.Join(outdir, tt.existFile) - _, err := os.Create(file) - if err != nil { - t.Fatal(err) - } - } - if tt.existDir != "" { - file := filepath.Join(outdir, tt.existDir) - err := os.Mkdir(file, 0755) - if err != nil { - t.Fatal(err) - } - } - _, _, err := executeActionCommand(cmd) - if err != nil { - if tt.wantError { - if tt.wantErrorMsg != "" && tt.wantErrorMsg == err.Error() { - t.Fatalf("Actual error %s, not equal to expected error %s", err, tt.wantErrorMsg) - } - return - } - t.Fatalf("%q reported error: %s", tt.name, err) - } - - ef := filepath.Join(outdir, tt.expectFile) - fi, err := os.Stat(ef) - if err != nil { - t.Errorf("%q: expected a file at %s. %s", tt.name, ef, err) - } - if fi.IsDir() != tt.expectDir { - t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir) - } - }) - } + runPullTests(t, tests, srv.Root(), "") } func TestPullVersionCompletion(t *testing.T) { @@ -389,6 +415,72 @@ func TestPullVersionCompletion(t *testing.T) { runTestCmd(t, tests) } +func TestPullWithCredentialsCmdOCIRegistry(t *testing.T) { + srv := repotest.NewTempServer( + t, + repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"), + ) + defer srv.Stop() + + ociSrv, err := repotest.NewOCIServer(t, srv.Root()) + if err != nil { + t.Fatal(err) + } + ociSrv.Run(t) + + if err := srv.LinkIndices(); err != nil { + t.Fatal(err) + } + + // all flags will get "-d outdir" appended. 
+ tests := []struct { + name string + args string + existFile string + existDir string + wantError bool + wantErrorMsg string + expectFile string + expectDir bool + }{ + { + name: "OCI Chart fetch with credentials", + args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword), + expectFile: "./oci-dependent-chart-0.1.0.tgz", + }, + { + name: "OCI Chart fetch with credentials and untar", + args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar", + expectFile: "./oci-dependent-chart", + expectDir: true, + }, + { + name: "OCI Chart fetch with credentials and untardir", + args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar --untardir ocitest-credentials", + expectFile: "./ocitest-credentials", + expectDir: true, + }, + { + name: "Fail fetching OCI chart with wrong credentials", + args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", "wronguser", "wrongpass"), + wantError: true, + }, + { + name: "Fail fetching non-existent OCI chart with credentials", + args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword), + wantError: true, + }, + { + name: "Fail fetching OCI chart without version specified", + args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "", ociSrv.TestUsername, ociSrv.TestPassword), + wantErrorMsg: "Error: --version flag is explicitly required for OCI registries", + wantError: true, + }, + } + + runPullTests(t, tests, srv.Root(), "--plain-http") +} + func TestPullFileCompletion(t *testing.T) { checkFileCompletion(t, "pull", false) checkFileCompletion(t, "pull repo/chart", false) From 97af5a5e85036d951db7de6f788309bca4c68e60 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 13:31:18 +0100 Subject: [PATCH 16/88] Fix linter warning Signed-off-by: Evans Mungai --- pkg/cmd/pull_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go index b8e7eff82..6a1d3ec0d 100644 --- a/pkg/cmd/pull_test.go +++ b/pkg/cmd/pull_test.go @@ -267,6 +267,7 @@ func runPullTests(t *testing.T, tests []struct { expectFile string expectDir bool }, outdir string, additionalFlags string) { + t.Helper() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s %s", From af1c9570f518c0a2631d21e88170b41dfbafe8de Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 13:42:27 +0100 Subject: [PATCH 17/88] Rename go mod tidy check task Signed-off-by: Evans Mungai --- .github/workflows/build-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 0c3ff6596..5456b143f 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -28,7 +28,7 @@ jobs: check-latest: true - name: Test source headers are present run: make test-source-headers - - name: Check if go mod is tidy + - name: Check if go modules need to be tidied run: go mod tidy -diff - name: Run unit tests run: make test-coverage From 46c8caa4103a4cf19e80004abb552046db6e505c Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 19:11:17 +0300 Subject: [PATCH 18/88] Add info target as part of build Signed-off-by: Evans Mungai --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Makefile b/Makefile index 6624c12bb..64780b0d8 100644 --- a/Makefile +++ b/Makefile @@ -75,7 +75,7 @@ all: build # build .PHONY: build -build: $(BINDIR)/$(BINNAME) +build: $(BINDIR)/$(BINNAME) info $(BINDIR)/$(BINNAME): $(SRC) CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -trimpath -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)'/$(BINNAME) ./cmd/helm From 064a18ff79c74f1d0ffa0a68ea501fdb97b42d11 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Thu, 7 Aug 2025 17:50:26 +0100 Subject: [PATCH 19/88] Update Makefile Co-authored-by: Terry Howe Signed-off-by: Evans Mungai --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 64780b0d8..d1d6d0982 100644 --- a/Makefile +++ b/Makefile @@ -75,7 +75,7 @@ all: build # build .PHONY: build -build: $(BINDIR)/$(BINNAME) info +build: $(BINDIR)/$(BINNAME) tidy $(BINDIR)/$(BINNAME): $(SRC) CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -trimpath -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)'/$(BINNAME) ./cmd/helm From b3568a67a8a9ac2adba6308bf12e5c10daee119d Mon Sep 17 00:00:00 2001 From: Stephane Jeandeaux Date: Fri, 19 Apr 2024 21:39:24 +0200 Subject: [PATCH 20/88] helm uninstall The goal is to have the same behaviour with or without dry-run with --ignore-not-found close #12970 Signed-off-by: Stephane Jeandeaux --- pkg/action/uninstall.go | 9 ++++++--- pkg/action/uninstall_test.go | 12 +++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 61e10b2c8..6306a93b4 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -66,12 +66,15 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if u.DryRun { - // In the dry run case, just see if the release exists r, err := u.cfg.releaseContent(name, 0) - if err != nil { + switch { + case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): + fallthrough + case err == nil: + return &release.UninstallReleaseResponse{Release: r}, nil + default: return &release.UninstallReleaseResponse{}, err } - return &release.UninstallReleaseResponse{Release: r}, nil } if err := chartutil.ValidateReleaseName(name); err != nil { diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 8b148522c..8e8af7493 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -34,6 +34,17 @@ func uninstallAction(t *testing.T) *Uninstall { return unAction } +func TestUninstallRelease_dryRun_ignoreNotFound(t *testing.T) { + unAction := uninstallAction(t) + unAction.DryRun = true + unAction.IgnoreNotFound = true + + is := assert.New(t) + res, err := unAction.Run("release-non-exist") + is.NotNil(res) + is.NoError(err) +} + func TestUninstallRelease_ignoreNotFound(t *testing.T) { unAction := uninstallAction(t) unAction.DryRun = false @@ -44,7 +55,6 @@ func TestUninstallRelease_ignoreNotFound(t *testing.T) { is.Nil(res) is.NoError(err) } - func TestUninstallRelease_deleteRelease(t *testing.T) { is := assert.New(t) From 65209bed54189bb316c03d92c52bb91c85484998 Mon Sep 17 00:00:00 2001 From: Stephane Jeandeaux Date: Mon, 13 May 2024 20:17:38 +0200 Subject: [PATCH 21/88] Update pkg/action/uninstall.go Co-authored-by: Eddy Moulton Signed-off-by: Stephane Jeandeaux --- pkg/action/uninstall.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 6306a93b4..2a47510d7 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -68,10 +68,10 @@ 
func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) if u.DryRun { r, err := u.cfg.releaseContent(name, 0) switch { - case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): - fallthrough case err == nil: return &release.UninstallReleaseResponse{Release: r}, nil + case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): + fallthrough default: return &release.UninstallReleaseResponse{}, err } From 8434935a3dae47a6cdc94388f19e52709d4ea54f Mon Sep 17 00:00:00 2001 From: Stephane Jeandeaux Date: Mon, 13 May 2024 20:40:00 +0200 Subject: [PATCH 22/88] fix fallthrough Signed-off-by: Stephane Jeandeaux --- pkg/action/uninstall.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 2a47510d7..6de570753 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -69,9 +69,9 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) r, err := u.cfg.releaseContent(name, 0) switch { case err == nil: - return &release.UninstallReleaseResponse{Release: r}, nil - case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): fallthrough + case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): + return &release.UninstallReleaseResponse{Release: r}, nil default: return &release.UninstallReleaseResponse{}, err } From f3c9407052534356522e8fb9729a6af447701b4b Mon Sep 17 00:00:00 2001 From: Paolo Gallina Date: Fri, 8 Aug 2025 12:35:23 +0200 Subject: [PATCH 23/88] fix(transport): leverage same tls config Signed-off-by: Paolo Gallina --- pkg/getter/httpgetter.go | 3 +++ pkg/getter/ocigetter.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go index 925df201e..4cf528797 100644 --- a/pkg/getter/httpgetter.go +++ b/pkg/getter/httpgetter.go @@ -122,6 +122,9 @@ func (g *HTTPGetter) httpClient() (*http.Client, error) { g.transport = &http.Transport{ DisableCompression: true, Proxy: http.ProxyFromEnvironment, + // Being nil would cause the tls.Config default to be used + // "NewTLSConfig" modifies an empty TLS config, not the default one + TLSClientConfig: &tls.Config{}, } }) diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go index 2a611e13a..7e8bcfcfb 100644 --- a/pkg/getter/ocigetter.go +++ b/pkg/getter/ocigetter.go @@ -17,6 +17,7 @@ package getter import ( "bytes" + "crypto/tls" "fmt" "net" "net/http" @@ -124,6 +125,9 @@ func (g *OCIGetter) newRegistryClient() (*registry.Client, error) { TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, Proxy: http.ProxyFromEnvironment, + // Being nil would cause the tls.Config default to be used + // "NewTLSConfig" modifies an empty TLS config, not the default one + TLSClientConfig: &tls.Config{}, } }) From 44a594fef5693aff44b9ff0f1ea38dacc7fcb880 Mon Sep 17 00:00:00 2001 From: Stephane Jeandeaux Date: Fri, 2 Aug 2024 11:21:23 +0200 Subject: [PATCH 24/88] review Signed-off-by: Stephane Jeandeaux --- pkg/action/uninstall.go | 13 +++++++------ pkg/action/uninstall_test.go | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 6de570753..163af290e 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -17,6 +17,7 @@ limitations under the License. 
package action import ( + "errors" "fmt" "log/slog" "strings" @@ -28,6 +29,7 @@ import ( "helm.sh/helm/v4/pkg/kube" releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + "helm.sh/helm/v4/pkg/storage/driver" helmtime "helm.sh/helm/v4/pkg/time" ) @@ -67,14 +69,13 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) if u.DryRun { r, err := u.cfg.releaseContent(name, 0) - switch { - case err == nil: - fallthrough - case u.IgnoreNotFound && errors.As(err, &driver.ErrReleaseNotFound): - return &release.UninstallReleaseResponse{Release: r}, nil - default: + if err != nil { + if u.IgnoreNotFound && errors.Is(err, driver.ErrReleaseNotFound) { + return nil, nil + } return &release.UninstallReleaseResponse{}, err } + return &release.UninstallReleaseResponse{Release: r}, nil } if err := chartutil.ValidateReleaseName(name); err != nil { diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 8e8af7493..44bd66d96 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -41,7 +41,7 @@ func TestUninstallRelease_dryRun_ignoreNotFound(t *testing.T) { is := assert.New(t) res, err := unAction.Run("release-non-exist") - is.NotNil(res) + is.Nil(res) is.NoError(err) } From 78436b2d0e4b857a9c825de668e55ae786d85d23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 05:19:39 +0000 Subject: [PATCH 25/88] chore(deps): bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/build-test.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/govulncheck.yml | 2 +- .github/workflows/release.yml | 4 ++-- .github/workflows/scorecards.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 11a5c49ec..6a9d217b0 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9a6aeb582..c1a2bff20 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -43,7 +43,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 3059b05a2..0d5b4e969 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 67cfa4c36..84d260a8f 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" - name: Setup Go diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 96138caf1..21c527442 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest-16-cores steps: - name: Checkout source code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 with: fetch-depth: 0 @@ -79,7 +79,7 @@ jobs: if: github.ref == 'refs/heads/main' steps: - name: Checkout source code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # pin@v5.0.0 - name: Add variables to environment file run: cat ".github/env" >> "$GITHUB_ENV" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 4b135bb2a..6a44c8afb 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -28,7 +28,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false From 45141451b48503565ecc13b8805bbe85a2c6d6af Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Thu, 3 Jul 2025 12:59:23 -0700 Subject: [PATCH 26/88] Kube client support server-side apply Signed-off-by: George Jenkins --- pkg/action/hooks.go | 4 +- pkg/action/install.go | 24 +- pkg/action/rollback.go | 6 +- pkg/action/upgrade.go | 6 +- pkg/kube/client.go | 519 ++++++++++++++++----- pkg/kube/client_test.go | 949 +++++++++++++++++++++++++++++++-------- pkg/kube/fake/fake.go | 16 +- pkg/kube/fake/printer.go | 4 +- pkg/kube/interface.go | 12 +- pkg/kube/wait.go | 21 - 10 files changed, 1213 insertions(+), 348 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index d01ec84a0..95260e0e4 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -73,7 +73,9 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.LastRun.Phase = release.HookPhaseUnknown // Create hook resources - if _, err := cfg.KubeClient.Create(resources); err != nil { + if _, err := cfg.KubeClient.Create( + resources, + kube.ClientCreateOptionServerSideApply(false)); err != nil { h.LastRun.CompletedAt = helmtime.Now() h.LastRun.Phase = release.HookPhaseFailed return fmt.Errorf("warning: Hook %s %s failed: %w", hook, 
h.Path, err) diff --git a/pkg/action/install.go b/pkg/action/install.go index d8efa5d5d..9a9101f5d 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -173,7 +173,9 @@ func (i *Install) installCRDs(crds []chart.CRD) error { } // Send them to Kube - if _, err := i.cfg.KubeClient.Create(res); err != nil { + if _, err := i.cfg.KubeClient.Create( + res, + kube.ClientCreateOptionServerSideApply(false)); err != nil { // If the error is CRD already exists, continue. if apierrors.IsAlreadyExists(err) { crdName := res[0].Name @@ -399,7 +401,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma if err != nil { return nil, err } - if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := i.cfg.KubeClient.Create( + resourceList, + kube.ClientCreateOptionServerSideApply(false)); err != nil && !apierrors.IsAlreadyExists(err) { return nil, err } } @@ -468,13 +472,17 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource // do an update, but it's not clear whether we WANT to do an update if the reuse is set // to true, since that is basically an upgrade operation. if len(toBeAdopted) == 0 && len(resources) > 0 { - _, err = i.cfg.KubeClient.Create(resources) + _, err = i.cfg.KubeClient.Create( + resources, + kube.ClientCreateOptionServerSideApply(false)) } else if len(resources) > 0 { - if i.TakeOwnership { - _, err = i.cfg.KubeClient.(kube.InterfaceThreeWayMerge).UpdateThreeWayMerge(toBeAdopted, resources, i.ForceReplace) - } else { - _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.ForceReplace) - } + updateThreeWayMergeForUnstructured := i.TakeOwnership + _, err = i.cfg.KubeClient.Update( + toBeAdopted, + resources, + kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured), + kube.ClientUpdateOptionForceReplace(i.ForceReplace)) } if err != nil { return rel, err diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index f529fa422..f60d4f4bc 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -190,7 +190,11 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas if err != nil { return targetRelease, fmt.Errorf("unable to set metadata visitor from target release: %w", err) } - results, err := r.cfg.KubeClient.Update(current, target, r.ForceReplace) + results, err := r.cfg.KubeClient.Update( + current, + target, + kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionForceReplace(r.ForceReplace)) if err != nil { msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 0567c8de2..a32d6e78e 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -426,7 +426,11 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) } - results, err := u.cfg.KubeClient.Update(current, target, u.ForceReplace) + results, err := u.cfg.KubeClient.Update( + current, + target, + kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionForceReplace(u.ForceReplace)) if err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 78ed4e088..aa7c86c9b 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -24,6 +24,7 @@ import ( "fmt" 
"io" "log/slog" + "net/http" "os" "path/filepath" "reflect" @@ -91,6 +92,14 @@ const ( HookOnlyStrategy WaitStrategy = "hookOnly" ) +type FieldValidationDirective string + +const ( + FieldValidationDirectiveIgnore FieldValidationDirective = "Ignore" + FieldValidationDirectiveWarn FieldValidationDirective = "Warn" + FieldValidationDirectiveStrict FieldValidationDirective = "Strict" +) + func init() { // Add CRDs to the scheme. They are missing by default. if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { @@ -194,10 +203,101 @@ func (c *Client) IsReachable() error { return nil } +type clientCreateOptions struct { + serverSideApply bool + forceConflicts bool + dryRun bool + fieldValidationDirective FieldValidationDirective +} + +type ClientCreateOption func(*clientCreateOptions) error + +// ClientUpdateOptionServerSideApply enables performing object apply server-side +// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ +func ClientCreateOptionServerSideApply(serverSideApply bool) ClientCreateOption { + return func(o *clientCreateOptions) error { + o.serverSideApply = serverSideApply + + return nil + } +} + +// ClientCreateOptionForceConflicts forces field conflicts to be resolved +// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts +// Only valid when ClientUpdateOptionServerSideApply enabled +func ClientCreateOptionForceConflicts(forceConflicts bool) ClientCreateOption { + return func(o *clientCreateOptions) error { + o.forceConflicts = forceConflicts + + return nil + } +} + +// ClientCreateOptionDryRun performs non-mutating operations only +func ClientCreateOptionDryRun(dryRun bool) ClientCreateOption { + return func(o *clientCreateOptions) error { + o.dryRun = dryRun + + return nil + } +} + +// ClientCreateOptionFieldValidationDirective specifies show API operations validate object's schema +// - For client-side apply: this is ignored +// - For server-side apply: the directive is sent to the server to perform the validation +// +// Defaults to `FieldValidationDirectiveStrict` +func ClientCreateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientCreateOption { + return func(o *clientCreateOptions) error { + o.fieldValidationDirective = fieldValidationDirective + + return nil + } +} + // Create creates Kubernetes resources specified in the resource list. 
-func (c *Client) Create(resources ResourceList) (*Result, error) { +func (c *Client) Create(resources ResourceList, options ...ClientCreateOption) (*Result, error) { slog.Debug("creating resource(s)", "resources", len(resources)) - if err := perform(resources, createResource); err != nil { + + createOptions := clientCreateOptions{ + serverSideApply: true, // Default to server-side apply + fieldValidationDirective: FieldValidationDirectiveStrict, + } + + for _, o := range options { + o(&createOptions) + } + + if createOptions.forceConflicts && !createOptions.serverSideApply { + return nil, fmt.Errorf("invalid operation: force conflicts can only be used with server-side apply") + } + + makeCreateApplyFunc := func() func(target *resource.Info) error { + if createOptions.serverSideApply { + slog.Debug("using server-side apply for resource creation", slog.Bool("forceConflicts", createOptions.forceConflicts), slog.Bool("dryRun", createOptions.dryRun), slog.String("fieldValidationDirective", string(createOptions.fieldValidationDirective))) + return func(target *resource.Info) error { + err := patchResourceServerSide(target, createOptions.dryRun, createOptions.forceConflicts, createOptions.fieldValidationDirective) + + logger := slog.With( + slog.String("namespace", target.Namespace), + slog.String("name", target.Name), + slog.String("gvk", target.Mapping.GroupVersionKind.String())) + if err != nil { + logger.Debug("Error patching resource", slog.Any("error", err)) + return err + } + + logger.Debug("Patched resource") + + return nil + } + } + + slog.Debug("using client-side apply for resource creation") + return createResource + } + + if err := perform(resources, makeCreateApplyFunc()); err != nil { return nil, err } return &Result{Created: resources}, nil @@ -348,96 +448,98 @@ func (c *Client) namespace() string { return v1.NamespaceDefault } -// newBuilder returns a new resource builder for structured api objects. -func (c *Client) newBuilder() *resource.Builder { - return c.Factory.NewBuilder(). - ContinueOnError(). - NamespaceParam(c.namespace()). - DefaultNamespace(). - Flatten() -} - -// Build validates for Kubernetes objects and returns unstructured infos. -func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) { - validationDirective := metav1.FieldValidationIgnore +func determineFieldValidationDirective(validate bool) FieldValidationDirective { if validate { - validationDirective = metav1.FieldValidationStrict + return FieldValidationDirectiveStrict } - schema, err := c.Factory.Validator(validationDirective) + return FieldValidationDirectiveIgnore +} + +func buildResourceList(f Factory, namespace string, validationDirective FieldValidationDirective, reader io.Reader, transformRequest resource.RequestTransform) (ResourceList, error) { + + schema, err := f.Validator(string(validationDirective)) if err != nil { return nil, err } - result, err := c.newBuilder(). + + builder := f.NewBuilder(). + ContinueOnError(). + NamespaceParam(namespace). + DefaultNamespace(). + Flatten(). Unstructured(). Schema(schema). - Stream(reader, ""). - Do().Infos() + Stream(reader, "") + if transformRequest != nil { + builder.TransformRequests(transformRequest) + } + result, err := builder.Do().Infos() return result, scrubValidationError(err) } +// Build validates for Kubernetes objects and returns unstructured infos. 
+func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) { + return buildResourceList( + c.Factory, + c.namespace(), + determineFieldValidationDirective(validate), + reader, + nil) +} + // BuildTable validates for Kubernetes objects and returns unstructured infos. // The returned kind is a Table. func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) { - validationDirective := metav1.FieldValidationIgnore - if validate { - validationDirective = metav1.FieldValidationStrict - } - - schema, err := c.Factory.Validator(validationDirective) - if err != nil { - return nil, err - } - result, err := c.newBuilder(). - Unstructured(). - Schema(schema). - Stream(reader, ""). - TransformRequests(transformRequests). - Do().Infos() - return result, scrubValidationError(err) + return buildResourceList( + c.Factory, + c.namespace(), + determineFieldValidationDirective(validate), + reader, + transformRequests) } -func (c *Client) update(original, target ResourceList, force, threeWayMerge bool) (*Result, error) { +func (c *Client) update(target, original ResourceList, updateApplyFunc func(target, original *resource.Info) error) (*Result, error) { updateErrors := []error{} res := &Result{} slog.Debug("checking resources for changes", "resources", len(target)) - err := target.Visit(func(info *resource.Info, err error) error { + err := target.Visit(func(target *resource.Info, err error) error { if err != nil { return err } - helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()) - if _, err := helper.Get(info.Namespace, info.Name); err != nil { + helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) + if _, err := helper.Get(target.Namespace, target.Name); err != nil { if !apierrors.IsNotFound(err) { return fmt.Errorf("could not get information about the resource: %w", err) } // Append the created resource to the results, even if something fails - res.Created = append(res.Created, info) + res.Created = append(res.Created, target) // Since the resource does not exist, create it. 
- if err := createResource(info); err != nil { + if err := createResource(target); err != nil { return fmt.Errorf("failed to create resource: %w", err) } - kind := info.Mapping.GroupVersionKind.Kind - slog.Debug("created a new resource", "namespace", info.Namespace, "name", info.Name, "kind", kind) + kind := target.Mapping.GroupVersionKind.Kind + slog.Debug("created a new resource", "namespace", target.Namespace, "name", target.Name, "kind", kind) return nil } - originalInfo := original.Get(info) - if originalInfo == nil { - kind := info.Mapping.GroupVersionKind.Kind - return fmt.Errorf("no %s with the name %q found", kind, info.Name) + original := original.Get(target) + if original == nil { + kind := target.Mapping.GroupVersionKind.Kind + return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) } - if err := updateResource(c, info, originalInfo.Object, force, threeWayMerge); err != nil { - slog.Debug("error updating the resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + if err := updateApplyFunc(target, original); err != nil { updateErrors = append(updateErrors, err) } + // Because we check for errors later, append the info regardless - res.Updated = append(res.Updated, info) + res.Updated = append(res.Updated, target) return nil }) @@ -473,18 +575,81 @@ func (c *Client) update(original, target ResourceList, force, threeWayMerge bool return res, nil } -// Update takes the current list of objects and target list of objects and -// creates resources that don't already exist, updates resources that have been -// modified in the target configuration, and deletes resources from the current -// configuration that are not present in the target configuration. If an error -// occurs, a Result will still be returned with the error, containing all -// resource updates, creations, and deletions that were attempted. These can be -// used for cleanup or other logging purposes. 
+type clientUpdateOptions struct {
+	threeWayMergeForUnstructured bool
+	serverSideApply              bool
+	forceReplace                 bool
+	forceConflicts               bool
+	dryRun                       bool
+	fieldValidationDirective     FieldValidationDirective
+}
+
+type ClientUpdateOption func(*clientUpdateOptions) error
+
+// ClientUpdateOptionThreeWayMergeForUnstructured enables performing three-way merge for unstructured objects
+// Must not be enabled when ClientUpdateOptionServerSideApply is enabled
+func ClientUpdateOptionThreeWayMergeForUnstructured(threeWayMergeForUnstructured bool) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.threeWayMergeForUnstructured = threeWayMergeForUnstructured
+
+		return nil
+	}
+}
+
+// ClientUpdateOptionServerSideApply enables performing object apply server-side (default)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+// Must not be enabled when ClientUpdateOptionThreeWayMergeForUnstructured is enabled
+func ClientUpdateOptionServerSideApply(serverSideApply bool) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.serverSideApply = serverSideApply
+
+		return nil
+	}
+}
+
+// ClientUpdateOptionForceReplace forces objects to be replaced rather than updated via patch
+// Must not be enabled when ClientUpdateOptionForceConflicts is enabled
+func ClientUpdateOptionForceReplace(forceReplace bool) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.forceReplace = forceReplace
+
+		return nil
+	}
+}
+
+// ClientUpdateOptionForceConflicts forces field conflicts to be resolved
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+// Must not be enabled when ClientUpdateOptionForceReplace is enabled
+func ClientUpdateOptionForceConflicts(forceConflicts bool) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.forceConflicts = forceConflicts
+
+		return nil
+	}
+}
+
+// ClientUpdateOptionDryRun performs non-mutating operations only
+// The operation is evaluated by the server, but the change is not persisted
+// Defaults to false
+func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.dryRun = dryRun
+
+		return nil
+	}
+}
+
+// ClientUpdateOptionFieldValidationDirective specifies how API operations validate the object's schema
+// - For client-side apply: this is ignored
+// - For server-side apply: the directive is sent to the server to perform the validation
 //
-// The difference to Update is that UpdateThreeWayMerge does a three-way-merge
-// for unstructured objects.
-func (c *Client) UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error) {
-	return c.update(original, target, force, true)
+// Defaults to `FieldValidationDirectiveStrict`
+func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientUpdateOption {
+	return func(o *clientUpdateOptions) error {
+		o.fieldValidationDirective = fieldValidationDirective
+
+		return nil
+	}
 }
 
 // Update takes the current list of objects and target list of objects and
@@ -494,8 +659,78 @@ func (c *Client) UpdateThreeWayMerge(original, target ResourceList, force bool)
 // occurs, a Result will still be returned with the error, containing all
 // resource updates, creations, and deletions that were attempted. These can be
 // used for cleanup or other logging purposes.
-func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) { - return c.update(original, target, force, false) +// +// The default is to use server-side apply, equivalent to: `ClientUpdateOptionServerSideApply(true)` +func (c *Client) Update(original, target ResourceList, options ...ClientUpdateOption) (*Result, error) { + updateOptions := clientUpdateOptions{ + serverSideApply: true, // Default to server-side apply + fieldValidationDirective: FieldValidationDirectiveStrict, + } + + for _, o := range options { + o(&updateOptions) + } + + if updateOptions.threeWayMergeForUnstructured && updateOptions.serverSideApply { + return nil, fmt.Errorf("invalid operation: cannot use three-way merge for unstructured and server-side apply together") + } + + if updateOptions.forceConflicts && updateOptions.forceReplace { + return nil, fmt.Errorf("invalid operation: cannot use force conflicts and force replace together") + } + + if updateOptions.serverSideApply && updateOptions.forceReplace { + return nil, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together") + } + + makeUpdateApplyFunc := func() func(target, original *resource.Info) error { + if updateOptions.forceReplace { + slog.Debug( + "using resource replace update strategy", + slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) + return func(target, original *resource.Info) error { + if err := replaceResource(target, updateOptions.fieldValidationDirective); err != nil { + slog.Debug("error replacing the resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + return err + } + + originalObject := original.Object + kind := target.Mapping.GroupVersionKind.Kind + slog.Debug("replace succeeded", "name", original.Name, "initialKind", originalObject.GetObjectKind().GroupVersionKind().Kind, "kind", kind) + + return nil + } + } else if updateOptions.serverSideApply { + slog.Debug( + "using server-side apply for resource update", + slog.Bool("forceConflicts", updateOptions.forceConflicts), + slog.Bool("dryRun", updateOptions.dryRun), + slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) + return func(target, _ *resource.Info) error { + err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective) + + logger := slog.With( + slog.String("namespace", target.Namespace), + slog.String("name", target.Name), + slog.String("gvk", target.Mapping.GroupVersionKind.String())) + if err != nil { + logger.Debug("Error patching resource", slog.Any("error", err)) + return err + } + + logger.Debug("Patched resource") + + return nil + } + } + + slog.Debug("using client-side apply for resource update", slog.Bool("threeWayMergeForUnstructured", updateOptions.threeWayMergeForUnstructured)) + return func(target, original *resource.Info) error { + return patchResourceClientSide(target, original.Object, updateOptions.threeWayMergeForUnstructured) + } + } + + return c.update(target, original, makeUpdateApplyFunc()) } // Delete deletes Kubernetes resources specified in the resources list with @@ -503,7 +738,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err // if one or more fail and collect any errors. All successfully deleted items // will be returned in the `Deleted` ResourceList that is part of the result. 
func (c *Client) Delete(resources ResourceList) (*Result, []error) { - return rdelete(c, resources, metav1.DeletePropagationBackground) + return deleteResources(resources, metav1.DeletePropagationBackground) } // Delete deletes Kubernetes resources specified in the resources list with @@ -511,10 +746,10 @@ func (c *Client) Delete(resources ResourceList) (*Result, []error) { // if one or more fail and collect any errors. All successfully deleted items // will be returned in the `Deleted` ResourceList that is part of the result. func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) { - return rdelete(c, resources, policy) + return deleteResources(resources, policy) } -func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { +func deleteResources(resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { var errs []error res := &Result{} mtx := sync.Mutex{} @@ -548,6 +783,17 @@ func rdelete(_ *Client, resources ResourceList, propagation metav1.DeletionPropa return res, nil } +// https://github.com/kubernetes/kubectl/blob/197123726db24c61aa0f78d1f0ba6e91a2ec2f35/pkg/cmd/apply/apply.go#L439 +func isIncompatibleServerError(err error) bool { + // 415: Unsupported media type means we're talking to a server which doesn't + // support server-side apply. + if _, ok := err.(*apierrors.StatusError); !ok { + // Non-StatusError means the error isn't because the server is incompatible. + return false + } + return err.(*apierrors.StatusError).Status().Code == http.StatusUnsupportedMediaType +} + // getManagedFieldsManager returns the manager string. If one was set it will be returned. // Otherwise, one is calculated based on the name of the binary. func getManagedFieldsManager() string { @@ -568,18 +814,41 @@ func getManagedFieldsManager() string { return filepath.Base(os.Args[0]) } +func perform(infos ResourceList, fn func(*resource.Info) error) error { + var result error + + if len(infos) == 0 { + return ErrNoObjectsVisited + } + + errs := make(chan error) + go batchPerform(infos, fn, errs) + + for range infos { + err := <-errs + if err != nil { + result = errors.Join(result, err) + } + } + + return result +} + func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) { var kind string var wg sync.WaitGroup + defer wg.Wait() + for _, info := range infos { currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind if kind != currentKind { wg.Wait() kind = currentKind } + wg.Add(1) - go func(i *resource.Info) { - errs <- fn(i) + go func(info *resource.Info) { + errs <- fn(info) wg.Done() }(info) } @@ -597,6 +866,7 @@ func createResource(info *resource.Info) error { if err != nil { return err } + return info.Refresh(obj, true) }) } @@ -674,48 +944,95 @@ func createPatch(target *resource.Info, current runtime.Object, threeWayMergeFor return patch, types.StrategicMergePatchType, err } -func updateResource(_ *Client, target *resource.Info, currentObj runtime.Object, force, threeWayMergeForUnstructured bool) error { - var ( - obj runtime.Object - helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) - kind = target.Mapping.GroupVersionKind.Kind - ) +func replaceResource(target *resource.Info, fieldValidationDirective FieldValidationDirective) error { - // if --force is applied, attempt to replace the existing resource with the new object. 
- if force { - var err error - obj, err = helper.Replace(target.Namespace, target.Name, true, target.Object) - if err != nil { - return fmt.Errorf("failed to replace object: %w", err) - } - slog.Debug("replace succeeded", "name", target.Name, "initialKind", currentObj.GetObjectKind().GroupVersionKind().Kind, "kind", kind) - } else { - patch, patchType, err := createPatch(target, currentObj, threeWayMergeForUnstructured) - if err != nil { - return fmt.Errorf("failed to create patch: %w", err) - } + helper := resource.NewHelper(target.Client, target.Mapping). + WithFieldValidation(string(fieldValidationDirective)). + WithFieldManager(getManagedFieldsManager()) - if patch == nil || string(patch) == "{}" { - slog.Debug("no changes detected", "kind", kind, "name", target.Name) - // This needs to happen to make sure that Helm has the latest info from the API - // Otherwise there will be no labels and other functions that use labels will panic - if err := target.Get(); err != nil { - return fmt.Errorf("failed to refresh resource information: %w", err) - } - return nil - } - // send patch to server - slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) - obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) - if err != nil { - return fmt.Errorf("cannot patch %q with kind %s: %w", target.Name, kind, err) + obj, err := helper.Replace(target.Namespace, target.Name, true, target.Object) + if err != nil { + return fmt.Errorf("failed to replace object: %w", err) + } + + if err := target.Refresh(obj, true); err != nil { + return fmt.Errorf("failed to refresh object after replace: %w", err) + } + + return nil + +} + +func patchResourceClientSide(target *resource.Info, original runtime.Object, threeWayMergeForUnstructured bool) error { + + patch, patchType, err := createPatch(target, original, threeWayMergeForUnstructured) + if err != nil { + return fmt.Errorf("failed to create patch: %w", err) + } + + kind := target.Mapping.GroupVersionKind.Kind + if patch == nil || string(patch) == "{}" { + slog.Debug("no changes detected", "kind", kind, "name", target.Name) + // This needs to happen to make sure that Helm has the latest info from the API + // Otherwise there will be no labels and other functions that use labels will panic + if err := target.Get(); err != nil { + return fmt.Errorf("failed to refresh resource information: %w", err) } + return nil + } + + // send patch to server + slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace) + helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) + obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch, nil) + if err != nil { + return fmt.Errorf("cannot patch %q with kind %s: %w", target.Name, kind, err) } target.Refresh(obj, true) + return nil } +// Patch reource using server-side apply +func patchResourceServerSide(info *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error { + helper := resource.NewHelper( + info.Client, + info.Mapping). + DryRun(dryRun). + WithFieldManager(ManagedFieldsManager). + WithFieldValidation(string(fieldValidationDirective)) + + // Send the full object to be applied on the server side. 
+ data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return fmt.Errorf("failed to encode object %s/%s with kind %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err) + } + options := metav1.PatchOptions{ + Force: &forceConflicts, + } + obj, err := helper.Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ) + if err != nil { + if isIncompatibleServerError(err) { + return fmt.Errorf("server-side apply not available on the server: %v", err) + } + + if apierrors.IsConflict(err) { + return fmt.Errorf("conflict occurred while applying %s/%s with kind %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err) + } + + return err + } + + return info.Refresh(obj, true) +} + // GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 5ffa0972b..8de856a5a 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -19,15 +19,19 @@ package kube import ( "bytes" "errors" + "fmt" "io" "net/http" "strings" + "sync" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -117,210 +121,209 @@ func newTestClient(t *testing.T) *Client { t.Cleanup(testFactory.Cleanup) return &Client{ - Factory: testFactory.WithNamespace("default"), + Factory: testFactory.WithNamespace(v1.NamespaceDefault), } } -func TestCreate(t *testing.T) { - // Note: c.Create with the fake client can currently only test creation of a single pod in the same list. When testing - // with more than one pod, c.Create will run into a data race as it calls perform->batchPerform which performs creation - // in batches. The first data race is on accessing var actions and can be fixed easily with a mutex lock in the Client - // function. The second data race though is something in the fake client itself in func (c *RESTClient) do(...) - // when it stores the req: c.Req = req and cannot (?) be fixed easily. 
- listA := newPodList("starfish") - listB := newPodList("dolphin") +type RequestResponseAction struct { + Request http.Request + Response http.Response + Error error +} - var actions []string - var iterationCounter int +type RoundTripperTestFunc func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) - c := newTestClient(t) - c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: unstructuredSerializer, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - path, method := req.URL.Path, req.Method - bodyReader := new(strings.Builder) - _, _ = io.Copy(bodyReader, req.Body) - body := bodyReader.String() - actions = append(actions, path+":"+method) - t.Logf("got request %s %s", path, method) - switch { - case path == "/namespaces/default/pods" && method == http.MethodPost: - if strings.Contains(body, "starfish") { - if iterationCounter < 2 { - iterationCounter++ - return newResponseJSON(http.StatusConflict, resourceQuotaConflict) - } - return newResponse(http.StatusOK, &listA.Items[0]) - } - return newResponseJSON(http.StatusConflict, resourceQuotaConflict) - default: - t.Fatalf("unexpected request: %s %s", method, path) - return nil, nil - } - }), +func NewRequestResponseLogClient(t *testing.T, cb RoundTripperTestFunc) RequestResponseLogClient { + t.Helper() + return RequestResponseLogClient{ + t: t, + cb: cb, } +} - t.Run("Create success", func(t *testing.T) { - list, err := c.Build(objBody(&listA), false) - if err != nil { - t.Fatal(err) - } +// RequestResponseLogClient is a test client that logs requests and responses +// Satifying http.RoundTripper interface, it can be used to mock HTTP requests in tests. +// Forwarding requests to a callback function (cb) that can be used to simulate server responses. 
+type RequestResponseLogClient struct { + t *testing.T + cb RoundTripperTestFunc + actionsLock sync.Mutex + Actions []RequestResponseAction +} - result, err := c.Create(list) - if err != nil { - t.Fatal(err) - } +func (r *RequestResponseLogClient) Do(req *http.Request) (*http.Response, error) { + t := r.t + t.Helper() - if len(result.Created) != 1 { - t.Errorf("expected 1 resource created, got %d", len(result.Created)) + readBodyBytes := func(body io.ReadCloser) []byte { + if body == nil { + return []byte{} } - expectedActions := []string{ - "/namespaces/default/pods:POST", - "/namespaces/default/pods:POST", - "/namespaces/default/pods:POST", - } - if len(expectedActions) != len(actions) { - t.Fatalf("unexpected number of requests, expected %d, got %d", len(expectedActions), len(actions)) - } - for k, v := range expectedActions { - if actions[k] != v { - t.Errorf("expected %s request got %s", v, actions[k]) - } - } - }) + defer body.Close() + bodyBytes, err := io.ReadAll(body) + require.NoError(t, err) - t.Run("Create failure", func(t *testing.T) { - list, err := c.Build(objBody(&listB), false) - if err != nil { - t.Fatal(err) - } + return bodyBytes + } - _, err = c.Create(list) - if err == nil { - t.Errorf("expected error") - } + reqBytes := readBodyBytes(req.Body) - expectedString := "Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; " + - "please apply your changes to the latest version and try again" - if !strings.Contains(err.Error(), expectedString) { - t.Errorf("Unexpected error message: %q", err) - } + t.Logf("Request: %s %s %s", req.Method, req.URL.String(), reqBytes) + if req.Body != nil { + req.Body = io.NopCloser(bytes.NewReader(reqBytes)) + } - expectedActions := []string{ - "/namespaces/default/pods:POST", - } - for k, v := range actions { - if expectedActions[0] != v { - t.Errorf("expected %s request got %s", v, actions[k]) - } - } + resp, err := r.cb(r.Actions, req) + + respBytes := readBodyBytes(resp.Body) + t.Logf("Response: %d %s", resp.StatusCode, string(respBytes)) + if resp.Body != nil { + resp.Body = io.NopCloser(bytes.NewReader(respBytes)) + } + + r.actionsLock.Lock() + defer r.actionsLock.Unlock() + r.Actions = append(r.Actions, RequestResponseAction{ + Request: *req, + Response: *resp, + Error: err, }) + + return resp, err } -func testUpdate(t *testing.T, threeWayMerge bool) { - t.Helper() - listA := newPodList("starfish", "otter", "squid") - listB := newPodList("starfish", "otter", "dolphin") - listC := newPodList("starfish", "otter", "dolphin") - listB.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} - listC.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} +func TestCreate(t *testing.T) { + // Note: c.Create with the fake client can currently only test creation of a single pod/object in the same list. When testing + // with more than one pod, c.Create will run into a data race as it calls perform->batchPerform which performs creation + // in batches. The race is something in the fake client itself in `func (c *RESTClient) do(...)` + // when it stores the req: c.Req = req and cannot (?) be fixed easily. 
- var actions []string - var iterationCounter int + type testCase struct { + Name string + Pods v1.PodList + Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) + ServerSideApply bool + ExpectedActions []string + ExpectedErrorContains string + } - c := newTestClient(t) - c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: unstructuredSerializer, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - p, m := req.URL.Path, req.Method - actions = append(actions, p+":"+m) - t.Logf("got request %s %s", p, m) - switch { - case p == "/namespaces/default/pods/starfish" && m == http.MethodGet: - return newResponse(http.StatusOK, &listA.Items[0]) - case p == "/namespaces/default/pods/otter" && m == http.MethodGet: - return newResponse(http.StatusOK, &listA.Items[1]) - case p == "/namespaces/default/pods/otter" && m == http.MethodPatch: - data, err := io.ReadAll(req.Body) - if err != nil { - t.Fatalf("could not dump request: %s", err) - } - req.Body.Close() - expected := `{}` - if string(data) != expected { - t.Errorf("expected patch\n%s\ngot\n%s", expected, string(data)) - } - return newResponse(http.StatusOK, &listB.Items[0]) - case p == "/namespaces/default/pods/dolphin" && m == http.MethodGet: - return newResponse(http.StatusNotFound, notFoundBody()) - case p == "/namespaces/default/pods/starfish" && m == http.MethodPatch: - data, err := io.ReadAll(req.Body) - if err != nil { - t.Fatalf("could not dump request: %s", err) - } - req.Body.Close() - expected := `{"spec":{"$setElementOrder/containers":[{"name":"app:v4"}],"containers":[{"$setElementOrder/ports":[{"containerPort":443}],"name":"app:v4","ports":[{"containerPort":443,"name":"https"},{"$patch":"delete","containerPort":80}]}]}}` - if string(data) != expected { - t.Errorf("expected patch\n%s\ngot\n%s", expected, string(data)) - } - return newResponse(http.StatusOK, &listB.Items[0]) - case p == "/namespaces/default/pods" && m == http.MethodPost: - if iterationCounter < 2 { - iterationCounter++ + testCases := map[string]testCase{ + "Create success (client-side apply)": { + Pods: newPodList("starfish"), + ServerSideApply: false, + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, _ *http.Request) (*http.Response, error) { + t.Helper() + + if len(previous) < 2 { // simulate a conflict return newResponseJSON(http.StatusConflict, resourceQuotaConflict) } - return newResponse(http.StatusOK, &listB.Items[1]) - case p == "/namespaces/default/pods/squid" && m == http.MethodDelete: - return newResponse(http.StatusOK, &listB.Items[1]) - case p == "/namespaces/default/pods/squid" && m == http.MethodGet: - return newResponse(http.StatusOK, &listB.Items[2]) - default: - t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) - return nil, nil - } - }), - } - first, err := c.Build(objBody(&listA), false) - if err != nil { - t.Fatal(err) - } - second, err := c.Build(objBody(&listB), false) - if err != nil { - t.Fatal(err) - } - var result *Result - if threeWayMerge { - result, err = c.UpdateThreeWayMerge(first, second, false) - } else { - result, err = c.Update(first, second, false) - } - if err != nil { - t.Fatal(err) - } + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + ExpectedActions: []string{ + "/namespaces/default/pods:POST", + "/namespaces/default/pods:POST", + "/namespaces/default/pods:POST", + }, + }, + "Create success (server-side apply)": { + Pods: 
newPodList("whale"), + ServerSideApply: true, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) { + t.Helper() - if len(result.Created) != 1 { - t.Errorf("expected 1 resource created, got %d", len(result.Created)) + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + ExpectedActions: []string{ + "/namespaces/default/pods/whale:PATCH", + }, + }, + "Create fail: incompatible server (server-side apply)": { + Pods: newPodList("lobster"), + ServerSideApply: true, + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + return &http.Response{ + StatusCode: http.StatusUnsupportedMediaType, + Request: req, + }, nil + }, + ExpectedErrorContains: "server-side apply not available on the server:", + ExpectedActions: []string{ + "/namespaces/default/pods/lobster:PATCH", + }, + }, + "Create fail: quota (server-side apply)": { + Pods: newPodList("dolphin"), + ServerSideApply: true, + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) { + t.Helper() + + return newResponseJSON(http.StatusConflict, resourceQuotaConflict) + }, + ExpectedErrorContains: "Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; " + + "please apply your changes to the latest version and try again", + ExpectedActions: []string{ + "/namespaces/default/pods/dolphin:PATCH", + }, + }, } - if len(result.Updated) != 2 { - t.Errorf("expected 2 resource updated, got %d", len(result.Updated)) + + c := newTestClient(t) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + + client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + return tc.Callback(t, tc, previous, req) + }) + + c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(client.Do), + } + + list, err := c.Build(objBody(&tc.Pods), false) + require.NoError(t, err) + if err != nil { + t.Fatal(err) + } + + result, err := c.Create( + list, + ClientCreateOptionServerSideApply(tc.ServerSideApply)) + if tc.ExpectedErrorContains != "" { + require.ErrorContains(t, err, tc.ExpectedErrorContains) + } else { + require.NoError(t, err) + + // See note above about limitations in supporting more than a single object + assert.Len(t, result.Created, 1, "expected 1 object created, got %d", len(result.Created)) + } + + actions := []string{} + for _, action := range client.Actions { + path, method := action.Request.URL.Path, action.Request.Method + actions = append(actions, path+":"+method) + } + + assert.Equal(t, tc.ExpectedActions, actions) + + }) } - if len(result.Deleted) != 1 { - t.Errorf("expected 1 resource deleted, got %d", len(result.Deleted)) +} + +func TestUpdate(t *testing.T) { + type testCase struct { + OriginalPods v1.PodList + TargetPods v1.PodList + ThreeWayMergeForUnstructured bool + ServerSideApply bool + ExpectedActions []string } - // TODO: Find a way to test methods that use Client Set - // Test with a wait - // if err := c.Update("test", objBody(codec, &listB), objBody(codec, &listC), false, 300, true); err != nil { - // t.Fatal(err) - // } - // Test with a wait should fail - // TODO: A way to make this not based off of an extremely short timeout? 
- // if err := c.Update("test", objBody(codec, &listC), objBody(codec, &listA), false, 2, true); err != nil { - // t.Fatal(err) - // } - expectedActions := []string{ + expectedActionsClientSideApply := []string{ "/namespaces/default/pods/starfish:GET", "/namespaces/default/pods/starfish:GET", "/namespaces/default/pods/starfish:PATCH", @@ -334,22 +337,152 @@ func testUpdate(t *testing.T, threeWayMerge bool) { "/namespaces/default/pods/squid:GET", "/namespaces/default/pods/squid:DELETE", } - if len(expectedActions) != len(actions) { - t.Fatalf("unexpected number of requests, expected %d, got %d", len(expectedActions), len(actions)) + + expectedActionsServerSideApply := []string{ + "/namespaces/default/pods/starfish:GET", + "/namespaces/default/pods/starfish:PATCH", + "/namespaces/default/pods/otter:GET", + "/namespaces/default/pods/otter:PATCH", + "/namespaces/default/pods/dolphin:GET", + "/namespaces/default/pods:POST", // create dolphin + "/namespaces/default/pods:POST", // retry due to 409 + "/namespaces/default/pods:POST", // retry due to 409 + "/namespaces/default/pods/squid:GET", + "/namespaces/default/pods/squid:DELETE", } - for k, v := range expectedActions { - if actions[k] != v { - t.Errorf("expected %s request got %s", v, actions[k]) - } + + testCases := map[string]testCase{ + "client-side apply": { + OriginalPods: newPodList("starfish", "otter", "squid"), + TargetPods: func() v1.PodList { + listTarget := newPodList("starfish", "otter", "dolphin") + listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return listTarget + }(), + ThreeWayMergeForUnstructured: false, + ServerSideApply: false, + ExpectedActions: expectedActionsClientSideApply, + }, + "client-side apply (three-way merge for unstructured)": { + OriginalPods: newPodList("starfish", "otter", "squid"), + TargetPods: func() v1.PodList { + listTarget := newPodList("starfish", "otter", "dolphin") + listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return listTarget + }(), + ThreeWayMergeForUnstructured: true, + ServerSideApply: false, + ExpectedActions: expectedActionsClientSideApply, + }, + "serverSideApply": { + OriginalPods: newPodList("starfish", "otter", "squid"), + TargetPods: func() v1.PodList { + listTarget := newPodList("starfish", "otter", "dolphin") + listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return listTarget + }(), + ThreeWayMergeForUnstructured: false, + ServerSideApply: true, + ExpectedActions: expectedActionsServerSideApply, + }, } -} -func TestUpdate(t *testing.T) { - testUpdate(t, false) -} + c := newTestClient(t) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + + listOriginal := tc.OriginalPods + listTarget := tc.TargetPods + + iterationCounter := 0 + cb := func(_ []RequestResponseAction, req *http.Request) (*http.Response, error) { + p, m := req.URL.Path, req.Method + + switch { + case p == "/namespaces/default/pods/starfish" && m == http.MethodGet: + return newResponse(http.StatusOK, &listOriginal.Items[0]) + case p == "/namespaces/default/pods/otter" && m == http.MethodGet: + return newResponse(http.StatusOK, &listOriginal.Items[1]) + case p == "/namespaces/default/pods/otter" && m == http.MethodPatch: + if !tc.ServerSideApply { + defer req.Body.Close() + data, err := io.ReadAll(req.Body) + require.NoError(t, err) + + assert.Equal(t, `{}`, string(data)) + } + + return newResponse(http.StatusOK, 
&listTarget.Items[0]) + case p == "/namespaces/default/pods/dolphin" && m == http.MethodGet: + return newResponse(http.StatusNotFound, notFoundBody()) + case p == "/namespaces/default/pods/starfish" && m == http.MethodPatch: + if !tc.ServerSideApply { + // Ensure client-side apply specifies correct patch + defer req.Body.Close() + data, err := io.ReadAll(req.Body) + require.NoError(t, err) + + expected := `{"spec":{"$setElementOrder/containers":[{"name":"app:v4"}],"containers":[{"$setElementOrder/ports":[{"containerPort":443}],"name":"app:v4","ports":[{"containerPort":443,"name":"https"},{"$patch":"delete","containerPort":80}]}]}}` + assert.Equal(t, expected, string(data)) + } + + return newResponse(http.StatusOK, &listTarget.Items[0]) + case p == "/namespaces/default/pods" && m == http.MethodPost: + if iterationCounter < 2 { + iterationCounter++ + return newResponseJSON(http.StatusConflict, resourceQuotaConflict) + } + + return newResponse(http.StatusOK, &listTarget.Items[1]) + case p == "/namespaces/default/pods/squid" && m == http.MethodDelete: + return newResponse(http.StatusOK, &listTarget.Items[1]) + case p == "/namespaces/default/pods/squid" && m == http.MethodGet: + return newResponse(http.StatusOK, &listTarget.Items[2]) + default: + } + + t.Fail() + return nil, nil + } + + client := NewRequestResponseLogClient(t, cb) + + c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(client.Do), + } + + first, err := c.Build(objBody(&listOriginal), false) + require.NoError(t, err) + + second, err := c.Build(objBody(&listTarget), false) + require.NoError(t, err) + + result, err := c.Update( + first, + second, + ClientUpdateOptionThreeWayMergeForUnstructured(tc.ThreeWayMergeForUnstructured), + ClientUpdateOptionForceReplace(false), + ClientUpdateOptionServerSideApply(tc.ServerSideApply)) + require.NoError(t, err) -func TestUpdateThreeWayMerge(t *testing.T) { - testUpdate(t, true) + assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + + actions := []string{} + for _, action := range client.Actions { + path, method := action.Request.URL.Path, action.Request.Method + actions = append(actions, path+":"+method) + } + + assert.Equal(t, tc.ExpectedActions, actions) + }) + } } func TestBuild(t *testing.T) { @@ -548,7 +681,11 @@ func TestWait(t *testing.T) { if err != nil { t.Fatal(err) } - result, err := c.Create(resources) + + result, err := c.Create( + resources, + ClientCreateOptionServerSideApply(false)) + if err != nil { t.Fatal(err) } @@ -605,7 +742,10 @@ func TestWaitJob(t *testing.T) { if err != nil { t.Fatal(err) } - result, err := c.Create(resources) + result, err := c.Create( + resources, + ClientCreateOptionServerSideApply(false)) + if err != nil { t.Fatal(err) } @@ -664,7 +804,9 @@ func TestWaitDelete(t *testing.T) { if err != nil { t.Fatal(err) } - result, err := c.Create(resources) + result, err := c.Create( + resources, + ClientCreateOptionServerSideApply(false)) if err != nil { t.Fatal(err) } @@ -1083,6 +1225,7 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { t.Run(testCase.name, testCase.run) } +<<<<<<< HEAD type errorFactory struct { *cmdtesting.TestFactory err error @@ -1183,4 +1326,428 @@ func TestIsReachable(t *testing.T) { } }) } +||||||| 
parent of 36a476ff4 (Kube client support server-side apply) +======= +func TestIsIncompatibleServerError(t *testing.T) { + testCases := map[string]struct { + Err error + Want bool + }{ + "Unsupported media type": { + Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusUnsupportedMediaType}}, + Want: true, + }, + "Not found error": { + Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotFound}}, + Want: false, + }, + "Generic error": { + Err: fmt.Errorf("some generic error"), + Want: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + if got := isIncompatibleServerError(tc.Err); got != tc.Want { + t.Errorf("isIncompatibleServerError() = %v, want %v", got, tc.Want) + } + }) + } +} + +func TestReplaceResource(t *testing.T) { + type testCase struct { + Pods v1.PodList + Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) + ExpectedErrorContains string + } + + testCases := map[string]testCase{ + "normal": { + Pods: newPodList("whale"), + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + switch len(previous) { + case 0: + assert.Equal(t, "GET", req.Method) + case 1: + assert.Equal(t, "PUT", req.Method) + } + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "conflict": { + Pods: newPodList("whale"), + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + return &http.Response{ + StatusCode: http.StatusConflict, + Request: req, + }, nil + }, + ExpectedErrorContains: "failed to replace object: the server reported a conflict", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + + testFactory := cmdtesting.NewTestFactory() + t.Cleanup(testFactory.Cleanup) + + client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + return tc.Callback(t, tc, previous, req) + }) + + testFactory.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(client.Do), + } + + resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.Pods), nil) + require.NoError(t, err) + + require.Len(t, resourceList, 1) + info := resourceList[0] + + err = replaceResource(info, FieldValidationDirectiveStrict) + if tc.ExpectedErrorContains != "" { + require.ErrorContains(t, err, tc.ExpectedErrorContains) + } else { + require.NoError(t, err) + require.NotNil(t, info.Object) + } + }) + } +} + +func TestUpdateResourceThreeWayMerge(t *testing.T) { + type testCase struct { + OriginalPods v1.PodList + TargetPods v1.PodList + ThreeWayMergeForUnstructured bool + Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) + ExpectedErrorContains string + } + + testCases := map[string]testCase{ + "normal": { + OriginalPods: newPodList("whale"), + TargetPods: func() v1.PodList { + pods := newPodList("whale") + pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return pods + }(), + ThreeWayMergeForUnstructured: false, + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, 
error) { + t.Helper() + + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + switch len(previous) { + case 0: + assert.Equal(t, "GET", req.Method) + return newResponse(http.StatusOK, &tc.OriginalPods.Items[0]) + case 1: + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type")) + return newResponse(http.StatusOK, &tc.TargetPods.Items[0]) + } + + t.Fail() + return nil, nil + }, + }, + "three way merge for unstructured": { + OriginalPods: newPodList("whale"), + TargetPods: func() v1.PodList { + pods := newPodList("whale") + pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return pods + }(), + ThreeWayMergeForUnstructured: true, + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + switch len(previous) { + case 0: + assert.Equal(t, "GET", req.Method) + return newResponse(http.StatusOK, &tc.OriginalPods.Items[0]) + case 1: + t.Logf("patcher: %+v", req.Header) + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type")) + return newResponse(http.StatusOK, &tc.TargetPods.Items[0]) + } + + t.Fail() + return nil, nil + }, + }, + "conflict": { + OriginalPods: newPodList("whale"), + TargetPods: func() v1.PodList { + pods := newPodList("whale") + pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}} + + return pods + }(), + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + switch len(previous) { + case 0: + assert.Equal(t, "GET", req.Method) + return newResponse(http.StatusOK, &tc.OriginalPods.Items[0]) + case 1: + assert.Equal(t, "PATCH", req.Method) + return &http.Response{ + StatusCode: http.StatusConflict, + Request: req, + }, nil + } + + t.Fail() + return nil, nil + + }, + ExpectedErrorContains: "cannot patch \"whale\" with kind Pod: the server reported a conflict", + }, + "no patch": { + OriginalPods: newPodList("whale"), + TargetPods: newPodList("whale"), + Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + switch len(previous) { + case 0: + assert.Equal(t, "GET", req.Method) + return newResponse(http.StatusOK, &tc.OriginalPods.Items[0]) + case 1: + assert.Equal(t, "GET", req.Method) + return newResponse(http.StatusOK, &tc.TargetPods.Items[0]) + } + + t.Fail() + return nil, nil // newResponse(http.StatusOK, &tc.TargetPods.Items[0]) + + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + + testFactory := cmdtesting.NewTestFactory() + t.Cleanup(testFactory.Cleanup) + + client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + return tc.Callback(t, tc, previous, req) + }) + + testFactory.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(client.Do), + } + + resourceListCurrent, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.OriginalPods), nil) + require.NoError(t, err) + 
require.Len(t, resourceListCurrent, 1) + + resourceListTarget, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.TargetPods), nil) + require.NoError(t, err) + require.Len(t, resourceListTarget, 1) + + current := resourceListCurrent[0] + target := resourceListTarget[0] + + err = patchResourceClientSide(target, current.Object, tc.ThreeWayMergeForUnstructured) + if tc.ExpectedErrorContains != "" { + require.ErrorContains(t, err, tc.ExpectedErrorContains) + } else { + require.NoError(t, err) + require.NotNil(t, target.Object) + } + }) + } +} + +func TestPatchResourceServerSide(t *testing.T) { + type testCase struct { + Pods v1.PodList + DryRun bool + ForceConflicts bool + FieldValidationDirective FieldValidationDirective + Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) + ExpectedErrorContains string + } + + testCases := map[string]testCase{ + "normal": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type")) + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + assert.Equal(t, "false", req.URL.Query().Get("force")) + assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation")) + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "dry run": { + Pods: newPodList("whale"), + DryRun: true, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type")) + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + assert.Equal(t, "All", req.URL.Query().Get("dryRun")) + assert.Equal(t, "false", req.URL.Query().Get("force")) + assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation")) + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "force conflicts": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: true, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type")) + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + assert.Equal(t, "true", req.URL.Query().Get("force")) + assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation")) + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "dry run + force conflicts": { + Pods: newPodList("whale"), + DryRun: true, + ForceConflicts: true, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type")) + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + assert.Equal(t, "All", req.URL.Query().Get("dryRun")) + assert.Equal(t, 
"true", req.URL.Query().Get("force")) + assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation")) + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "field validation ignore": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveIgnore, + Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + assert.Equal(t, "PATCH", req.Method) + assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type")) + assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path) + assert.Equal(t, "false", req.URL.Query().Get("force")) + assert.Equal(t, "Ignore", req.URL.Query().Get("fieldValidation")) + + return newResponse(http.StatusOK, &tc.Pods.Items[0]) + }, + }, + "incompatible server": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + return &http.Response{ + StatusCode: http.StatusUnsupportedMediaType, + Request: req, + }, nil + }, + ExpectedErrorContains: "server-side apply not available on the server:", + }, + "conflict": { + Pods: newPodList("whale"), + DryRun: false, + ForceConflicts: false, + FieldValidationDirective: FieldValidationDirectiveStrict, + Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) { + t.Helper() + + return &http.Response{ + StatusCode: http.StatusConflict, + Request: req, + }, nil + }, + ExpectedErrorContains: "the server reported a conflict", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + + testFactory := cmdtesting.NewTestFactory() + t.Cleanup(testFactory.Cleanup) + + client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) { + return tc.Callback(t, tc, previous, req) + }) + + testFactory.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(client.Do), + } + + resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, tc.FieldValidationDirective, objBody(&tc.Pods), nil) + require.NoError(t, err) + + require.Len(t, resourceList, 1) + info := resourceList[0] + + err = patchResourceServerSide(info, tc.DryRun, tc.ForceConflicts, tc.FieldValidationDirective) + if tc.ExpectedErrorContains != "" { + require.ErrorContains(t, err, tc.ExpectedErrorContains) + } else { + require.NoError(t, err) + require.NotNil(t, info.Object) + } + }) + } +} + +func TestDetermineFieldValidationDirective(t *testing.T) { + + assert.Equal(t, FieldValidationDirectiveIgnore, determineFieldValidationDirective(false)) + assert.Equal(t, FieldValidationDirectiveStrict, determineFieldValidationDirective(true)) +>>>>>>> 36a476ff4 (Kube client support server-side apply) } diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index a543a0f73..588bba83d 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -60,11 +60,11 @@ type FailingKubeWaiter struct { } // Create returns the configured error if set or prints -func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { +func (f *FailingKubeClient) Create(resources kube.ResourceList, options ...kube.ClientCreateOption) (*kube.Result, error) { if f.CreateError != nil { return nil, 
f.CreateError } - return f.PrintingKubeClient.Create(resources) + return f.PrintingKubeClient.Create(resources, options...) } // Get returns the configured error if set or prints @@ -117,19 +117,11 @@ func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time. } // Update returns the configured error if set or prints -func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { +func (f *FailingKubeClient) Update(r, modified kube.ResourceList, options ...kube.ClientUpdateOption) (*kube.Result, error) { if f.UpdateError != nil { return &kube.Result{}, f.UpdateError } - return f.PrintingKubeClient.Update(r, modified, ignoreMe) -} - -// Update returns the configured error if set or prints -func (f *FailingKubeClient) UpdateThreeWayMerge(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { - if f.UpdateError != nil { - return &kube.Result{}, f.UpdateError - } - return f.PrintingKubeClient.Update(r, modified, ignoreMe) + return f.PrintingKubeClient.Update(r, modified, options...) } // Build returns the configured error if set or prints diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index f6659a904..16c93615a 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -49,7 +49,7 @@ func (p *PrintingKubeClient) IsReachable() error { } // Create prints the values of what would be created with a real KubeClient. -func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { +func (p *PrintingKubeClient) Create(resources kube.ResourceList, _ ...kube.ClientCreateOption) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(resources)) if err != nil { return nil, err @@ -98,7 +98,7 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, } // Update implements KubeClient Update. -func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) { +func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ ...kube.ClientUpdateOption) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(modified)) if err != nil { return nil, err diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index 6b945088e..7339ae0ff 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -30,14 +30,14 @@ import ( // A KubernetesClient must be concurrency safe. type Interface interface { // Create creates one or more resources. - Create(resources ResourceList) (*Result, error) + Create(resources ResourceList, options ...ClientCreateOption) (*Result, error) // Delete destroys one or more resources. Delete(resources ResourceList) (*Result, []error) // Update updates one or more resources or creates the resource // if it doesn't exist. - Update(original, target ResourceList, force bool) (*Result, error) + Update(original, target ResourceList, options ...ClientUpdateOption) (*Result, error) // Build creates a resource list from a Reader. // @@ -53,13 +53,6 @@ type Interface interface { GetWaiter(ws WaitStrategy) (Waiter, error) } -// InterfaceThreeWayMerge was introduced to avoid breaking backwards compatibility for Interface implementers. -// -// TODO Helm 4: Remove InterfaceThreeWayMerge and integrate its method(s) into the Interface. -type InterfaceThreeWayMerge interface { - UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error) -} - // Waiter defines methods related to waiting for resource states. 
type Waiter interface { // Wait waits up to the given timeout for the specified resources to be ready. @@ -125,7 +118,6 @@ type InterfaceResources interface { } var _ Interface = (*Client)(nil) -var _ InterfaceThreeWayMerge = (*Client)(nil) var _ InterfaceLogs = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 8a3bacdcc..9bfa1ef6d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -18,7 +18,6 @@ package kube // import "helm.sh/helm/v4/pkg/kube" import ( "context" - "errors" "fmt" "log/slog" "net/http" @@ -223,26 +222,6 @@ func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Dur return perform(resources, hw.watchTimeout(timeout)) } -func perform(infos ResourceList, fn func(*resource.Info) error) error { - var result error - - if len(infos) == 0 { - return ErrNoObjectsVisited - } - - errs := make(chan error) - go batchPerform(infos, fn, errs) - - for range infos { - err := <-errs - if err != nil { - result = errors.Join(result, err) - } - } - - return result -} - func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error { kind := info.Mapping.GroupVersionKind.Kind switch kind { From 741facca434c9ae9d4c7de4c5ea1ad71d6782790 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 7 Jul 2025 10:41:40 -0700 Subject: [PATCH 27/88] Update pkg/kube/client_test.go Signed-off-by: George Jenkins --- pkg/kube/client_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 8de856a5a..6fc2f1cc8 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -1428,7 +1428,7 @@ func TestReplaceResource(t *testing.T) { } } -func TestUpdateResourceThreeWayMerge(t *testing.T) { +func TestPatchResourceClientSide(t *testing.T) { type testCase struct { OriginalPods v1.PodList TargetPods v1.PodList From 99dc23f00b37624ef7070aa9059cfd5bdfcff5a2 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 28 Jul 2025 22:13:49 -0700 Subject: [PATCH 28/88] switch target<->original Signed-off-by: George Jenkins --- pkg/kube/client.go | 62 ++++++++++++++++++++--------------------- pkg/kube/client_test.go | 30 ++++++++++---------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index aa7c86c9b..b436f518f 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -499,12 +499,12 @@ func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, erro transformRequests) } -func (c *Client) update(target, original ResourceList, updateApplyFunc func(target, original *resource.Info) error) (*Result, error) { +func (c *Client) update(originals, targets ResourceList, updateApplyFunc func(original, target *resource.Info) error) (*Result, error) { updateErrors := []error{} res := &Result{} - slog.Debug("checking resources for changes", "resources", len(target)) - err := target.Visit(func(target *resource.Info, err error) error { + slog.Debug("checking resources for changes", "resources", len(targets)) + err := targets.Visit(func(target *resource.Info, err error) error { if err != nil { return err } @@ -528,13 +528,13 @@ func (c *Client) update(target, original ResourceList, updateApplyFunc func(targ return nil } - original := original.Get(target) + original := originals.Get(target) if original == nil { kind := target.Mapping.GroupVersionKind.Kind return fmt.Errorf("original object %s with the name %q not found", 
kind, target.Name) } - if err := updateApplyFunc(target, original); err != nil { + if err := updateApplyFunc(original, target); err != nil { updateErrors = append(updateErrors, err) } @@ -551,7 +551,7 @@ func (c *Client) update(target, original ResourceList, updateApplyFunc func(targ return res, joinErrors(updateErrors, " && ") } - for _, info := range original.Difference(target) { + for _, info := range originals.Difference(targets) { slog.Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) if err := info.Get(); err != nil { @@ -661,7 +661,7 @@ func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldVa // used for cleanup or other logging purposes. // // The default is to use server-side apply, equivalent to: `ClientUpdateOptionServerSideApply(true)` -func (c *Client) Update(original, target ResourceList, options ...ClientUpdateOption) (*Result, error) { +func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdateOption) (*Result, error) { updateOptions := clientUpdateOptions{ serverSideApply: true, // Default to server-side apply fieldValidationDirective: FieldValidationDirectiveStrict, @@ -683,12 +683,12 @@ func (c *Client) Update(original, target ResourceList, options ...ClientUpdateOp return nil, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together") } - makeUpdateApplyFunc := func() func(target, original *resource.Info) error { + makeUpdateApplyFunc := func() func(original, target *resource.Info) error { if updateOptions.forceReplace { slog.Debug( "using resource replace update strategy", slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) - return func(target, original *resource.Info) error { + return func(original, target *resource.Info) error { if err := replaceResource(target, updateOptions.fieldValidationDirective); err != nil { slog.Debug("error replacing the resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) return err @@ -706,7 +706,7 @@ func (c *Client) Update(original, target ResourceList, options ...ClientUpdateOp slog.Bool("forceConflicts", updateOptions.forceConflicts), slog.Bool("dryRun", updateOptions.dryRun), slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) - return func(target, _ *resource.Info) error { + return func(_, target *resource.Info) error { err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective) logger := slog.With( @@ -725,12 +725,12 @@ func (c *Client) Update(original, target ResourceList, options ...ClientUpdateOp } slog.Debug("using client-side apply for resource update", slog.Bool("threeWayMergeForUnstructured", updateOptions.threeWayMergeForUnstructured)) - return func(target, original *resource.Info) error { - return patchResourceClientSide(target, original.Object, updateOptions.threeWayMergeForUnstructured) + return func(original, target *resource.Info) error { + return patchResourceClientSide(original.Object, target, updateOptions.threeWayMergeForUnstructured) } } - return c.update(target, original, makeUpdateApplyFunc()) + return c.update(originals, targets, makeUpdateApplyFunc()) } // Delete deletes Kubernetes resources specified in the resources list with @@ -753,16 +753,16 @@ func deleteResources(resources ResourceList, propagation metav1.DeletionPropagat var errs []error 
res := &Result{} mtx := sync.Mutex{} - err := perform(resources, func(info *resource.Info) error { - slog.Debug("starting delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind) - err := deleteResource(info, propagation) + err := perform(resources, func(target *resource.Info) error { + slog.Debug("starting delete resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind) + err := deleteResource(target, propagation) if err == nil || apierrors.IsNotFound(err) { if err != nil { - slog.Debug("ignoring delete failure", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) + slog.Debug("ignoring delete failure", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) } mtx.Lock() defer mtx.Unlock() - res.Deleted = append(res.Deleted, info) + res.Deleted = append(res.Deleted, target) return nil } mtx.Lock() @@ -881,8 +881,8 @@ func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) erro }) } -func createPatch(target *resource.Info, current runtime.Object, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) { - oldData, err := json.Marshal(current) +func createPatch(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) { + oldData, err := json.Marshal(original) if err != nil { return nil, types.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %w", err) } @@ -963,9 +963,9 @@ func replaceResource(target *resource.Info, fieldValidationDirective FieldValida } -func patchResourceClientSide(target *resource.Info, original runtime.Object, threeWayMergeForUnstructured bool) error { +func patchResourceClientSide(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) error { - patch, patchType, err := createPatch(target, original, threeWayMergeForUnstructured) + patch, patchType, err := createPatch(original, target, threeWayMergeForUnstructured) if err != nil { return fmt.Errorf("failed to create patch: %w", err) } @@ -995,25 +995,25 @@ func patchResourceClientSide(target *resource.Info, original runtime.Object, thr } // Patch reource using server-side apply -func patchResourceServerSide(info *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error { +func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error { helper := resource.NewHelper( - info.Client, - info.Mapping). + target.Client, + target.Mapping). DryRun(dryRun). WithFieldManager(ManagedFieldsManager). WithFieldValidation(string(fieldValidationDirective)) // Send the full object to be applied on the server side. 
- data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, target.Object) if err != nil { - return fmt.Errorf("failed to encode object %s/%s with kind %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err) + return fmt.Errorf("failed to encode object %s/%s with kind %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.Kind, err) } options := metav1.PatchOptions{ Force: &forceConflicts, } obj, err := helper.Patch( - info.Namespace, - info.Name, + target.Namespace, + target.Name, types.ApplyPatchType, data, &options, @@ -1024,13 +1024,13 @@ func patchResourceServerSide(info *resource.Info, dryRun bool, forceConflicts bo } if apierrors.IsConflict(err) { - return fmt.Errorf("conflict occurred while applying %s/%s with kind %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err) + return fmt.Errorf("conflict occurred while applying %s/%s with kind %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.Kind, err) } return err } - return info.Refresh(obj, true) + return target.Refresh(obj, true) } // GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 6fc2f1cc8..bdc5a9d7f 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -1083,8 +1083,8 @@ type createPatchTestCase struct { // The target state. target *unstructured.Unstructured - // The current state as it exists in the release. - current *unstructured.Unstructured + // The state as it exists in the release. + original *unstructured.Unstructured // The actual state as it exists in the cluster. actual *unstructured.Unstructured @@ -1132,15 +1132,15 @@ func (c createPatchTestCase) run(t *testing.T) { }, } - patch, patchType, err := createPatch(targetInfo, c.current, c.threeWayMergeForUnstructured) + patch, patchType, err := createPatch(c.original, targetInfo, c.threeWayMergeForUnstructured) if err != nil { t.Fatalf("Failed to create patch: %v", err) } if c.expectedPatch != string(patch) { - t.Errorf("Unexpected patch.\nTarget:\n%s\nCurrent:\n%s\nActual:\n%s\n\nExpected:\n%s\nGot:\n%s", + t.Errorf("Unexpected patch.\nTarget:\n%s\nOriginal:\n%s\nActual:\n%s\n\nExpected:\n%s\nGot:\n%s", c.target, - c.current, + c.original, c.actual, c.expectedPatch, string(patch), @@ -1182,9 +1182,9 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { "objectset.rio.cattle.io/id": "default-foo-simple", }, nil) testCase := createPatchTestCase{ - name: "take ownership of resource", - target: target, - current: target, + name: "take ownership of resource", + target: target, + original: target, actual: newTestCustomResourceData(nil, map[string]interface{}{ "color": "red", }), @@ -1206,9 +1206,9 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { "size": "large", }) testCase := createPatchTestCase{ - name: "merge with spec of existing custom resource", - target: target, - current: target, + name: "merge with spec of existing custom resource", + target: target, + original: target, actual: newTestCustomResourceData(nil, map[string]interface{}{ "color": "red", "weight": "heavy", @@ -1561,18 +1561,18 @@ func TestPatchResourceClientSide(t *testing.T) { Client: fake.CreateHTTPClient(client.Do), } - resourceListCurrent, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.OriginalPods), nil) + resourceListOriginal, err := 
buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.OriginalPods), nil) require.NoError(t, err) - require.Len(t, resourceListCurrent, 1) + require.Len(t, resourceListOriginal, 1) resourceListTarget, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.TargetPods), nil) require.NoError(t, err) require.Len(t, resourceListTarget, 1) - current := resourceListCurrent[0] + original := resourceListOriginal[0] target := resourceListTarget[0] - err = patchResourceClientSide(target, current.Object, tc.ThreeWayMergeForUnstructured) + err = patchResourceClientSide(original.Object, target, tc.ThreeWayMergeForUnstructured) if tc.ExpectedErrorContains != "" { require.ErrorContains(t, err, tc.ExpectedErrorContains) } else { From b2dc411f9d77f3bca969fb4ad955d4a091aa0454 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Tue, 12 Aug 2025 10:49:10 -0700 Subject: [PATCH 29/88] code review (error checks, collapse forceConflicts, UpdateApplyFunc) Signed-off-by: George Jenkins --- pkg/action/hooks.go | 2 +- pkg/action/install.go | 8 +++--- pkg/action/rollback.go | 2 +- pkg/action/upgrade.go | 2 +- pkg/kube/client.go | 64 +++++++++++++++++++++-------------------- pkg/kube/client_test.go | 16 +++++------ 6 files changed, 47 insertions(+), 47 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 95260e0e4..275a1bf52 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -75,7 +75,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Create hook resources if _, err := cfg.KubeClient.Create( resources, - kube.ClientCreateOptionServerSideApply(false)); err != nil { + kube.ClientCreateOptionServerSideApply(false, false)); err != nil { h.LastRun.CompletedAt = helmtime.Now() h.LastRun.Phase = release.HookPhaseFailed return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) diff --git a/pkg/action/install.go b/pkg/action/install.go index 9a9101f5d..b46b4446b 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -175,7 +175,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // Send them to Kube if _, err := i.cfg.KubeClient.Create( res, - kube.ClientCreateOptionServerSideApply(false)); err != nil { + kube.ClientCreateOptionServerSideApply(false, false)); err != nil { // If the error is CRD already exists, continue. 
if apierrors.IsAlreadyExists(err) { crdName := res[0].Name @@ -403,7 +403,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } if _, err := i.cfg.KubeClient.Create( resourceList, - kube.ClientCreateOptionServerSideApply(false)); err != nil && !apierrors.IsAlreadyExists(err) { + kube.ClientCreateOptionServerSideApply(false, false)); err != nil && !apierrors.IsAlreadyExists(err) { return nil, err } } @@ -474,13 +474,13 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource if len(toBeAdopted) == 0 && len(resources) > 0 { _, err = i.cfg.KubeClient.Create( resources, - kube.ClientCreateOptionServerSideApply(false)) + kube.ClientCreateOptionServerSideApply(false, false)) } else if len(resources) > 0 { updateThreeWayMergeForUnstructured := i.TakeOwnership _, err = i.cfg.KubeClient.Update( toBeAdopted, resources, - kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionServerSideApply(false, false), kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured), kube.ClientUpdateOptionForceReplace(i.ForceReplace)) } diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index f60d4f4bc..dd1f8c390 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -193,7 +193,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas results, err := r.cfg.KubeClient.Update( current, target, - kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionServerSideApply(false, false), kube.ClientUpdateOptionForceReplace(r.ForceReplace)) if err != nil { diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index a32d6e78e..abf4342d3 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -429,7 +429,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele results, err := u.cfg.KubeClient.Update( current, target, - kube.ClientUpdateOptionServerSideApply(false), + kube.ClientUpdateOptionServerSideApply(false, false), kube.ClientUpdateOptionForceReplace(u.ForceReplace)) if err != nil { u.cfg.recordRelease(originalRelease) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index b436f518f..016055392 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -214,26 +214,23 @@ type ClientCreateOption func(*clientCreateOptions) error // ClientUpdateOptionServerSideApply enables performing object apply server-side // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ -func ClientCreateOptionServerSideApply(serverSideApply bool) ClientCreateOption { - return func(o *clientCreateOptions) error { - o.serverSideApply = serverSideApply - - return nil - } -} - -// ClientCreateOptionForceConflicts forces field conflicts to be resolved +// +// `forceConflicts` forces conflicts to be resolved (may be when serverSideApply enabled only) // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts -// Only valid when ClientUpdateOptionServerSideApply enabled -func ClientCreateOptionForceConflicts(forceConflicts bool) ClientCreateOption { +func ClientCreateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientCreateOption { return func(o *clientCreateOptions) error { + if !serverSideApply && forceConflicts { + return fmt.Errorf("forceConflicts enabled when serverSideApply disabled") + } + + o.serverSideApply = serverSideApply o.forceConflicts = forceConflicts return nil } } -// ClientCreateOptionDryRun performs non-mutating operations only +// ClientCreateOptionDryRun 
requests the server to perform non-mutating operations only func ClientCreateOptionDryRun(dryRun bool) ClientCreateOption { return func(o *clientCreateOptions) error { o.dryRun = dryRun @@ -264,8 +261,12 @@ func (c *Client) Create(resources ResourceList, options ...ClientCreateOption) ( fieldValidationDirective: FieldValidationDirectiveStrict, } + errs := make([]error, 0, len(options)) for _, o := range options { - o(&createOptions) + errs = append(errs, o(&createOptions)) + } + if err := errors.Join(errs...); err != nil { + return nil, fmt.Errorf("invalid client create option(s): %w", err) } if createOptions.forceConflicts && !createOptions.serverSideApply { @@ -499,7 +500,7 @@ func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, erro transformRequests) } -func (c *Client) update(originals, targets ResourceList, updateApplyFunc func(original, target *resource.Info) error) (*Result, error) { +func (c *Client) update(originals, targets ResourceList, updateApplyFunc UpdateApplyFunc) (*Result, error) { updateErrors := []error{} res := &Result{} @@ -599,9 +600,17 @@ func ClientUpdateOptionThreeWayMergeForUnstructured(threeWayMergeForUnstructured // ClientUpdateOptionServerSideApply enables performing object apply server-side (default) // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ // Must not be enabled when ClientUpdateOptionThreeWayMerge is enabled -func ClientUpdateOptionServerSideApply(serverSideApply bool) ClientUpdateOption { +// +// `forceConflicts` forces conflicts to be resolved (may be enabled when serverSideApply enabled only) +// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts +func ClientUpdateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientUpdateOption { return func(o *clientUpdateOptions) error { + if !serverSideApply && forceConflicts { + return fmt.Errorf("forceConflicts enabled when serverSideApply disabled") + } + o.serverSideApply = serverSideApply + o.forceConflicts = forceConflicts return nil } @@ -617,20 +626,7 @@ func ClientUpdateOptionForceReplace(forceReplace bool) ClientUpdateOption { } } -// ClientUpdateOptionForceConflicts forces field conflicts to be resolved -// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts -// Must not be enabled when ClientUpdateOptionForceReplace is enabled -func ClientUpdateOptionForceConflicts(forceConflicts bool) ClientUpdateOption { - return func(o *clientUpdateOptions) error { - o.forceConflicts = forceConflicts - - return nil - } -} - -// ClientUpdateOptionForceConflicts forces field conflicts to be resolved -// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts -// Must not be enabled when ClientUpdateOptionForceReplace is enabled +// ClientUpdateOptionDryRun requests the server to perform non-mutating operations only func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption { return func(o *clientUpdateOptions) error { o.dryRun = dryRun @@ -652,6 +648,8 @@ func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldVa } } +type UpdateApplyFunc func(original, target *resource.Info) error + // Update takes the current list of objects and target list of objects and // creates resources that don't already exist, updates resources that have been // modified in the target configuration, and deletes resources from the current @@ -667,8 +665,12 @@ func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdate fieldValidationDirective: 
FieldValidationDirectiveStrict, } + errs := make([]error, 0, len(options)) for _, o := range options { - o(&updateOptions) + errs = append(errs, o(&updateOptions)) + } + if err := errors.Join(errs...); err != nil { + return nil, fmt.Errorf("invalid client update option(s): %w", err) } if updateOptions.threeWayMergeForUnstructured && updateOptions.serverSideApply { @@ -683,7 +685,7 @@ func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdate return nil, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together") } - makeUpdateApplyFunc := func() func(original, target *resource.Info) error { + makeUpdateApplyFunc := func() UpdateApplyFunc { if updateOptions.forceReplace { slog.Debug( "using resource replace update strategy", diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index bdc5a9d7f..5060a5fc2 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -292,7 +292,7 @@ func TestCreate(t *testing.T) { result, err := c.Create( list, - ClientCreateOptionServerSideApply(tc.ServerSideApply)) + ClientCreateOptionServerSideApply(tc.ServerSideApply, false)) if tc.ExpectedErrorContains != "" { require.ErrorContains(t, err, tc.ExpectedErrorContains) } else { @@ -467,7 +467,7 @@ func TestUpdate(t *testing.T) { second, ClientUpdateOptionThreeWayMergeForUnstructured(tc.ThreeWayMergeForUnstructured), ClientUpdateOptionForceReplace(false), - ClientUpdateOptionServerSideApply(tc.ServerSideApply)) + ClientUpdateOptionServerSideApply(tc.ServerSideApply, false)) require.NoError(t, err) assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) @@ -684,7 +684,7 @@ func TestWait(t *testing.T) { result, err := c.Create( resources, - ClientCreateOptionServerSideApply(false)) + ClientCreateOptionServerSideApply(false, false)) if err != nil { t.Fatal(err) @@ -744,7 +744,7 @@ func TestWaitJob(t *testing.T) { } result, err := c.Create( resources, - ClientCreateOptionServerSideApply(false)) + ClientCreateOptionServerSideApply(false, false)) if err != nil { t.Fatal(err) @@ -806,7 +806,7 @@ func TestWaitDelete(t *testing.T) { } result, err := c.Create( resources, - ClientCreateOptionServerSideApply(false)) + ClientCreateOptionServerSideApply(false, false)) if err != nil { t.Fatal(err) } @@ -1225,7 +1225,6 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { t.Run(testCase.name, testCase.run) } -<<<<<<< HEAD type errorFactory struct { *cmdtesting.TestFactory err error @@ -1326,8 +1325,8 @@ func TestIsReachable(t *testing.T) { } }) } -||||||| parent of 36a476ff4 (Kube client support server-side apply) -======= +} + func TestIsIncompatibleServerError(t *testing.T) { testCases := map[string]struct { Err error @@ -1749,5 +1748,4 @@ func TestDetermineFieldValidationDirective(t *testing.T) { assert.Equal(t, FieldValidationDirectiveIgnore, determineFieldValidationDirective(false)) assert.Equal(t, FieldValidationDirectiveStrict, determineFieldValidationDirective(true)) ->>>>>>> 36a476ff4 (Kube client support server-side apply) } From fab70472af3c4057f2e12019ae6bde1e1c2d013b Mon Sep 17 00:00:00 2001 From: joemicky Date: Thu, 14 Aug 2025 19:21:52 +0800 Subject: [PATCH 30/88] refactor: replace []byte(fmt.Sprintf) with fmt.Appendf Signed-off-by: joemicky --- internal/chart/v3/util/create.go | 4 ++-- pkg/chart/v2/util/create.go | 4 ++-- pkg/registry/utils_test.go | 2 +- pkg/repo/repotest/server.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/chart/v3/util/create.go 
b/internal/chart/v3/util/create.go index 72fed5955..6a28f99d4 100644 --- a/internal/chart/v3/util/create.go +++ b/internal/chart/v3/util/create.go @@ -733,12 +733,12 @@ func Create(name, dir string) (string, error) { { // Chart.yaml path: filepath.Join(cdir, ChartfileName), - content: []byte(fmt.Sprintf(defaultChartfile, name)), + content: fmt.Appendf(nil, defaultChartfile, name), }, { // values.yaml path: filepath.Join(cdir, ValuesfileName), - content: []byte(fmt.Sprintf(defaultValues, name)), + content: fmt.Appendf(nil, defaultValues, name), }, { // .helmignore diff --git a/pkg/chart/v2/util/create.go b/pkg/chart/v2/util/create.go index e9cf3c2c6..a8ae3ab40 100644 --- a/pkg/chart/v2/util/create.go +++ b/pkg/chart/v2/util/create.go @@ -733,12 +733,12 @@ func Create(name, dir string) (string, error) { { // Chart.yaml path: filepath.Join(cdir, ChartfileName), - content: []byte(fmt.Sprintf(defaultChartfile, name)), + content: fmt.Appendf(nil, defaultChartfile, name), }, { // values.yaml path: filepath.Join(cdir, ValuesfileName), - content: []byte(fmt.Sprintf(defaultValues, name)), + content: fmt.Appendf(nil, defaultValues, name), }, { // .helmignore diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go index f4ff5bd58..b46317fc6 100644 --- a/pkg/registry/utils_test.go +++ b/pkg/registry/utils_test.go @@ -121,7 +121,7 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry { pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost) suite.Nil(err, "no error generating bcrypt password for test htpasswd file") htpasswdPath := filepath.Join(suite.WorkspaceDir, testHtpasswdFileBasename) - err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0644) + err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0644) suite.Nil(err, "no error creating test htpasswd file") // Registry config diff --git a/pkg/repo/repotest/server.go b/pkg/repo/repotest/server.go index 7ff028b90..8f9f82281 100644 --- a/pkg/repo/repotest/server.go +++ b/pkg/repo/repotest/server.go @@ -169,7 +169,7 @@ func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) { t.Fatal("error generating bcrypt password for test htpasswd file") } htpasswdPath := filepath.Join(dir, testHtpasswdFileBasename) - err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0o644) + err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0o644) if err != nil { t.Fatalf("error creating test htpasswd file") } From a3d2da4d2e3732ac9888c490a356722428abcfaa Mon Sep 17 00:00:00 2001 From: joemicky Date: Thu, 14 Aug 2025 19:27:39 +0800 Subject: [PATCH 31/88] refactor: replace HasPrefix+TrimPrefix with CutPrefix Signed-off-by: joemicky --- pkg/cmd/release_testing.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index b43b67ca0..b660a16c5 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -59,8 +59,8 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command client.Namespace = settings.Namespace() notName := regexp.MustCompile(`^!\s?name=`) for _, f := range filter { - if strings.HasPrefix(f, "name=") { - client.Filters[action.IncludeNameFilter] = append(client.Filters[action.IncludeNameFilter], strings.TrimPrefix(f, "name=")) + if after, ok := strings.CutPrefix(f, "name="); ok { + 
client.Filters[action.IncludeNameFilter] = append(client.Filters[action.IncludeNameFilter], after) } else if notName.MatchString(f) { client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, "")) } From 762ef3ee80caa67f371f7759e8299b6bc23d1263 Mon Sep 17 00:00:00 2001 From: joemicky Date: Thu, 14 Aug 2025 19:30:33 +0800 Subject: [PATCH 32/88] refactor: omit unnecessary reassignment Signed-off-by: joemicky --- pkg/action/install_test.go | 1 - pkg/cmd/load_plugins.go | 1 - pkg/repo/index_test.go | 1 - 3 files changed, 3 deletions(-) diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 1882f19e7..424ee6135 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -890,7 +890,6 @@ func TestNameAndChartGenerateName(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index 5c7f618eb..8def7f9fa 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -63,7 +63,6 @@ func loadPlugins(baseCmd *cobra.Command, out io.Writer) { // Now we create commands for all of these. for _, plug := range found { - plug := plug md := plug.Metadata if md.Usage == "" { md.Usage = fmt.Sprintf("the %q plugin", md.Name) diff --git a/pkg/repo/index_test.go b/pkg/repo/index_test.go index 7810d3ac0..a8aadadec 100644 --- a/pkg/repo/index_test.go +++ b/pkg/repo/index_test.go @@ -160,7 +160,6 @@ func TestLoadIndex(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() i, err := LoadIndexFile(tc.Filename) From fa73b6743be24c69b3cc32f749bc3c8ec65a0113 Mon Sep 17 00:00:00 2001 From: Isaiah Lewis Date: Fri, 15 Aug 2025 07:31:30 -0700 Subject: [PATCH 33/88] fix(helm-lint): Add HTTP/HTTPS URL support for json schema references Signed-off-by: Isaiah Lewis --- pkg/chart/v2/util/jsonschema.go | 48 ++++++++++++++++++++++++++++ pkg/chart/v2/util/jsonschema_test.go | 40 +++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go index 820e5953a..96fd207b9 100644 --- a/pkg/chart/v2/util/jsonschema.go +++ b/pkg/chart/v2/util/jsonschema.go @@ -21,13 +21,53 @@ import ( "errors" "fmt" "log/slog" + "net/http" "strings" + "time" "github.com/santhosh-tekuri/jsonschema/v6" + "helm.sh/helm/v4/internal/version" + chart "helm.sh/helm/v4/pkg/chart/v2" ) +// HTTPURLLoader implements a loader for HTTP/HTTPS URLs +type HTTPURLLoader http.Client + +func (l *HTTPURLLoader) Load(urlStr string) (any, error) { + client := (*http.Client)(l) + + req, err := http.NewRequest(http.MethodGet, urlStr, nil) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request for %s: %w", urlStr, err) + } + req.Header.Set("User-Agent", version.GetUserAgent()) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP request failed for %s: %w", urlStr, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP request to %s returned status %d (%s)", urlStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return jsonschema.UnmarshalJSON(resp.Body) +} + +// newHTTPURLLoader creates a HTTP URL loader with proxy support. 
+func newHTTPURLLoader() *HTTPURLLoader { + httpLoader := HTTPURLLoader(http.Client{ + Timeout: 15 * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + }) + return &httpLoader +} + // ValidateAgainstSchema checks that values does not violate the structure laid out in schema func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { var sb strings.Builder @@ -71,7 +111,15 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error } slog.Debug("unmarshalled JSON schema", "schema", schemaJSON) + // Configure compiler with loaders for different URL schemes + loader := jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "http": newHTTPURLLoader(), + "https": newHTTPURLLoader(), + } + compiler := jsonschema.NewCompiler() + compiler.UseLoader(loader) err = compiler.AddResource("file:///values.schema.json", schema) if err != nil { return err diff --git a/pkg/chart/v2/util/jsonschema_test.go b/pkg/chart/v2/util/jsonschema_test.go index 3279eb0db..cd95b7faf 100644 --- a/pkg/chart/v2/util/jsonschema_test.go +++ b/pkg/chart/v2/util/jsonschema_test.go @@ -17,7 +17,10 @@ limitations under the License. package util import ( + "net/http" + "net/http/httptest" "os" + "strings" "testing" chart "helm.sh/helm/v4/pkg/chart/v2" @@ -245,3 +248,40 @@ func TestValidateAgainstSchema2020Negative(t *testing.T) { t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) } } + +func TestHTTPURLLoader_Load(t *testing.T) { + // Test successful JSON schema loading + t.Run("successful load", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"type": "object", "properties": {"name": {"type": "string"}}}`)) + })) + defer server.Close() + + loader := newHTTPURLLoader() + result, err := loader.Load(server.URL) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + if result == nil { + t.Fatal("Expected result to be non-nil") + } + }) + + t.Run("HTTP error status", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer server.Close() + + loader := newHTTPURLLoader() + _, err := loader.Load(server.URL) + if err == nil { + t.Fatal("Expected error for HTTP 404") + } + if !strings.Contains(err.Error(), "404") { + t.Errorf("Expected error message to contain '404', got: %v", err) + } + }) +} From a1c84f9a4c7a0bc0ae7598a1e46a83333aff0681 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Sat, 16 Aug 2025 14:42:00 -0700 Subject: [PATCH 34/88] Move pkg/plugin -> internal/plugin Signed-off-by: George Jenkins --- {pkg => internal}/plugin/cache/cache.go | 2 +- {pkg => internal}/plugin/hooks.go | 2 +- {pkg => internal}/plugin/installer/base.go | 2 +- {pkg => internal}/plugin/installer/base_test.go | 2 +- {pkg => internal}/plugin/installer/doc.go | 2 +- {pkg => internal}/plugin/installer/http_installer.go | 4 ++-- {pkg => internal}/plugin/installer/http_installer_test.go | 2 +- {pkg => internal}/plugin/installer/installer.go | 2 +- {pkg => internal}/plugin/installer/installer_test.go | 0 {pkg => internal}/plugin/installer/local_installer.go | 2 +- {pkg => internal}/plugin/installer/local_installer_test.go | 2 +- {pkg => internal}/plugin/installer/vcs_installer.go | 4 ++-- {pkg => 
internal}/plugin/installer/vcs_installer_test.go | 2 +- {pkg => internal}/plugin/plugin.go | 0 {pkg => internal}/plugin/plugin_test.go | 0 .../plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml | 0 .../plugin/testdata/plugdir/good/downloader/plugin.yaml | 0 .../plugin/testdata/plugdir/good/echo/plugin.yaml | 0 .../plugin/testdata/plugdir/good/hello/hello.ps1 | 0 {pkg => internal}/plugin/testdata/plugdir/good/hello/hello.sh | 0 .../plugin/testdata/plugdir/good/hello/plugin.yaml | 0 pkg/cmd/load_plugins.go | 2 +- pkg/cmd/plugin.go | 2 +- pkg/cmd/plugin_install.go | 4 ++-- pkg/cmd/plugin_list.go | 2 +- pkg/cmd/plugin_uninstall.go | 2 +- pkg/cmd/plugin_update.go | 4 ++-- pkg/getter/plugingetter.go | 2 +- 28 files changed, 23 insertions(+), 23 deletions(-) rename {pkg => internal}/plugin/cache/cache.go (96%) rename {pkg => internal}/plugin/hooks.go (94%) rename {pkg => internal}/plugin/installer/base.go (93%) rename {pkg => internal}/plugin/installer/base_test.go (94%) rename {pkg => internal}/plugin/installer/doc.go (89%) rename {pkg => internal}/plugin/installer/http_installer.go (98%) rename {pkg => internal}/plugin/installer/http_installer_test.go (99%) rename {pkg => internal}/plugin/installer/installer.go (99%) rename {pkg => internal}/plugin/installer/installer_test.go (100%) rename {pkg => internal}/plugin/installer/local_installer.go (95%) rename {pkg => internal}/plugin/installer/local_installer_test.go (96%) rename {pkg => internal}/plugin/installer/vcs_installer.go (97%) rename {pkg => internal}/plugin/installer/vcs_installer_test.go (98%) rename {pkg => internal}/plugin/plugin.go (100%) rename {pkg => internal}/plugin/plugin_test.go (100%) rename {pkg => internal}/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml (100%) rename {pkg => internal}/plugin/testdata/plugdir/good/downloader/plugin.yaml (100%) rename {pkg => internal}/plugin/testdata/plugdir/good/echo/plugin.yaml (100%) rename {pkg => internal}/plugin/testdata/plugdir/good/hello/hello.ps1 (100%) rename {pkg => internal}/plugin/testdata/plugdir/good/hello/hello.sh (100%) rename {pkg => internal}/plugin/testdata/plugdir/good/hello/plugin.yaml (100%) diff --git a/pkg/plugin/cache/cache.go b/internal/plugin/cache/cache.go similarity index 96% rename from pkg/plugin/cache/cache.go rename to internal/plugin/cache/cache.go index f3e847374..f3b737477 100644 --- a/pkg/plugin/cache/cache.go +++ b/internal/plugin/cache/cache.go @@ -14,7 +14,7 @@ limitations under the License. */ // Package cache provides a key generator for vcs urls. -package cache // import "helm.sh/helm/v4/pkg/plugin/cache" +package cache // import "helm.sh/helm/v4/internal/plugin/cache" import ( "net/url" diff --git a/pkg/plugin/hooks.go b/internal/plugin/hooks.go similarity index 94% rename from pkg/plugin/hooks.go rename to internal/plugin/hooks.go index 10dc8580e..7b4ff5a38 100644 --- a/pkg/plugin/hooks.go +++ b/internal/plugin/hooks.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package plugin // import "helm.sh/helm/v4/pkg/plugin" +package plugin // import "helm.sh/helm/v4/internal/plugin" // Types of hooks const ( diff --git a/pkg/plugin/installer/base.go b/internal/plugin/installer/base.go similarity index 93% rename from pkg/plugin/installer/base.go rename to internal/plugin/installer/base.go index 3738246ee..c21a245a8 100644 --- a/pkg/plugin/installer/base.go +++ b/internal/plugin/installer/base.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "path/filepath" diff --git a/pkg/plugin/installer/base_test.go b/internal/plugin/installer/base_test.go similarity index 94% rename from pkg/plugin/installer/base_test.go rename to internal/plugin/installer/base_test.go index 732ac7927..62b77bde5 100644 --- a/pkg/plugin/installer/base_test.go +++ b/internal/plugin/installer/base_test.go @@ -11,7 +11,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "testing" diff --git a/pkg/plugin/installer/doc.go b/internal/plugin/installer/doc.go similarity index 89% rename from pkg/plugin/installer/doc.go rename to internal/plugin/installer/doc.go index b927dbd37..a4cf384bf 100644 --- a/pkg/plugin/installer/doc.go +++ b/internal/plugin/installer/doc.go @@ -14,4 +14,4 @@ limitations under the License. */ // Package installer provides an interface for installing Helm plugins. -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" diff --git a/pkg/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go similarity index 98% rename from pkg/plugin/installer/http_installer.go rename to internal/plugin/installer/http_installer.go index 3bcf71208..b168f8646 100644 --- a/pkg/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "archive/tar" @@ -32,11 +32,11 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" + "helm.sh/helm/v4/internal/plugin/cache" "helm.sh/helm/v4/internal/third_party/dep/fs" "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/plugin/cache" ) // HTTPInstaller installs plugins from an archive served by a web server. diff --git a/pkg/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go similarity index 99% rename from pkg/plugin/installer/http_installer_test.go rename to internal/plugin/installer/http_installer_test.go index ed4b73b35..92521474e 100644 --- a/pkg/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "archive/tar" diff --git a/pkg/plugin/installer/installer.go b/internal/plugin/installer/installer.go similarity index 99% rename from pkg/plugin/installer/installer.go rename to internal/plugin/installer/installer.go index d88737ebf..e14f16018 100644 --- a/pkg/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -22,7 +22,7 @@ import ( "path/filepath" "strings" - "helm.sh/helm/v4/pkg/plugin" + "helm.sh/helm/v4/internal/plugin" ) // ErrMissingMetadata indicates that plugin.yaml is missing. diff --git a/pkg/plugin/installer/installer_test.go b/internal/plugin/installer/installer_test.go similarity index 100% rename from pkg/plugin/installer/installer_test.go rename to internal/plugin/installer/installer_test.go diff --git a/pkg/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go similarity index 95% rename from pkg/plugin/installer/local_installer.go rename to internal/plugin/installer/local_installer.go index 109f4f236..211904108 100644 --- a/pkg/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "errors" diff --git a/pkg/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go similarity index 96% rename from pkg/plugin/installer/local_installer_test.go rename to internal/plugin/installer/local_installer_test.go index 9effcd2c4..ef5660d7d 100644 --- a/pkg/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "os" diff --git a/pkg/plugin/installer/vcs_installer.go b/internal/plugin/installer/vcs_installer.go similarity index 97% rename from pkg/plugin/installer/vcs_installer.go rename to internal/plugin/installer/vcs_installer.go index 3e53cbf11..3601ec7a8 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/internal/plugin/installer/vcs_installer.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "errors" @@ -26,9 +26,9 @@ import ( "github.com/Masterminds/semver/v3" "github.com/Masterminds/vcs" + "helm.sh/helm/v4/internal/plugin/cache" "helm.sh/helm/v4/internal/third_party/dep/fs" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/plugin/cache" ) // VCSInstaller installs plugins from remote a repository. 
diff --git a/pkg/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go similarity index 98% rename from pkg/plugin/installer/vcs_installer_test.go rename to internal/plugin/installer/vcs_installer_test.go index 491d58a3f..76b337a2f 100644 --- a/pkg/plugin/installer/vcs_installer_test.go +++ b/internal/plugin/installer/vcs_installer_test.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package installer // import "helm.sh/helm/v4/pkg/plugin/installer" +package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( "fmt" diff --git a/pkg/plugin/plugin.go b/internal/plugin/plugin.go similarity index 100% rename from pkg/plugin/plugin.go rename to internal/plugin/plugin.go diff --git a/pkg/plugin/plugin_test.go b/internal/plugin/plugin_test.go similarity index 100% rename from pkg/plugin/plugin_test.go rename to internal/plugin/plugin_test.go diff --git a/pkg/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml similarity index 100% rename from pkg/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml rename to internal/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml diff --git a/pkg/plugin/testdata/plugdir/good/downloader/plugin.yaml b/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml similarity index 100% rename from pkg/plugin/testdata/plugdir/good/downloader/plugin.yaml rename to internal/plugin/testdata/plugdir/good/downloader/plugin.yaml diff --git a/pkg/plugin/testdata/plugdir/good/echo/plugin.yaml b/internal/plugin/testdata/plugdir/good/echo/plugin.yaml similarity index 100% rename from pkg/plugin/testdata/plugdir/good/echo/plugin.yaml rename to internal/plugin/testdata/plugdir/good/echo/plugin.yaml diff --git a/pkg/plugin/testdata/plugdir/good/hello/hello.ps1 b/internal/plugin/testdata/plugdir/good/hello/hello.ps1 similarity index 100% rename from pkg/plugin/testdata/plugdir/good/hello/hello.ps1 rename to internal/plugin/testdata/plugdir/good/hello/hello.ps1 diff --git a/pkg/plugin/testdata/plugdir/good/hello/hello.sh b/internal/plugin/testdata/plugdir/good/hello/hello.sh similarity index 100% rename from pkg/plugin/testdata/plugdir/good/hello/hello.sh rename to internal/plugin/testdata/plugdir/good/hello/hello.sh diff --git a/pkg/plugin/testdata/plugdir/good/hello/plugin.yaml b/internal/plugin/testdata/plugdir/good/hello/plugin.yaml similarity index 100% rename from pkg/plugin/testdata/plugdir/good/hello/plugin.yaml rename to internal/plugin/testdata/plugdir/good/hello/plugin.yaml diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index 8def7f9fa..e340ba1b6 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -31,7 +31,7 @@ import ( "github.com/spf13/cobra" "sigs.k8s.io/yaml" - "helm.sh/helm/v4/pkg/plugin" + "helm.sh/helm/v4/internal/plugin" ) const ( diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index a2bb838df..76bc99915 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -24,7 +24,7 @@ import ( "github.com/spf13/cobra" - "helm.sh/helm/v4/pkg/plugin" + "helm.sh/helm/v4/internal/plugin" ) const pluginHelp = ` diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index 945bf8ee0..7dd1623e7 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -22,9 +22,9 @@ import ( "github.com/spf13/cobra" + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/plugin/installer" 
"helm.sh/helm/v4/pkg/cmd/require" - "helm.sh/helm/v4/pkg/plugin" - "helm.sh/helm/v4/pkg/plugin/installer" ) type pluginInstallOptions struct { diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 5bb9ff68d..faf41b91e 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -24,7 +24,7 @@ import ( "github.com/gosuri/uitable" "github.com/spf13/cobra" - "helm.sh/helm/v4/pkg/plugin" + "helm.sh/helm/v4/internal/plugin" ) func newPluginListCmd(out io.Writer) *cobra.Command { diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index ec73ad6df..808cad92f 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -24,7 +24,7 @@ import ( "github.com/spf13/cobra" - "helm.sh/helm/v4/pkg/plugin" + "helm.sh/helm/v4/internal/plugin" ) type pluginUninstallOptions struct { diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index 59d884877..4fed3772d 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -24,8 +24,8 @@ import ( "github.com/spf13/cobra" - "helm.sh/helm/v4/pkg/plugin" - "helm.sh/helm/v4/pkg/plugin/installer" + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/plugin/installer" ) type pluginUpdateOptions struct { diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index 3b8185543..1893e8327 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -23,8 +23,8 @@ import ( "path/filepath" "strings" + "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/pkg/cli" - "helm.sh/helm/v4/pkg/plugin" ) // collectPlugins scans for getter plugins. From 4aa2240750595241a724c87118db3ff556bfc2e4 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Mon, 18 Aug 2025 09:18:02 +0100 Subject: [PATCH 35/88] Run go mod tidy Signed-off-by: Evans Mungai --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e3ed6d975..688094670 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 github.com/distribution/distribution/v3 v3.0.0 github.com/evanphx/json-patch/v5 v5.9.11 - github.com/fatih/color v1.18.0 + github.com/fatih/color v1.18.0 github.com/fluxcd/cli-utils v0.36.0-flux.14 github.com/foxcpp/go-mockdns v1.1.0 github.com/gobwas/glob v0.2.3 From d918f919e02898d39cd0538886ff0cb224ecdd0b Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Mon, 18 Aug 2025 07:26:50 -0600 Subject: [PATCH 36/88] fix: stale issue workflow Signed-off-by: Terry Howe --- .github/workflows/stale-issue-bot.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/stale-issue-bot.yaml b/.github/workflows/stale-issue-bot.yaml index 613d2900c..e99b57cb8 100644 --- a/.github/workflows/stale-issue-bot.yaml +++ b/.github/workflows/stale-issue-bot.yaml @@ -2,8 +2,6 @@ name: "Close stale issues" on: schedule: - cron: "0 0 * * *" -permissions: - contents: read jobs: stale: @@ -16,4 +14,4 @@ jobs: exempt-issue-labels: 'keep open,v4.x,in progress' days-before-stale: 90 days-before-close: 30 - operations-per-run: 100 + operations-per-run: 200 From 77bbbbd84f99b557209570bbb10d4c199c0f46aa Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Mon, 18 Aug 2025 08:46:41 -0600 Subject: [PATCH 37/88] feature: add stale pr workflow Signed-off-by: Terry Howe --- .github/workflows/{stale-issue-bot.yaml => stale.yaml} | 1 + 1 file changed, 1 insertion(+) rename .github/workflows/{stale-issue-bot.yaml => stale.yaml} (73%) diff --git a/.github/workflows/stale-issue-bot.yaml b/.github/workflows/stale.yaml similarity 
index 73% rename from .github/workflows/stale-issue-bot.yaml rename to .github/workflows/stale.yaml index e99b57cb8..3417e1734 100644 --- a/.github/workflows/stale-issue-bot.yaml +++ b/.github/workflows/stale.yaml @@ -11,6 +11,7 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue has been marked as stale because it has been open for 90 days with no activity. This thread will be automatically closed in 30 days if no further activity occurs.' + stale-pr-message: 'This pull request has been marked as stale because it has been open for 90 days with no activity. This pull request will be automatically closed in 30 days if no further activity occurs.' exempt-issue-labels: 'keep open,v4.x,in progress' days-before-stale: 90 days-before-close: 30 From 4bc93393bc2be6347b13d484a0b464a97c06ca2a Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Mon, 18 Aug 2025 10:40:23 -0600 Subject: [PATCH 38/88] feature: enable shuffle for unit tests Signed-off-by: Terry Howe --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0785fdb2e..0a20259bd 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ ACCEPTANCE_RUN_TESTS=. PKG := ./... TAGS := TESTS := . -TESTFLAGS := +TESTFLAGS := -shuffle=on -count=1 LDFLAGS := -w -s GOFLAGS := CGO_ENABLED ?= 0 From e2dcbe28bf964873fe91eb49f1d97b13f7d51783 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Sun, 27 Apr 2025 21:15:46 -0700 Subject: [PATCH 39/88] Helm client/SDK support server-side apply Signed-off-by: George Jenkins --- pkg/action/action.go | 7 ++ pkg/action/action_test.go | 5 ++ pkg/action/get_metadata.go | 2 + pkg/action/hooks.go | 4 +- pkg/action/hooks_test.go | 3 +- pkg/action/install.go | 42 +++++---- pkg/action/release_testing.go | 3 +- pkg/action/rollback.go | 62 ++++++++----- pkg/action/uninstall.go | 16 ++-- pkg/action/uninstall_test.go | 3 +- pkg/action/upgrade.go | 88 +++++++++++++------ pkg/action/upgrade_test.go | 106 +++++++++++++++++++++++ pkg/cmd/get_metadata.go | 17 ++++ pkg/cmd/install.go | 4 + pkg/cmd/rollback.go | 4 + pkg/cmd/testdata/output/get-metadata.txt | 1 + pkg/cmd/upgrade.go | 4 + pkg/kube/fake/fake.go | 16 ++-- pkg/kube/fake/printer.go | 22 ++--- pkg/release/v1/release.go | 8 ++ 20 files changed, 320 insertions(+), 97 deletions(-) diff --git a/pkg/action/action.go b/pkg/action/action.go index 69bcf4da2..5249c8cfa 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -520,3 +520,10 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp func (cfg *Configuration) SetHookOutputFunc(hookOutputFunc func(_, _, _ string) io.Writer) { cfg.HookOutputFunc = hookOutputFunc } + +func determineReleaseSSApplyMethod(serverSideApply bool) release.ApplyMethod { + if serverSideApply { + return release.ApplyMethodServerSideApply + } + return release.ApplyMethodClientSideApply +} diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 43cf94622..7a510ace6 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -946,3 +946,8 @@ func TestRenderResources_NoPostRenderer(t *testing.T) { assert.NotNil(t, buf) assert.Equal(t, "", notes) } + +func TestDetermineReleaseSSAApplyMethod(t *testing.T) { + assert.Equal(t, release.ApplyMethodClientSideApply, determineReleaseSSApplyMethod(false)) + assert.Equal(t, release.ApplyMethodServerSideApply, determineReleaseSSApplyMethod(true)) +} diff --git a/pkg/action/get_metadata.go b/pkg/action/get_metadata.go index 4cb77361a..889545ddc 100644 --- 
a/pkg/action/get_metadata.go +++ b/pkg/action/get_metadata.go @@ -47,6 +47,7 @@ type Metadata struct { Revision int `json:"revision" yaml:"revision"` Status string `json:"status" yaml:"status"` DeployedAt string `json:"deployedAt" yaml:"deployedAt"` + ApplyMethod string `json:"applyMethod,omitempty" yaml:"applyMethod,omitempty"` } // NewGetMetadata creates a new GetMetadata object with the given configuration. @@ -79,6 +80,7 @@ func (g *GetMetadata) Run(name string) (*Metadata, error) { Revision: rel.Version, Status: rel.Info.Status.String(), DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339), + ApplyMethod: rel.ApplyMethod, }, nil } diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 275a1bf52..458a6342c 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -33,7 +33,7 @@ import ( ) // execHook executes all of the hooks for the given hook event. -func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -75,7 +75,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Create hook resources if _, err := cfg.KubeClient.Create( resources, - kube.ClientCreateOptionServerSideApply(false, false)); err != nil { + kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil { h.LastRun.CompletedAt = helmtime.Now() h.LastRun.Phase = release.HookPhaseFailed return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index ad1de2c59..e3a2c0808 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -385,7 +385,8 @@ data: Capabilities: chartutil.DefaultCapabilities, } - err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600) + serverSideApply := true + err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply) if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) { t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord) diff --git a/pkg/action/install.go b/pkg/action/install.go index b46b4446b..f7482d466 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -75,7 +75,13 @@ type Install struct { // ForceReplace will, if set to `true`, ignore certain warnings and perform the install anyway. // // This should be used with caution. - ForceReplace bool + ForceReplace bool + // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager") + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts + ForceConflicts bool + // ServerSideApply when true (default) will enable changes to be applied via Kubernetes server-side apply + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ + ServerSideApply bool CreateNamespace bool DryRun bool DryRunOption string @@ -145,7 +151,8 @@ type ChartPathOptions struct { // NewInstall creates a new Install object with the given configuration. 
func NewInstall(cfg *Configuration) *Install { in := &Install{ - cfg: cfg, + cfg: cfg, + ServerSideApply: true, } in.registryClient = cfg.RegistryClient @@ -175,7 +182,7 @@ func (i *Install) installCRDs(crds []chart.CRD) error { // Send them to Kube if _, err := i.cfg.KubeClient.Create( res, - kube.ClientCreateOptionServerSideApply(false, false)); err != nil { + kube.ClientCreateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts)); err != nil { // If the error is CRD already exists, continue. if apierrors.IsAlreadyExists(err) { crdName := res[0].Name @@ -403,7 +410,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } if _, err := i.cfg.KubeClient.Create( resourceList, - kube.ClientCreateOptionServerSideApply(false, false)); err != nil && !apierrors.IsAlreadyExists(err) { + kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false)); err != nil && !apierrors.IsAlreadyExists(err) { return nil, err } } @@ -415,8 +422,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } } - // Store the release in history before continuing (new in Helm 3). We always know - // that this is a create operation. + // Store the release in history before continuing. We always know that this is a create operation if err := i.cfg.Releases.Create(rel); err != nil { // We could try to recover gracefully here, but since nothing has been installed // yet, this is probably safer than trying to continue when we know storage is @@ -463,7 +469,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed pre-install: %s", err) } } @@ -474,15 +480,15 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource if len(toBeAdopted) == 0 && len(resources) > 0 { _, err = i.cfg.KubeClient.Create( resources, - kube.ClientCreateOptionServerSideApply(false, false)) + kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false)) } else if len(resources) > 0 { - updateThreeWayMergeForUnstructured := i.TakeOwnership + useUpdateThreeWayMergeForUnstructured := i.TakeOwnership && !i.ServerSideApply // Use three-way merge when taking ownership (and not using server-side apply) _, err = i.cfg.KubeClient.Update( toBeAdopted, resources, - kube.ClientUpdateOptionServerSideApply(false, false), - kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured), - kube.ClientUpdateOptionForceReplace(i.ForceReplace)) + kube.ClientUpdateOptionForceReplace(i.ForceReplace), + kube.ClientUpdateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts), + kube.ClientUpdateOptionThreeWayMergeForUnstructured(useUpdateThreeWayMergeForUnstructured)) } if err != nil { return rel, err @@ -503,7 +509,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout); err != nil { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed post-install: %s", err) } } @@ -580,7 +586,8 @@ func (i *Install) availableName() error { // createRelease creates a new release object func (i *Install) 
createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release { ts := i.cfg.Now() - return &release.Release{ + + r := &release.Release{ Name: i.ReleaseName, Namespace: i.Namespace, Chart: chrt, @@ -590,9 +597,12 @@ func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{ LastDeployed: ts, Status: release.StatusUnknown, }, - Version: 1, - Labels: labels, + Version: 1, + Labels: labels, + ApplyMethod: string(determineReleaseSSApplyMethod(i.ServerSideApply)), } + + return r } // recordRelease with an update operation in case reuse has been set. diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index b5f6fe712..009f4d793 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -96,7 +96,8 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) { rel.Hooks = executingHooks } - if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout); err != nil { + serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply) + if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply); err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) r.cfg.Releases.Update(rel) return rel, err diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index dd1f8c390..5f0ed02f1 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -44,9 +44,17 @@ type Rollback struct { // ForceReplace will, if set to `true`, ignore certain warnings and perform the rollback anyway. // // This should be used with caution. - ForceReplace bool - CleanupOnFail bool - MaxHistory int // MaxHistory limits the maximum number of revisions saved per release + ForceReplace bool + // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager") + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts + ForceConflicts bool + // ServerSideApply enables changes to be applied via Kubernetes server-side apply + // Can be the string: "true", "false" or "auto" + // When "auto", sever-side usage will be based upon the releases previous usage + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ + ServerSideApply string + CleanupOnFail bool + MaxHistory int // MaxHistory limits the maximum number of revisions saved per release } // NewRollback creates a new Rollback object with the given configuration. 
@@ -65,7 +73,7 @@ func (r *Rollback) Run(name string) error { r.cfg.Releases.MaxHistory = r.MaxHistory slog.Debug("preparing rollback", "name", name) - currentRelease, targetRelease, err := r.prepareRollback(name) + currentRelease, targetRelease, serverSideApply, err := r.prepareRollback(name) if err != nil { return err } @@ -78,7 +86,7 @@ func (r *Rollback) Run(name string) error { } slog.Debug("performing rollback", "name", name) - if _, err := r.performRollback(currentRelease, targetRelease); err != nil { + if _, err := r.performRollback(currentRelease, targetRelease, serverSideApply); err != nil { return err } @@ -93,18 +101,18 @@ func (r *Rollback) Run(name string) error { // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration -func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { +func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, bool, error) { if err := chartutil.ValidateReleaseName(name); err != nil { - return nil, nil, fmt.Errorf("prepareRollback: Release name is invalid: %s", name) + return nil, nil, false, fmt.Errorf("prepareRollback: Release name is invalid: %s", name) } if r.Version < 0 { - return nil, nil, errInvalidRevision + return nil, nil, false, errInvalidRevision } currentRelease, err := r.cfg.Releases.Last(name) if err != nil { - return nil, nil, err + return nil, nil, false, err } previousVersion := r.Version @@ -114,7 +122,7 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele historyReleases, err := r.cfg.Releases.History(name) if err != nil { - return nil, nil, err + return nil, nil, false, err } // Check if the history version to be rolled back exists @@ -127,14 +135,19 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele } } if !previousVersionExist { - return nil, nil, fmt.Errorf("release has no %d version", previousVersion) + return nil, nil, false, fmt.Errorf("release has no %d version", previousVersion) } slog.Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion) previousRelease, err := r.cfg.Releases.Get(name, previousVersion) if err != nil { - return nil, nil, err + return nil, nil, false, err + } + + serverSideApply, err := getUpgradeServerSideValue(r.ServerSideApply, previousRelease.ApplyMethod) + if err != nil { + return nil, nil, false, err } // Store a new release object with previous release's configuration @@ -152,16 +165,17 @@ func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Rele // message here, and only override it later if we experience failure. 
Description: fmt.Sprintf("Rollback to %d", previousVersion), }, - Version: currentRelease.Version + 1, - Labels: previousRelease.Labels, - Manifest: previousRelease.Manifest, - Hooks: previousRelease.Hooks, + Version: currentRelease.Version + 1, + Labels: previousRelease.Labels, + Manifest: previousRelease.Manifest, + Hooks: previousRelease.Hooks, + ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)), } - return currentRelease, targetRelease, nil + return currentRelease, targetRelease, serverSideApply, nil } -func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { +func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release, serverSideApply bool) (*release.Release, error) { if r.DryRun { slog.Debug("dry run", "name", targetRelease.Name) return targetRelease, nil @@ -177,15 +191,16 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } // pre-rollback hooks + if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } else { slog.Debug("rollback hooks disabled", "name", targetRelease.Name) } - // It is safe to use "force" here because these are resources currently rendered by the chart. + // It is safe to use "forceOwnership" here because these are resources currently rendered by the chart. err = target.Visit(setMetadataVisitor(targetRelease.Name, targetRelease.Namespace, true)) if err != nil { return targetRelease, fmt.Errorf("unable to set metadata visitor from target release: %w", err) @@ -193,8 +208,9 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas results, err := r.cfg.KubeClient.Update( current, target, - kube.ClientUpdateOptionServerSideApply(false, false), - kube.ClientUpdateOptionForceReplace(r.ForceReplace)) + kube.ClientUpdateOptionForceReplace(r.ForceReplace), + kube.ClientUpdateOptionServerSideApply(serverSideApply, r.ForceConflicts), + kube.ClientUpdateOptionThreeWayMergeForUnstructured(false)) if err != nil { msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) @@ -239,7 +255,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 163af290e..4444f4331 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -115,7 +115,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) res := &release.UninstallReleaseResponse{Release: rel} if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout); err != nil { + serverSideApply := true + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { return res, err } } else { @@ -144,7 +145,8 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) } if !u.DisableHooks { - if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout); err != nil { + serverSideApply := 
true + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { errs = append(errs, err) } } @@ -244,11 +246,13 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri return nil, "", []error{fmt.Errorf("unable to build kubernetes objects for delete: %w", err)} } if len(resources) > 0 { - if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { - _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation)) - return resources, kept, errs + if len(resources) > 0 { + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { + _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation)) + return resources, kept, errs + } + _, errs = u.cfg.KubeClient.Delete(resources) } - _, errs = u.cfg.KubeClient.Delete(resources) } return resources, kept, errs } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 44bd66d96..f7c9e5f44 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" @@ -147,6 +148,6 @@ func TestUninstallRelease_Cascade(t *testing.T) { failer.BuildDummy = true unAction.cfg.KubeClient = failer _, err := unAction.Run(rel.Name) - is.Error(err) + require.Error(t, err) is.Contains(err.Error(), "failed to delete release: come-fail-away") } diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index abf4342d3..41ddf859f 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -81,6 +81,14 @@ type Upgrade struct { // // This should be used with caution. ForceReplace bool + // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager") + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts + ForceConflicts bool + // ServerSideApply enables changes to be applied via Kubernetes server-side apply + // Can be the string: "true", "false" or "auto" + // When "auto", sever-side usage will be based upon the releases previous usage + // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ + ServerSideApply string // ResetValues will reset the values to the chart's built-ins rather than merging with existing. ResetValues bool // ReuseValues will reuse the user's last supplied values. @@ -127,7 +135,8 @@ type resultMessage struct { // NewUpgrade creates a new Upgrade object with the given configuration. func NewUpgrade(cfg *Configuration) *Upgrade { up := &Upgrade{ - cfg: cfg, + cfg: cfg, + ServerSideApply: "auto", } up.registryClient = cfg.RegistryClient @@ -162,7 +171,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. } slog.Debug("preparing upgrade", "name", name) - currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) + currentRelease, upgradedRelease, serverSideApply, err := u.prepareUpgrade(name, chart, vals) if err != nil { return nil, err } @@ -170,7 +179,7 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart. 
u.cfg.Releases.MaxHistory = u.MaxHistory slog.Debug("performing update", "name", name) - res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) + res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease, serverSideApply) if err != nil { return res, err } @@ -195,14 +204,14 @@ func (u *Upgrade) isDryRun() bool { } // prepareUpgrade builds an upgraded release for an upgrade operation. -func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) { +func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, bool, error) { if chart == nil { - return nil, nil, errMissingChart + return nil, nil, false, errMissingChart } // HideSecret must be used with dry run. Otherwise, return an error. if !u.isDryRun() && u.HideSecret { - return nil, nil, errors.New("hiding Kubernetes secrets requires a dry-run mode") + return nil, nil, false, errors.New("hiding Kubernetes secrets requires a dry-run mode") } // finds the last non-deleted release with the given name @@ -210,14 +219,14 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin if err != nil { // to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist if errors.Is(err, driver.ErrReleaseNotFound) { - return nil, nil, driver.NewErrNoDeployedReleases(name) + return nil, nil, false, driver.NewErrNoDeployedReleases(name) } - return nil, nil, err + return nil, nil, false, err } // Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock. if lastRelease.Info.Status.IsPending() { - return nil, nil, errPending + return nil, nil, false, errPending } var currentRelease *release.Release @@ -232,7 +241,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin (lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) { currentRelease = lastRelease } else { - return nil, nil, err + return nil, nil, false, err } } } @@ -240,11 +249,11 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin // determine if values will be reused vals, err = u.reuseValues(chart, currentRelease, vals) if err != nil { - return nil, nil, err + return nil, nil, false, err } if err := chartutil.ProcessDependencies(chart, vals); err != nil { - return nil, nil, err + return nil, nil, false, err } // Increment revision count. 
This is passed to templates, and also stored on @@ -260,11 +269,11 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin caps, err := u.cfg.getCapabilities() if err != nil { - return nil, nil, err + return nil, nil, false, err } valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation) if err != nil { - return nil, nil, err + return nil, nil, false, err } // Determine whether or not to interact with remote @@ -275,13 +284,20 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithRemote, u.EnableDNS, u.HideSecret) if err != nil { - return nil, nil, err + return nil, nil, false, err } if driver.ContainsSystemLabels(u.Labels) { - return nil, nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) + return nil, nil, false, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) } + serverSideApply, err := getUpgradeServerSideValue(u.ServerSideApply, lastRelease.ApplyMethod) + if err != nil { + return nil, nil, false, err + } + + slog.Debug("determined release apply method", slog.Bool("server_side_apply", serverSideApply), slog.String("previous_release_apply_method", lastRelease.ApplyMethod)) + // Store an upgraded release. upgradedRelease := &release.Release{ Name: name, @@ -294,20 +310,21 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin Status: release.StatusPendingUpgrade, Description: "Preparing upgrade", // This should be overwritten later. 
}, - Version: revision, - Manifest: manifestDoc.String(), - Hooks: hooks, - Labels: mergeCustomLabels(lastRelease.Labels, u.Labels), + Version: revision, + Manifest: manifestDoc.String(), + Hooks: hooks, + Labels: mergeCustomLabels(lastRelease.Labels, u.Labels), + ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)), } if len(notesTxt) > 0 { upgradedRelease.Info.Notes = notesTxt } err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation) - return currentRelease, upgradedRelease, err + return currentRelease, upgradedRelease, serverSideApply, err } -func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release) (*release.Release, error) { +func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release, serverSideApply bool) (*release.Release, error) { current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false) if err != nil { // Checking for removed Kubernetes API error so can provide a more informative error message to the user @@ -380,7 +397,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR ctxChan := make(chan resultMessage) doneChan := make(chan interface{}) defer close(doneChan) - go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease) + go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease, serverSideApply) go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease) select { case result := <-rChan: @@ -414,11 +431,11 @@ func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c ch return } } -func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) { +func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release, serverSideApply bool) { // pre-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) return } @@ -429,8 +446,8 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele results, err := u.cfg.KubeClient.Update( current, target, - kube.ClientUpdateOptionServerSideApply(false, false), - kube.ClientUpdateOptionForceReplace(u.ForceReplace)) + kube.ClientUpdateOptionForceReplace(u.ForceReplace), + kube.ClientUpdateOptionServerSideApply(serverSideApply, u.ForceConflicts)) if err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) @@ -459,7 +476,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele // post-upgrade hooks if !u.DisableHooks { - if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout); err != nil { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) return } @@ -530,6 +547,8 @@ func (u *Upgrade) 
failRelease(rel *release.Release, created kube.ResourceList, e rollin.WaitForJobs = u.WaitForJobs rollin.DisableHooks = u.DisableHooks rollin.ForceReplace = u.ForceReplace + rollin.ForceConflicts = u.ForceConflicts + rollin.ServerSideApply = u.ServerSideApply rollin.Timeout = u.Timeout if rollErr := rollin.Run(rel.Name); rollErr != nil { return rel, fmt.Errorf("an error occurred while rolling back the release. original upgrade error: %w: %w", err, rollErr) @@ -607,3 +626,16 @@ func mergeCustomLabels(current, desired map[string]string) map[string]string { } return labels } + +func getUpgradeServerSideValue(serverSideOption string, releaseApplyMethod string) (bool, error) { + switch serverSideOption { + case "auto": + return releaseApplyMethod == "ssa", nil + case "false": + return false, nil + case "true": + return true, nil + default: + return false, fmt.Errorf("invalid/unknown release server-side apply method: %s", serverSideOption) + } +} diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go index e20955560..ccb0b8447 100644 --- a/pkg/action/upgrade_test.go +++ b/pkg/action/upgrade_test.go @@ -583,3 +583,109 @@ func TestUpgradeRelease_DryRun(t *testing.T) { done() req.Error(err) } + +func TestGetUpgradeServerSideValue(t *testing.T) { + tests := []struct { + name string + actionServerSideOption string + releaseApplyMethod string + expectedServerSideApply bool + }{ + { + name: "action ssa auto / release csa", + actionServerSideOption: "auto", + releaseApplyMethod: "csa", + expectedServerSideApply: false, + }, + { + name: "action ssa auto / release ssa", + actionServerSideOption: "auto", + releaseApplyMethod: "ssa", + expectedServerSideApply: true, + }, + { + name: "action ssa auto / release empty", + actionServerSideOption: "auto", + releaseApplyMethod: "", + expectedServerSideApply: false, + }, + { + name: "action ssa true / release csa", + actionServerSideOption: "true", + releaseApplyMethod: "csa", + expectedServerSideApply: true, + }, + { + name: "action ssa true / release ssa", + actionServerSideOption: "true", + releaseApplyMethod: "ssa", + expectedServerSideApply: true, + }, + { + name: "action ssa true / release 'unknown'", + actionServerSideOption: "true", + releaseApplyMethod: "foo", + expectedServerSideApply: true, + }, + { + name: "action ssa true / release empty", + actionServerSideOption: "true", + releaseApplyMethod: "", + expectedServerSideApply: true, + }, + { + name: "action ssa false / release csa", + actionServerSideOption: "false", + releaseApplyMethod: "ssa", + expectedServerSideApply: false, + }, + { + name: "action ssa false / release ssa", + actionServerSideOption: "false", + releaseApplyMethod: "ssa", + expectedServerSideApply: false, + }, + { + name: "action ssa false / release 'unknown'", + actionServerSideOption: "false", + releaseApplyMethod: "foo", + expectedServerSideApply: false, + }, + { + name: "action ssa false / release empty", + actionServerSideOption: "false", + releaseApplyMethod: "ssa", + expectedServerSideApply: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + serverSideApply, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod) + assert.Nil(t, err) + assert.Equal(t, tt.expectedServerSideApply, serverSideApply) + }) + } + + testsError := []struct { + name string + actionServerSideOption string + releaseApplyMethod string + expectedErrorMsg string + }{ + { + name: "action invalid option", + actionServerSideOption: "invalid", + releaseApplyMethod: "ssa", + 
expectedErrorMsg: "invalid/unknown release server-side apply method: invalid", + }, + } + + for _, tt := range testsError { + t.Run(tt.name, func(t *testing.T) { + _, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod) + assert.ErrorContains(t, err, tt.expectedErrorMsg) + }) + } + +} diff --git a/pkg/cmd/get_metadata.go b/pkg/cmd/get_metadata.go index aea149f5e..eb90b6e44 100644 --- a/pkg/cmd/get_metadata.go +++ b/pkg/cmd/get_metadata.go @@ -27,6 +27,8 @@ import ( "helm.sh/helm/v4/pkg/action" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" + + release "helm.sh/helm/v4/pkg/release/v1" ) type metadataWriter struct { @@ -75,6 +77,20 @@ func newGetMetadataCmd(cfg *action.Configuration, out io.Writer) *cobra.Command } func (w metadataWriter) WriteTable(out io.Writer) error { + + formatApplyMethod := func(applyMethod string) string { + switch applyMethod { + case "": + return "client-side apply (defaulted)" + case string(release.ApplyMethodClientSideApply): + return "client-side apply" + case string(release.ApplyMethodServerSideApply): + return "server-side apply" + default: + return fmt.Sprintf("unknown (%q)", applyMethod) + } + } + _, _ = fmt.Fprintf(out, "NAME: %v\n", w.metadata.Name) _, _ = fmt.Fprintf(out, "CHART: %v\n", w.metadata.Chart) _, _ = fmt.Fprintf(out, "VERSION: %v\n", w.metadata.Version) @@ -86,6 +102,7 @@ func (w metadataWriter) WriteTable(out io.Writer) error { _, _ = fmt.Fprintf(out, "REVISION: %v\n", w.metadata.Revision) _, _ = fmt.Fprintf(out, "STATUS: %v\n", w.metadata.Status) _, _ = fmt.Fprintf(out, "DEPLOYED_AT: %v\n", w.metadata.DeployedAt) + _, _ = fmt.Fprintf(out, "APPLY_METHOD: %v\n", formatApplyMethod(w.metadata.ApplyMethod)) return nil } diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index d53b1d981..5be298ff8 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -196,6 +196,8 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement") f.BoolVar(&client.ForceReplace, "force", false, "deprecated") f.MarkDeprecated("force", "use --force-replace instead") + f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts") + f.BoolVar(&client.ServerSideApply, "server-side", true, "object updates run in the server instead of the client") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. 
This is unsafe in production") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") @@ -217,6 +219,8 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal addValueOptionsFlags(f, valueOpts) addChartPathOptionsFlags(f, &client.ChartPathOptions) AddWaitFlag(cmd, &client.WaitStrategy) + cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts") + cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts") err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { requiredArgs := 2 diff --git a/pkg/cmd/rollback.go b/pkg/cmd/rollback.go index 4b7f3016d..ff60aaedf 100644 --- a/pkg/cmd/rollback.go +++ b/pkg/cmd/rollback.go @@ -80,12 +80,16 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement") f.BoolVar(&client.ForceReplace, "force", false, "deprecated") f.MarkDeprecated("force", "use --force-replace instead") + f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts") + f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") AddWaitFlag(cmd, &client.WaitStrategy) + cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts") + cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts") return cmd } diff --git a/pkg/cmd/testdata/output/get-metadata.txt b/pkg/cmd/testdata/output/get-metadata.txt index 5744083dd..b3cb73ee2 100644 --- a/pkg/cmd/testdata/output/get-metadata.txt +++ b/pkg/cmd/testdata/output/get-metadata.txt @@ -9,3 +9,4 @@ NAMESPACE: default REVISION: 1 STATUS: deployed DEPLOYED_AT: 1977-09-02T22:04:05Z +APPLY_METHOD: client-side apply (defaulted) diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index c3288286b..f39810c88 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -273,6 +273,8 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement") f.BoolVar(&client.ForceReplace, "force", false, "deprecated") f.MarkDeprecated("force", "use --force-replace instead") + f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts") + f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". 
Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)") f.BoolVar(&client.DisableHooks, "no-hooks", false, "disable pre/post upgrade hooks") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the upgrade process will not validate rendered templates against the Kubernetes OpenAPI Schema") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed when an upgrade is performed with install flag enabled. By default, CRDs are installed if not already present, when an upgrade is performed with install flag enabled") @@ -297,6 +299,8 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { bindOutputFlag(cmd, &outfmt) bindPostRenderFlag(cmd, &client.PostRenderer) AddWaitFlag(cmd, &client.WaitStrategy) + cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts") + cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts") err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 2 { diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index 588bba83d..ae3853fb7 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -108,6 +108,14 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [ return f.PrintingKubeClient.Delete(resources) } +// DeleteWithPropagationPolicy returns the configured error if set or prints +func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { + if f.DeleteWithPropagationError != nil { + return nil, []error{f.DeleteWithPropagationError} + } + return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) +} + // WatchUntilReady returns the configured error if set or prints func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { if f.watchUntilReadyError != nil { @@ -146,14 +154,6 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, return f.PrintingKubeClient.BuildTable(r, false) } -// DeleteWithPropagationPolicy returns the configured error if set or prints -func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { - if f.DeleteWithPropagationError != nil { - return nil, []error{f.DeleteWithPropagationError} - } - return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) -} - func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { waiter, _ := f.PrintingKubeClient.GetWaiter(ws) printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 16c93615a..130e923c6 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -97,6 +97,17 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, return &kube.Result{Deleted: resources}, nil } +// DeleteWithPropagationPolicy implements KubeClient delete. +// +// It only prints out the content to be deleted. 
+func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, []error{err} + } + return &kube.Result{Deleted: resources}, nil +} + // Update implements KubeClient Update. func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ ...kube.ClientUpdateOption) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(modified)) @@ -135,17 +146,6 @@ func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNa return err } -// DeleteWithPropagationPolicy implements KubeClient delete. -// -// It only prints out the content to be deleted. -func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { - _, err := io.Copy(p.Out, bufferize(resources)) - if err != nil { - return nil, []error{err} - } - return &kube.Result{Deleted: resources}, nil -} - func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } diff --git a/pkg/release/v1/release.go b/pkg/release/v1/release.go index 74e834f7b..a7f076e04 100644 --- a/pkg/release/v1/release.go +++ b/pkg/release/v1/release.go @@ -19,6 +19,11 @@ import ( chart "helm.sh/helm/v4/pkg/chart/v2" ) +type ApplyMethod string + +const ApplyMethodClientSideApply ApplyMethod = "csa" +const ApplyMethodServerSideApply ApplyMethod = "ssa" + // Release describes a deployment of a chart, together with the chart // and the variables used to deploy that chart. type Release struct { @@ -42,6 +47,9 @@ type Release struct { // Labels of the release. // Disabled encoding into Json cause labels are stored in storage driver metadata field. Labels map[string]string `json:"-"` + // ApplyMethod stores whether server-side or client-side apply was used for the release + // Unset (empty string) should be treated as the default of client-side apply + ApplyMethod string `json:"apply_method,omitempty"` // "ssa" | "csa" } // SetStatus is a helper for setting the status on a release. 
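Note on the apply-method fields introduced above: for SDK callers, the intent is that Install defaults to server-side apply while Upgrade and Rollback default to "auto" (reusing whatever method the previous release recorded in ApplyMethod). The following is a minimal sketch of how a caller might exercise these fields, based only on the signatures added in this patch (action.NewInstall, action.NewUpgrade, Install.ServerSideApply bool, Upgrade.ServerSideApply string, Upgrade.ForceConflicts bool); it is illustrative, not code taken from the patch.

    // cfg is an initialized *action.Configuration.
    inst := action.NewInstall(cfg) // ServerSideApply defaults to true
    inst.ServerSideApply = false   // opt back into client-side apply for this install

    up := action.NewUpgrade(cfg)   // ServerSideApply defaults to "auto"
    up.ServerSideApply = "true"    // force SSA regardless of the previous release's method
    up.ForceConflicts = true       // on conflicts, take ownership of the contested fields

The "auto" value is what keeps existing releases stable: a release installed before this change has an empty ApplyMethod, which getUpgradeServerSideValue treats as client-side apply, so upgrades do not silently switch apply strategies.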
From fb12b44493eb36e12b1af4804051cca01515f5f3 Mon Sep 17 00:00:00 2001 From: Isaiah Lewis Date: Mon, 18 Aug 2025 11:35:59 -0700 Subject: [PATCH 40/88] fix(helm-lint): Add TLSClientConfig Signed-off-by: Isaiah Lewis --- pkg/chart/v2/util/jsonschema.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go index 96fd207b9..0d03db710 100644 --- a/pkg/chart/v2/util/jsonschema.go +++ b/pkg/chart/v2/util/jsonschema.go @@ -18,6 +18,7 @@ package util import ( "bytes" + "crypto/tls" "errors" "fmt" "log/slog" @@ -63,6 +64,7 @@ func newHTTPURLLoader() *HTTPURLLoader { Timeout: 15 * time.Second, Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{}, }, }) return &httpLoader From b4b2392f7e9d519af85f3a797fe1085d04c7f954 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 18 Aug 2025 10:17:22 -0700 Subject: [PATCH 41/88] mergefix Signed-off-by: George Jenkins --- pkg/action/install.go | 4 ++-- pkg/action/uninstall.go | 10 ++++------ pkg/kube/fake/fake.go | 16 ++++++++-------- pkg/kube/fake/printer.go | 22 +++++++++++----------- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index f7482d466..89755d4e5 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -482,13 +482,13 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource resources, kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false)) } else if len(resources) > 0 { - useUpdateThreeWayMergeForUnstructured := i.TakeOwnership && !i.ServerSideApply // Use three-way merge when taking ownership (and not using server-side apply) + updateThreeWayMergeForUnstructured := i.TakeOwnership && !i.ServerSideApply // Use three-way merge when taking ownership (and not using server-side apply) _, err = i.cfg.KubeClient.Update( toBeAdopted, resources, kube.ClientUpdateOptionForceReplace(i.ForceReplace), kube.ClientUpdateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts), - kube.ClientUpdateOptionThreeWayMergeForUnstructured(useUpdateThreeWayMergeForUnstructured)) + kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured)) } if err != nil { return rel, err diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 4444f4331..6aa87d331 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -246,13 +246,11 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri return nil, "", []error{fmt.Errorf("unable to build kubernetes objects for delete: %w", err)} } if len(resources) > 0 { - if len(resources) > 0 { - if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { - _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation)) - return resources, kept, errs - } - _, errs = u.cfg.KubeClient.Delete(resources) + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { + _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.DeletionPropagation)) + return resources, kept, errs } + _, errs = u.cfg.KubeClient.Delete(resources) } return resources, kept, errs } diff --git a/pkg/kube/fake/fake.go b/pkg/kube/fake/fake.go index ae3853fb7..588bba83d 100644 --- a/pkg/kube/fake/fake.go +++ b/pkg/kube/fake/fake.go @@ -108,14 +108,6 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [ return f.PrintingKubeClient.Delete(resources) } -// 
DeleteWithPropagationPolicy returns the configured error if set or prints -func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { - if f.DeleteWithPropagationError != nil { - return nil, []error{f.DeleteWithPropagationError} - } - return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) -} - // WatchUntilReady returns the configured error if set or prints func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { if f.watchUntilReadyError != nil { @@ -154,6 +146,14 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, return f.PrintingKubeClient.BuildTable(r, false) } +// DeleteWithPropagationPolicy returns the configured error if set or prints +func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { + if f.DeleteWithPropagationError != nil { + return nil, []error{f.DeleteWithPropagationError} + } + return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) +} + func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { waiter, _ := f.PrintingKubeClient.GetWaiter(ws) printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index 130e923c6..16c93615a 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -97,17 +97,6 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, return &kube.Result{Deleted: resources}, nil } -// DeleteWithPropagationPolicy implements KubeClient delete. -// -// It only prints out the content to be deleted. -func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { - _, err := io.Copy(p.Out, bufferize(resources)) - if err != nil { - return nil, []error{err} - } - return &kube.Result{Deleted: resources}, nil -} - // Update implements KubeClient Update. func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ ...kube.ClientUpdateOption) (*kube.Result, error) { _, err := io.Copy(p.Out, bufferize(modified)) @@ -146,6 +135,17 @@ func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNa return err } +// DeleteWithPropagationPolicy implements KubeClient delete. +// +// It only prints out the content to be deleted. 
+func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, []error{err} + } + return &kube.Result{Deleted: resources}, nil +} + func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } From e1e23d2af1ef2ca1214a723035aa189bab64c74a Mon Sep 17 00:00:00 2001 From: Eric Stroczynski Date: Mon, 18 Aug 2025 12:27:22 -0700 Subject: [PATCH 42/88] fix: set repo authorizer in registry.Client.Resolve() Signed-off-by: Eric Stroczynski --- pkg/registry/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 3ccdba92c..8d6af9697 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -818,6 +818,7 @@ func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) { return desc, err } remoteRepository.PlainHTTP = c.plainHTTP + remoteRepository.Client = c.authorizer parsedReference, err := newReference(ref) if err != nil { From c9e6e8a040721c258a5e3d12c7bdd9ada6f62082 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 22:39:10 +0000 Subject: [PATCH 43/88] chore(deps): bump the k8s-io group with 7 updates Bumps the k8s-io group with 7 updates: | Package | From | To | | --- | --- | --- | | [k8s.io/api](https://github.com/kubernetes/api) | `0.33.3` | `0.33.4` | | [k8s.io/apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver) | `0.33.3` | `0.33.4` | | [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.33.3` | `0.33.4` | | [k8s.io/apiserver](https://github.com/kubernetes/apiserver) | `0.33.3` | `0.33.4` | | [k8s.io/cli-runtime](https://github.com/kubernetes/cli-runtime) | `0.33.3` | `0.33.4` | | [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.33.3` | `0.33.4` | | [k8s.io/kubectl](https://github.com/kubernetes/kubectl) | `0.33.3` | `0.33.4` | Updates `k8s.io/api` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/api/compare/v0.33.3...v0.33.4) Updates `k8s.io/apiextensions-apiserver` from 0.33.3 to 0.33.4 - [Release notes](https://github.com/kubernetes/apiextensions-apiserver/releases) - [Commits](https://github.com/kubernetes/apiextensions-apiserver/compare/v0.33.3...v0.33.4) Updates `k8s.io/apimachinery` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.33.3...v0.33.4) Updates `k8s.io/apiserver` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/apiserver/compare/v0.33.3...v0.33.4) Updates `k8s.io/cli-runtime` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/cli-runtime/compare/v0.33.3...v0.33.4) Updates `k8s.io/client-go` from 0.33.3 to 0.33.4 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.33.3...v0.33.4) Updates `k8s.io/kubectl` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/kubectl/compare/v0.33.3...v0.33.4) --- updated-dependencies: - dependency-name: k8s.io/api dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/apiextensions-apiserver dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch 
dependency-group: k8s-io - dependency-name: k8s.io/apimachinery dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/apiserver dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/cli-runtime dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/kubectl dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io ... Signed-off-by: dependabot[bot] --- go.mod | 16 ++++++++-------- go.sum | 32 ++++++++++++++++---------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 688094670..f3a3ebd33 100644 --- a/go.mod +++ b/go.mod @@ -35,14 +35,14 @@ require ( golang.org/x/crypto v0.41.0 golang.org/x/term v0.34.0 golang.org/x/text v0.28.0 - k8s.io/api v0.33.3 - k8s.io/apiextensions-apiserver v0.33.3 - k8s.io/apimachinery v0.33.3 - k8s.io/apiserver v0.33.3 - k8s.io/cli-runtime v0.33.3 - k8s.io/client-go v0.33.3 + k8s.io/api v0.33.4 + k8s.io/apiextensions-apiserver v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/apiserver v0.33.4 + k8s.io/cli-runtime v0.33.4 + k8s.io/client-go v0.33.4 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.33.3 + k8s.io/kubectl v0.33.4 oras.land/oras-go/v2 v2.6.0 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/kustomize/kyaml v0.20.1 @@ -170,7 +170,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.33.3 // indirect + k8s.io/component-base v0.33.4 // indirect k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 // indirect k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/go.sum b/go.sum index 5ac66f328..b76d921d3 100644 --- a/go.sum +++ b/go.sum @@ -500,26 +500,26 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= -k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= -k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= -k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= -k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= -k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= -k8s.io/client-go v0.33.3/go.mod 
h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= -k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= -k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= +k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= +k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= +k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk= +k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU= +k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= +k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= +k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 h1:gAXU86Fmbr/ktY17lkHwSjw5aoThQvhnstGGIYKlKYc= k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw= -k8s.io/kubectl v0.33.3 h1:r/phHvH1iU7gO/l7tTjQk2K01ER7/OAJi8uFHHyWSac= -k8s.io/kubectl v0.33.3/go.mod h1:euj2bG56L6kUGOE/ckZbCoudPwuj4Kud7BR0GzyNiT0= +k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98= +k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= From 6ac2c34689df6fb78470e7c809f2c97060fc4d27 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Tue, 19 Aug 2025 14:00:36 -0400 Subject: [PATCH 44/88] Initial addition of content based cache The previous cache was based on chart name and version. If 2 charts with different content had the same name and version they would collide. Helm did not trust the cache because of this and always downloaded content. It was a short lived cache. This commit introduces a content based cache which is based on the content rather than file name. Charts with the same name but different content are no longer an issue. While the system assumes a file based interface, the cache system is pluggable. In the future, it should return bytes for the content instead of paths to it. That would requie a larger change for Helm 5 or later. 
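For orientation, a minimal sketch (not part of the patch) of how a caller could exercise the content-addressed DiskCache introduced below. The cache root path and the example bytes are illustrative assumptions; the Get/Put signatures mirror pkg/downloader/cache.go as added in this commit, where the second argument selects between chart and provenance content.

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"

        "helm.sh/helm/v4/pkg/downloader"
    )

    func main() {
        // Content-addressed cache rooted in an arbitrary directory (assumed path).
        cache := &downloader.DiskCache{Root: "/tmp/helm-content-cache"}

        chartData := []byte("pretend this is a chart .tgz")
        key := sha256.Sum256(chartData) // the sha256 of the content is the cache key

        // Store the archive under its digest (prov=false means chart, not .prov).
        if _, err := cache.Put(key, bytes.NewReader(chartData), false); err != nil {
            panic(err)
        }

        // Later lookups by digest hit the cache even when two charts share a
        // name and version, because the key is derived from content alone.
        path, err := cache.Get(key, false)
        if err != nil {
            panic(err)
        }
        fmt.Println("cached at", path)
    }
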
Signed-off-by: Matt Farina --- pkg/action/install.go | 4 +- pkg/action/verify.go | 2 +- pkg/downloader/cache.go | 86 ++++++++++ pkg/downloader/chart_downloader.go | 209 ++++++++++++++++++++---- pkg/downloader/chart_downloader_test.go | 6 +- pkg/registry/client.go | 23 +-- 6 files changed, 281 insertions(+), 49 deletions(-) create mode 100644 pkg/downloader/cache.go diff --git a/pkg/action/install.go b/pkg/action/install.go index d8efa5d5d..b13bbfb8b 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -792,7 +792,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( return abs, err } if c.Verify { - if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil { + if _, err := downloader.VerifyChart(abs, abs+".prov", c.Keyring); err != nil { return "", err } } @@ -868,7 +868,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( return "", err } - filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache) + filename, _, err := dl.DownloadToCache(name, version) if err != nil { return "", err } diff --git a/pkg/action/verify.go b/pkg/action/verify.go index 68a5e2d88..ca2f4fa63 100644 --- a/pkg/action/verify.go +++ b/pkg/action/verify.go @@ -39,7 +39,7 @@ func NewVerify() *Verify { // Run executes 'helm verify'. func (v *Verify) Run(chartfile string) error { var out strings.Builder - p, err := downloader.VerifyChart(chartfile, v.Keyring) + p, err := downloader.VerifyChart(chartfile, chartfile+".prov", v.Keyring) if err != nil { return err } diff --git a/pkg/downloader/cache.go b/pkg/downloader/cache.go new file mode 100644 index 000000000..d9b925756 --- /dev/null +++ b/pkg/downloader/cache.go @@ -0,0 +1,86 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "crypto/sha256" + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + + "helm.sh/helm/v4/internal/fileutil" +) + +// Cache describes a cache that can get and put chart data. +// The cache key is the sha256 has of the content. sha256 is used in Helm for +// digests in index files providing a common key for checking content. +type Cache interface { + // Get returns a reader for the given key. + Get(key [sha256.Size]byte, prov bool) (string, error) + // Put stores the given reader for the given key. + Put(key [sha256.Size]byte, data io.Reader, prov bool) (string, error) +} + +// TODO: The cache assumes files because much of Helm assumes files. Convert +// Helm to pass content around instead of file locations. + +// DiskCache is a cache that stores data on disk. +type DiskCache struct { + Root string +} + +// Get returns a reader for the given key. +func (c *DiskCache) Get(key [sha256.Size]byte, prov bool) (string, error) { + p := c.fileName(key, prov) + fi, err := os.Stat(p) + if err != nil { + return "", err + } + // Empty files treated as not exist because there is no content. 
+ if fi.Size() == 0 { + return p, os.ErrNotExist + } + // directories should never happen unless something outside helm is operating + // on this content. + if fi.IsDir() { + return p, os.ErrInvalid + } + return p, nil +} + +// Put stores the given reader for the given key. +// It returns the path to the stored file. +func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, prov bool) (string, error) { + // TODO: verify the key and digest of the key are the same. + p := c.fileName(key, prov) + if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil { + slog.Error("failed to create cache directory") + return p, err + } + return p, fileutil.AtomicWriteFile(p, data, 0644) +} + +// fileName generates the filename in a structured manner where the first part is the +// directory and the full hash is the filename. +func (c *DiskCache) fileName(id [sha256.Size]byte, prov bool) string { + suffix := ".tgz" + if prov { + suffix = ".prov" + } + return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+suffix) +} diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index 529fd788e..bdf65011c 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -16,6 +16,9 @@ limitations under the License. package downloader import ( + "bytes" + "crypto/sha256" + "encoding/hex" "errors" "fmt" "io" @@ -72,6 +75,9 @@ type ChartDownloader struct { RegistryClient *registry.Client RepositoryConfig string RepositoryCache string + + // Cache specifies the cache implementation to use. + Cache Cache } // DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file. @@ -86,7 +92,10 @@ type ChartDownloader struct { // Returns a string path to the location where the file was downloaded and a verification // (if provenance was verified), or an error if something bad happened. func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { - u, err := c.ResolveChartVersion(ref, version) + if c.Cache == nil { + c.Cache = &DiskCache{Root: c.RepositoryCache} + } + hash, u, err := c.ResolveChartVersion(ref, version) if err != nil { return "", nil, err } @@ -96,11 +105,36 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven return "", nil, err } - c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) + // Check the cache for the content. Otherwise download it. + // Note, this process will pull from the cache but does not automatically populate + // the cache with the file it downloads. + var data *bytes.Buffer + var found bool + var digest []byte + var digest32 [32]byte + if hash != "" { + // if there is a hash, populate the other formats + digest, err = hex.DecodeString(hash) + if err != nil { + return "", nil, err + } + copy(digest32[:], digest) + if pth, err := c.Cache.Get(digest32, false); err == nil { + fdata, err := os.ReadFile(pth) + if err == nil { + found = true + data = bytes.NewBuffer(fdata) + } + } + } - data, err := g.Get(u.String(), c.Options...) - if err != nil { - return "", nil, err + if !found { + c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) + + data, err = g.Get(u.String(), c.Options...) + if err != nil { + return "", nil, err + } } name := filepath.Base(u.Path) @@ -117,13 +151,26 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven // If provenance is requested, verify it. 
ver := &provenance.Verification{} if c.Verify > VerifyNever { - body, err := g.Get(u.String() + ".prov") - if err != nil { - if c.Verify == VerifyAlways { - return destfile, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov") + found = false + var body *bytes.Buffer + if hash != "" { + if pth, err := c.Cache.Get(digest32, true); err == nil { + fdata, err := os.ReadFile(pth) + if err == nil { + found = true + body = bytes.NewBuffer(fdata) + } + } + } + if !found { + body, err = g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return destfile, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return destfile, ver, nil } - fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) - return destfile, ver, nil } provfile := destfile + ".prov" if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil { @@ -131,7 +178,7 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven } if c.Verify != VerifyLater { - ver, err = VerifyChart(destfile, c.Keyring) + ver, err = VerifyChart(destfile, destfile+".prov", c.Keyring) if err != nil { // Fail always in this case, since it means the verification step // failed. @@ -142,10 +189,105 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven return destfile, ver, nil } +// DownloadToCache retrieves resources while using a content based cache. +func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provenance.Verification, error) { + if c.Cache == nil { + c.Cache = &DiskCache{Root: c.RepositoryCache} + } + + digestString, u, err := c.ResolveChartVersion(ref, version) + if err != nil { + return "", nil, err + } + + g, err := c.Getters.ByScheme(u.Scheme) + if err != nil { + return "", nil, err + } + + c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) + + // Check the cache for the file + digest, err := hex.DecodeString(digestString) + if err != nil { + return "", nil, err + } + var digest32 [32]byte + copy(digest32[:], digest) + if err != nil { + return "", nil, fmt.Errorf("unable to decode digest: %w", err) + } + + var pth string + // only fetch from the cache if we have a digest + if len(digest) > 0 { + pth, err = c.Cache.Get(digest32, false) + } + if len(digest) == 0 || err != nil { + if err != nil && !os.IsNotExist(err) { + return "", nil, err + } + + // Get file not in the cache + data, gerr := g.Get(u.String(), c.Options...) + if gerr != nil { + return "", nil, gerr + } + + // Generate the digest + if len(digest) == 0 { + h := sha256.New() + digest32 = [sha256.Size]byte(h.Sum(data.Bytes())) + } + + pth, err = c.Cache.Put(digest32, data, false) + if err != nil { + return "", nil, err + } + } + + // If provenance is requested, verify it. 
+ ver := &provenance.Verification{} + if c.Verify > VerifyNever { + ppth, err := c.Cache.Get(digest32, true) + if err != nil { + if !os.IsNotExist(err) { + return pth, ver, err + } + + body, err := g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return pth, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return pth, ver, nil + } + + ppth, err = c.Cache.Put(digest32, body, true) + if err != nil { + return "", nil, err + } + } + + if c.Verify != VerifyLater { + ver, err = VerifyChart(pth, ppth, c.Keyring) + if err != nil { + // Fail always in this case, since it means the verification step + // failed. + return pth, ver, err + } + } + } + return pth, ver, nil +} + // ResolveChartVersion resolves a chart reference to a URL. // -// It returns the URL and sets the ChartDownloader's Options that can fetch -// the URL using the appropriate Getter. +// It returns: +// - A hash of the content if available +// - The URL and sets the ChartDownloader's Options that can fetch the URL using the appropriate Getter. +// - An error if there is one // // A reference may be an HTTP URL, an oci reference URL, a 'reponame/chartname' // reference, or a local path. @@ -157,23 +299,26 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven // - If version is non-empty, this will return the URL for that version // - If version is empty, this will return the URL for the latest version // - If no version can be found, an error is returned -func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) { +// +// TODO: support OCI hash +func (c *ChartDownloader) ResolveChartVersion(ref, version string) (string, *url.URL, error) { u, err := url.Parse(ref) if err != nil { - return nil, fmt.Errorf("invalid chart URL format: %s", ref) + return "", nil, fmt.Errorf("invalid chart URL format: %s", ref) } if registry.IsOCI(u.String()) { if c.RegistryClient == nil { - return nil, fmt.Errorf("unable to lookup ref %s at version '%s', missing registry client", ref, version) + return "", nil, fmt.Errorf("unable to lookup ref %s at version '%s', missing registry client", ref, version) } - return c.RegistryClient.ValidateReference(ref, version, u) + digest, OCIref, err := c.RegistryClient.ValidateReference(ref, version, u) + return digest, OCIref, err } rf, err := loadRepoConfig(c.RepositoryConfig) if err != nil { - return u, err + return "", u, err } if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { @@ -190,9 +335,9 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er if err == ErrNoOwnerRepo { // Make sure to add the ref URL as the URL for the getter c.Options = append(c.Options, getter.WithURL(ref)) - return u, nil + return "", u, nil } - return u, err + return "", u, err } // If we get here, we don't need to go through the next phase of looking @@ -211,20 +356,20 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er getter.WithPassCredentialsAll(rc.PassCredentialsAll), ) } - return u, nil + return "", u, nil } // See if it's of the form: repo/path_to_chart p := strings.SplitN(u.Path, "/", 2) if len(p) < 2 { - return u, fmt.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u) + return "", u, fmt.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u) } repoName := p[0] chartName := p[1] rc, err := 
pickChartRepositoryConfigByName(repoName, rf.Repositories) if err != nil { - return u, err + return "", u, err } // Now that we have the chart repository information we can use that URL @@ -233,7 +378,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er r, err := repo.NewChartRepository(rc, c.Getters) if err != nil { - return u, err + return "", u, err } if r != nil && r.Config != nil { @@ -252,32 +397,33 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) i, err := repo.LoadIndexFile(idxFile) if err != nil { - return u, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err) + return "", u, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err) } cv, err := i.Get(chartName, version) if err != nil { - return u, fmt.Errorf("chart %q matching %s not found in %s index. (try 'helm repo update'): %w", chartName, version, r.Config.Name, err) + return "", u, fmt.Errorf("chart %q matching %s not found in %s index. (try 'helm repo update'): %w", chartName, version, r.Config.Name, err) } if len(cv.URLs) == 0 { - return u, fmt.Errorf("chart %q has no downloadable URLs", ref) + return "", u, fmt.Errorf("chart %q has no downloadable URLs", ref) } // TODO: Seems that picking first URL is not fully correct resolvedURL, err := repo.ResolveReferenceURL(rc.URL, cv.URLs[0]) if err != nil { - return u, fmt.Errorf("invalid chart URL format: %s", ref) + return cv.Digest, u, fmt.Errorf("invalid chart URL format: %s", ref) } - return url.Parse(resolvedURL) + loc, err := url.Parse(resolvedURL) + return cv.Digest, loc, err } // VerifyChart takes a path to a chart archive and a keyring, and verifies the chart. // // It assumes that a chart archive file is accompanied by a provenance file whose // name is the archive file name plus the ".prov" extension. -func VerifyChart(path, keyring string) (*provenance.Verification, error) { +func VerifyChart(path, provfile, keyring string) (*provenance.Verification, error) { // For now, error out if it's not a tar file. 
switch fi, err := os.Stat(path); { case err != nil: @@ -288,7 +434,6 @@ func VerifyChart(path, keyring string) (*provenance.Verification, error) { return nil, errors.New("chart must be a tgz file") } - provfile := path + ".prov" if _, err := os.Stat(provfile); err != nil { return nil, fmt.Errorf("could not load provenance file %s: %w", provfile, err) } diff --git a/pkg/downloader/chart_downloader_test.go b/pkg/downloader/chart_downloader_test.go index a2e09eae5..5b5f96751 100644 --- a/pkg/downloader/chart_downloader_test.go +++ b/pkg/downloader/chart_downloader_test.go @@ -79,7 +79,7 @@ func TestResolveChartRef(t *testing.T) { } for _, tt := range tests { - u, err := c.ResolveChartVersion(tt.ref, tt.version) + _, u, err := c.ResolveChartVersion(tt.ref, tt.version) if err != nil { if tt.fail { continue @@ -131,7 +131,7 @@ func TestResolveChartOpts(t *testing.T) { continue } - u, err := c.ResolveChartVersion(tt.ref, tt.version) + _, u, err := c.ResolveChartVersion(tt.ref, tt.version) if err != nil { t.Errorf("%s: failed with error %s", tt.name, err) continue @@ -155,7 +155,7 @@ func TestResolveChartOpts(t *testing.T) { } func TestVerifyChart(t *testing.T) { - v, err := VerifyChart("testdata/signtest-0.1.0.tgz", "testdata/helm-test-key.pub") + v, err := VerifyChart("testdata/signtest-0.1.0.tgz", "testdata/signtest-0.1.0.tgz.prov", "testdata/helm-test-key.pub") if err != nil { t.Fatal(err) } diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 0c9f256d3..673c6ea87 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -823,12 +823,12 @@ func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) { } // ValidateReference for path and version -func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, error) { +func (c *Client) ValidateReference(ref, version string, u *url.URL) (string, *url.URL, error) { var tag string registryReference, err := newReference(u.Host + u.Path) if err != nil { - return nil, err + return "", nil, err } if version == "" { @@ -836,14 +836,14 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e version = registryReference.Tag } else { if registryReference.Tag != "" && registryReference.Tag != version { - return nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag) + return "", nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag) } } if registryReference.Digest != "" { if version == "" { // Install by digest only - return u, nil + return "", u, nil } u.Path = fmt.Sprintf("%s@%s", registryReference.Repository, registryReference.Digest) @@ -852,12 +852,12 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e desc, err := c.Resolve(path) if err != nil { // The resource does not have to be tagged when digest is specified - return u, nil + return "", u, nil } if desc.Digest.String() != registryReference.Digest { - return nil, fmt.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest) + return "", nil, fmt.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest) } - return u, nil + return registryReference.Digest, u, nil } // Evaluate whether an explicit version has been provided. 
Otherwise, determine version to use @@ -868,10 +868,10 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e // Retrieve list of repository tags tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme))) if err != nil { - return nil, err + return "", nil, err } if len(tags) == 0 { - return nil, fmt.Errorf("unable to locate any tags in provided repository: %s", ref) + return "", nil, fmt.Errorf("unable to locate any tags in provided repository: %s", ref) } // Determine if version provided @@ -880,13 +880,14 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e // If semver constraint string, try to find a match tag, err = GetTagMatchingVersionOrConstraint(tags, version) if err != nil { - return nil, err + return "", nil, err } } u.Path = fmt.Sprintf("%s:%s", registryReference.Repository, tag) + // desc, err := c.Resolve(u.Path) - return u, err + return "", u, err } // tagManifest prepares and tags a manifest in memory storage From 62e0c78ef8dcfbdaffc44c634088c00f692d8344 Mon Sep 17 00:00:00 2001 From: Isaiah Lewis Date: Tue, 19 Aug 2025 12:35:12 -0700 Subject: [PATCH 45/88] fix(helm-lint): fmt Signed-off-by: Isaiah Lewis --- pkg/chart/v2/util/jsonschema.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/v2/util/jsonschema.go index 0d03db710..72e133363 100644 --- a/pkg/chart/v2/util/jsonschema.go +++ b/pkg/chart/v2/util/jsonschema.go @@ -63,7 +63,7 @@ func newHTTPURLLoader() *HTTPURLLoader { httpLoader := HTTPURLLoader(http.Client{ Timeout: 15 * time.Second, Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{}, }, }) From ebc874ef844bd85d0ad33df1183d1a7c6b388df7 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 20 Aug 2025 17:37:01 -0700 Subject: [PATCH 46/88] fix client-side to server-side field manager migration Signed-off-by: George Jenkins --- pkg/action/install.go | 3 +- pkg/action/rollback.go | 3 +- pkg/action/upgrade.go | 10 +++- pkg/kube/client.go | 121 +++++++++++++++++++++++++++++++++++----- pkg/kube/client_test.go | 5 +- 5 files changed, 123 insertions(+), 19 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index 89755d4e5..90c673b9c 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -488,7 +488,8 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource resources, kube.ClientUpdateOptionForceReplace(i.ForceReplace), kube.ClientUpdateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts), - kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured)) + kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured), + kube.ClientUpdateOptionUpgradeClientSideFieldManager(true)) } if err != nil { return rel, err diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 5f0ed02f1..adaf22615 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -210,7 +210,8 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas target, kube.ClientUpdateOptionForceReplace(r.ForceReplace), kube.ClientUpdateOptionServerSideApply(serverSideApply, r.ForceConflicts), - kube.ClientUpdateOptionThreeWayMergeForUnstructured(false)) + kube.ClientUpdateOptionThreeWayMergeForUnstructured(false), + kube.ClientUpdateOptionUpgradeClientSideFieldManager(true)) if err != nil { msg := fmt.Sprintf("Rollback %q failed: %s", 
targetRelease.Name, err) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 41ddf859f..d86ac7752 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -399,6 +399,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR defer close(doneChan) go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease, serverSideApply) go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease) + select { case result := <-rChan: return result.r, result.e @@ -431,6 +432,11 @@ func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c ch return } } + +func isReleaseApplyMethodClientSideApply(applyMethod string) bool { + return applyMethod == "" || applyMethod == string(release.ApplyMethodClientSideApply) +} + func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release, serverSideApply bool) { // pre-upgrade hooks @@ -443,11 +449,13 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele slog.Debug("upgrade hooks disabled", "name", upgradedRelease.Name) } + upgradeClientSideFieldManager := isReleaseApplyMethodClientSideApply(originalRelease.ApplyMethod) && serverSideApply // Update client-side field manager if transitioning from client-side to server-side apply results, err := u.cfg.KubeClient.Update( current, target, kube.ClientUpdateOptionForceReplace(u.ForceReplace), - kube.ClientUpdateOptionServerSideApply(serverSideApply, u.ForceConflicts)) + kube.ClientUpdateOptionServerSideApply(serverSideApply, u.ForceConflicts), + kube.ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager)) if err != nil { u.cfg.recordRelease(originalRelease) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 016055392..c41165490 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -47,12 +47,14 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/jsonmergepatch" "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/util/csaupgrade" "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -577,12 +579,13 @@ func (c *Client) update(originals, targets ResourceList, updateApplyFunc UpdateA } type clientUpdateOptions struct { - threeWayMergeForUnstructured bool - serverSideApply bool - forceReplace bool - forceConflicts bool - dryRun bool - fieldValidationDirective FieldValidationDirective + threeWayMergeForUnstructured bool + serverSideApply bool + forceReplace bool + forceConflicts bool + dryRun bool + fieldValidationDirective FieldValidationDirective + upgradeClientSideFieldManager bool } type ClientUpdateOption func(*clientUpdateOptions) error @@ -640,14 +643,32 @@ func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption { // - For server-side apply: the directive is sent to the server to perform the validation // // Defaults to `FieldValidationDirectiveStrict` -func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientCreateOption { - return func(o *clientCreateOptions) error { +func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective 
FieldValidationDirective) ClientUpdateOption { + return func(o *clientUpdateOptions) error { o.fieldValidationDirective = fieldValidationDirective return nil } } +// ClientUpdateOptionUpgradeClientSideFieldManager specifies that resources client-side field manager should be upgraded to server-side apply +// (before applying the object server-side) +// This is required when upgrading a chart from client-side to server-side apply, otherwise the client-side field management remains. Conflicting with server-side applied updates. +// +// Note: +// if this option is specified, but the object is not managed by client-side field manager, it will be a no-op. However, the cost of fetching the objects will be incurred. +// +// see: +// - https://github.com/kubernetes/kubernetes/pull/112905 +// - `UpgradeManagedFields` / https://github.com/kubernetes/kubernetes/blob/f47e9696d7237f1011d23c9b55f6947e60526179/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go#L81 +func ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager bool) ClientUpdateOption { + return func(o *clientUpdateOptions) error { + o.upgradeClientSideFieldManager = upgradeClientSideFieldManager + + return nil + } +} + type UpdateApplyFunc func(original, target *resource.Info) error // Update takes the current list of objects and target list of objects and @@ -707,15 +728,28 @@ func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdate "using server-side apply for resource update", slog.Bool("forceConflicts", updateOptions.forceConflicts), slog.Bool("dryRun", updateOptions.dryRun), - slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective))) - return func(_, target *resource.Info) error { - err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective) + slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective)), + slog.Bool("upgradeClientSideFieldManager", updateOptions.upgradeClientSideFieldManager)) + return func(original, target *resource.Info) error { logger := slog.With( slog.String("namespace", target.Namespace), slog.String("name", target.Name), slog.String("gvk", target.Mapping.GroupVersionKind.String())) - if err != nil { + + if updateOptions.upgradeClientSideFieldManager { + patched, err := upgradeClientSideFieldManager(original, updateOptions.dryRun, updateOptions.fieldValidationDirective) + if err != nil { + slog.Debug("Error patching resource to replace CSA field management", slog.Any("error", err)) + return err + } + + if patched { + logger.Debug("Upgraded object client-side field management with server-side apply field management") + } + } + + if err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective); err != nil { logger.Debug("Error patching resource", slog.Any("error", err)) return err } @@ -996,19 +1030,76 @@ func patchResourceClientSide(original runtime.Object, target *resource.Info, thr return nil } +// upgradeClientSideFieldManager is simply a wrapper around csaupgrade.UpgradeManagedFields +// that ugrade CSA managed fields to SSA apply +// see: https://github.com/kubernetes/kubernetes/pull/112905 +func upgradeClientSideFieldManager(info *resource.Info, dryRun bool, fieldValidationDirective FieldValidationDirective) (bool, error) { + + fieldManagerName := getManagedFieldsManager() + + patched := false + err := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { 
+ + if err := info.Get(); err != nil { + return fmt.Errorf("failed to get object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err) + } + + helper := resource.NewHelper( + info.Client, + info.Mapping). + DryRun(dryRun). + WithFieldManager(fieldManagerName). + WithFieldValidation(string(fieldValidationDirective)) + + patchData, err := csaupgrade.UpgradeManagedFieldsPatch( + info.Object, + sets.New(fieldManagerName), + fieldManagerName) + if err != nil { + return fmt.Errorf("failed to upgrade managed fields for object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err) + } + + if len(patchData) == 0 { + return nil + } + + obj, err := helper.Patch( + info.Namespace, + info.Name, + types.JSONPatchType, + patchData, + nil) + + if err == nil { + patched = true + return info.Refresh(obj, true) + } + + if !apierrors.IsConflict(err) { + return fmt.Errorf("failed to patch object to upgrade CSA field manager %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err) + } + + return err + }) + + return patched, err +} + // Patch reource using server-side apply func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error { helper := resource.NewHelper( target.Client, target.Mapping). DryRun(dryRun). - WithFieldManager(ManagedFieldsManager). + WithFieldManager(getManagedFieldsManager()). WithFieldValidation(string(fieldValidationDirective)) // Send the full object to be applied on the server side. data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, target.Object) if err != nil { - return fmt.Errorf("failed to encode object %s/%s with kind %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.Kind, err) + return fmt.Errorf("failed to encode object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err) } options := metav1.PatchOptions{ Force: &forceConflicts, @@ -1026,7 +1117,7 @@ func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts } if apierrors.IsConflict(err) { - return fmt.Errorf("conflict occurred while applying %s/%s with kind %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.Kind, err) + return fmt.Errorf("conflict occurred while applying object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err) } return err diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 5060a5fc2..a8a8668c7 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -339,9 +339,11 @@ func TestUpdate(t *testing.T) { } expectedActionsServerSideApply := []string{ + "/namespaces/default/pods/starfish:GET", "/namespaces/default/pods/starfish:GET", "/namespaces/default/pods/starfish:PATCH", "/namespaces/default/pods/otter:GET", + "/namespaces/default/pods/otter:GET", "/namespaces/default/pods/otter:PATCH", "/namespaces/default/pods/dolphin:GET", "/namespaces/default/pods:POST", // create dolphin @@ -467,7 +469,8 @@ func TestUpdate(t *testing.T) { second, ClientUpdateOptionThreeWayMergeForUnstructured(tc.ThreeWayMergeForUnstructured), ClientUpdateOptionForceReplace(false), - ClientUpdateOptionServerSideApply(tc.ServerSideApply, false)) + ClientUpdateOptionServerSideApply(tc.ServerSideApply, false), + ClientUpdateOptionUpgradeClientSideFieldManager(true)) require.NoError(t, err) assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) 
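As a side note on the patch above: a minimal sketch, outside the patch, of the managed-fields migration that ClientUpdateOptionUpgradeClientSideFieldManager relies on. It uses the same client-go csaupgrade helper called in pkg/kube/client.go; the ConfigMap resource, namespace, and field-manager name here are assumptions for illustration only.

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/apimachinery/pkg/util/sets"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/csaupgrade"
    )

    // upgradeConfigMapFieldManager migrates managedFields entries owned by a
    // client-side-apply manager to a server-side-apply manager of the same name,
    // mirroring what upgradeClientSideFieldManager does for arbitrary resources.
    func upgradeConfigMapFieldManager(ctx context.Context, cs kubernetes.Interface, ns, name, manager string) error {
        cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }

        // UpgradeManagedFieldsPatch returns a JSON patch (or nil when there is
        // nothing to migrate) that rewrites the CSA-owned managedFields entries.
        patch, err := csaupgrade.UpgradeManagedFieldsPatch(cm, sets.New(manager), manager)
        if err != nil || len(patch) == 0 {
            return err
        }

        _, err = cs.CoreV1().ConfigMaps(ns).Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
        return err
    }

Without this migration step, fields previously recorded under a client-side Update manager would conflict with subsequent server-side apply requests, which is the situation the patch above guards against.
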
From 04cb1bad672e5d10453bea42d0fcaee5dae8df63 Mon Sep 17 00:00:00 2001 From: cuiweixie Date: Thu, 21 Aug 2025 19:44:33 +0800 Subject: [PATCH 47/88] pkg/register: refactor to use atomic.Uint64 Signed-off-by: cuiweixie --- pkg/registry/transport.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/registry/transport.go b/pkg/registry/transport.go index a82229e2f..9d6a37326 100644 --- a/pkg/registry/transport.go +++ b/pkg/registry/transport.go @@ -32,7 +32,7 @@ import ( var ( // requestCount records the number of logged request-response pairs and will // be used as the unique id for the next pair. - requestCount uint64 + requestCount atomic.Uint64 // toScrub is a set of headers that should be scrubbed from the log. toScrub = []string{ @@ -79,7 +79,7 @@ func NewTransport(debug bool) *retry.Transport { // RoundTrip calls base round trip while keeping track of the current request. func (t *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - id := atomic.AddUint64(&requestCount, 1) - 1 + id := requestCount.Add(1) - 1 slog.Debug(req.Method, "id", id, "url", req.URL, "header", logHeader(req.Header)) resp, err = t.RoundTripper.RoundTrip(req) From fea6d8eb045ec82bfb6a500d91fa6c965898efd2 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Thu, 21 Aug 2025 14:25:55 -0400 Subject: [PATCH 48/88] Updating to tested content cache A few things are added here: 1. The cache is made to be more generic as a content based cache. It could be used for other things such as plugins 2. Flags were added to specify the content cache locaiton rather than rely on the repository cache. Keeping the 2 the same hid bugs and errors. 3. Tests were added and updated to ensure the cache is used and tested Signed-off-by: Matt Farina --- internal/third_party/dep/fs/fs.go | 8 +- internal/third_party/dep/fs/fs_test.go | 6 +- pkg/action/install.go | 1 + pkg/action/pull.go | 1 + pkg/cli/environment.go | 4 + pkg/cmd/dependency_build.go | 1 + pkg/cmd/dependency_update.go | 1 + pkg/cmd/dependency_update_test.go | 19 ++-- pkg/cmd/install.go | 1 + pkg/cmd/package.go | 1 + pkg/cmd/pull_test.go | 5 +- pkg/cmd/show_test.go | 5 +- pkg/cmd/upgrade.go | 1 + pkg/downloader/cache.go | 29 +++--- pkg/downloader/cache_test.go | 122 ++++++++++++++++++++++++ pkg/downloader/chart_downloader.go | 75 ++++++++++++--- pkg/downloader/chart_downloader_test.go | 119 +++++++++++++++++++++++ pkg/downloader/manager.go | 4 + pkg/downloader/manager_test.go | 2 + 19 files changed, 364 insertions(+), 41 deletions(-) create mode 100644 pkg/downloader/cache_test.go diff --git a/internal/third_party/dep/fs/fs.go b/internal/third_party/dep/fs/fs.go index 717eff04d..6e2720f3b 100644 --- a/internal/third_party/dep/fs/fs.go +++ b/internal/third_party/dep/fs/fs.go @@ -73,7 +73,7 @@ func renameByCopy(src, dst string) error { cerr = fmt.Errorf("copying directory failed: %w", cerr) } } else { - cerr = copyFile(src, dst) + cerr = CopyFile(src, dst) if cerr != nil { cerr = fmt.Errorf("copying file failed: %w", cerr) } @@ -139,7 +139,7 @@ func CopyDir(src, dst string) error { } else { // This will include symlinks, which is what we want when // copying things. 
- if err = copyFile(srcPath, dstPath); err != nil { + if err = CopyFile(srcPath, dstPath); err != nil { return fmt.Errorf("copying file failed: %w", err) } } @@ -148,11 +148,11 @@ func CopyDir(src, dst string) error { return nil } -// copyFile copies the contents of the file named src to the file named +// CopyFile copies the contents of the file named src to the file named // by dst. The file will be created if it does not already exist. If the // destination file exists, all its contents will be replaced by the contents // of the source file. The file mode will be copied from the source. -func copyFile(src, dst string) (err error) { +func CopyFile(src, dst string) (err error) { if sym, err := IsSymlink(src); err != nil { return fmt.Errorf("symlink check failed: %w", err) } else if sym { diff --git a/internal/third_party/dep/fs/fs_test.go b/internal/third_party/dep/fs/fs_test.go index 4c59d17fe..610771bc3 100644 --- a/internal/third_party/dep/fs/fs_test.go +++ b/internal/third_party/dep/fs/fs_test.go @@ -326,7 +326,7 @@ func TestCopyFile(t *testing.T) { srcf.Close() destf := filepath.Join(dir, "destf") - if err := copyFile(srcf.Name(), destf); err != nil { + if err := CopyFile(srcf.Name(), destf); err != nil { t.Fatal(err) } @@ -366,7 +366,7 @@ func TestCopyFileSymlink(t *testing.T) { for symlink, dst := range testcases { t.Run(symlink, func(t *testing.T) { var err error - if err = copyFile(symlink, dst); err != nil { + if err = CopyFile(symlink, dst); err != nil { t.Fatalf("failed to copy symlink: %s", err) } @@ -438,7 +438,7 @@ func TestCopyFileFail(t *testing.T) { defer cleanup() fn := filepath.Join(dstdir, "file") - if err := copyFile(srcf.Name(), fn); err == nil { + if err := CopyFile(srcf.Name(), fn); err == nil { t.Fatalf("expected error for %s, got none", fn) } } diff --git a/pkg/action/install.go b/pkg/action/install.go index b13bbfb8b..db130c6e9 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -815,6 +815,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( }, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, RegistryClient: c.registryClient, } diff --git a/pkg/action/pull.go b/pkg/action/pull.go index b4779f8d2..c1f77e44c 100644 --- a/pkg/action/pull.go +++ b/pkg/action/pull.go @@ -88,6 +88,7 @@ func (p *Pull) Run(chartRef string) (string, error) { RegistryClient: p.cfg.RegistryClient, RepositoryConfig: p.Settings.RepositoryConfig, RepositoryCache: p.Settings.RepositoryCache, + ContentCache: p.Settings.ContentCache, } if registry.IsOCI(chartRef) { diff --git a/pkg/cli/environment.go b/pkg/cli/environment.go index c5f87cf24..19563cba3 100644 --- a/pkg/cli/environment.go +++ b/pkg/cli/environment.go @@ -91,6 +91,8 @@ type EnvSettings struct { QPS float32 // ColorMode controls colorized output (never, auto, always) ColorMode string + // ContentCache is the location where cached charts are stored + ContentCache string } func New() *EnvSettings { @@ -109,6 +111,7 @@ func New() *EnvSettings { RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")), RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")), RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")), + ContentCache: envOr("HELM_CONTENT_CACHE", helmpath.CachePath("content")), BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit), QPS: envFloat32Or("HELM_QPS", defaultQPS), ColorMode: envColorMode(), 
@@ -161,6 +164,7 @@ func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file") fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs") fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the directory containing cached repository indexes") + fs.StringVar(&s.ContentCache, "content-cache", s.ContentCache, "path to the directory containing cached content (e.g. charts)") fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit") fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting") fs.StringVar(&s.ColorMode, "color", s.ColorMode, "use colored output (never, auto, always)") diff --git a/pkg/cmd/dependency_build.go b/pkg/cmd/dependency_build.go index 16907facf..320fe12ae 100644 --- a/pkg/cmd/dependency_build.go +++ b/pkg/cmd/dependency_build.go @@ -69,6 +69,7 @@ func newDependencyBuildCmd(out io.Writer) *cobra.Command { RegistryClient: registryClient, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, Debug: settings.Debug, } if client.Verify { diff --git a/pkg/cmd/dependency_update.go b/pkg/cmd/dependency_update.go index 921e5ef49..b534fb48a 100644 --- a/pkg/cmd/dependency_update.go +++ b/pkg/cmd/dependency_update.go @@ -73,6 +73,7 @@ func newDependencyUpdateCmd(_ *action.Configuration, out io.Writer) *cobra.Comma RegistryClient: registryClient, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, Debug: settings.Debug, } if client.Verify { diff --git a/pkg/cmd/dependency_update_test.go b/pkg/cmd/dependency_update_test.go index 9646c6816..f1b39c4b7 100644 --- a/pkg/cmd/dependency_update_test.go +++ b/pkg/cmd/dependency_update_test.go @@ -45,6 +45,7 @@ func TestDependencyUpdateCmd(t *testing.T) { if err != nil { t.Fatal(err) } + contentCache := t.TempDir() ociChartName := "oci-depending-chart" c := createTestingMetadataForOCI(ociChartName, ociSrv.RegistryURL) @@ -69,7 +70,7 @@ func TestDependencyUpdateCmd(t *testing.T) { } _, out, err := executeActionCommand( - fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir()), + fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache), ) if err != nil { t.Logf("Output: %s", out) @@ -112,7 +113,7 @@ func TestDependencyUpdateCmd(t *testing.T) { t.Fatal(err) } - _, out, err = executeActionCommand(fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir())) + _, out, err = executeActionCommand(fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache)) if err != nil { t.Logf("Output: %s", out) t.Fatal(err) @@ -133,11 +134,12 @@ func TestDependencyUpdateCmd(t *testing.T) { if err := chartutil.SaveDir(c, dir()); err != nil { t.Fatal(err) } - cmd := fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --plain-http", + cmd := 
fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --content-cache %s --plain-http", dir(ociChartName), dir("repositories.yaml"), dir(), - dir()) + dir(), + contentCache) _, out, err = executeActionCommand(cmd) if err != nil { t.Logf("Output: %s", out) @@ -179,8 +181,9 @@ func TestDependencyUpdateCmd_DoNotDeleteOldChartsOnError(t *testing.T) { // Chart repo is down srv.Stop() + contentCache := t.TempDir() - _, output, err = executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir())) + _, output, err = executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache)) if err == nil { t.Logf("Output: %s", output) t.Fatal("Expected error, got nil") @@ -232,9 +235,11 @@ func TestDependencyUpdateCmd_WithRepoThatWasNotAdded(t *testing.T) { t.Fatal(err) } + contentCache := t.TempDir() + _, out, err := executeActionCommand( - fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s", dir(chartname), - dir("repositories.yaml"), dir()), + fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s", dir(chartname), + dir("repositories.yaml"), dir(), contentCache), ) if err != nil { diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index d53b1d981..b254b887e 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -287,6 +287,7 @@ func runInstall(args []string, client *action.Install, valueOpts *values.Options Getters: p, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, Debug: settings.Debug, RegistryClient: client.GetRegistryClient(), } diff --git a/pkg/cmd/package.go b/pkg/cmd/package.go index 40c503222..fc56e936a 100644 --- a/pkg/cmd/package.go +++ b/pkg/cmd/package.go @@ -100,6 +100,7 @@ func newPackageCmd(out io.Writer) *cobra.Command { RegistryClient: registryClient, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, } if err := downloadManager.Update(); err != nil { diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go index 58e1862ae..ed8ea442e 100644 --- a/pkg/cmd/pull_test.go +++ b/pkg/cmd/pull_test.go @@ -212,15 +212,18 @@ func TestPullCmd(t *testing.T) { }, } + contentCache := t.TempDir() + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { outdir := srv.Root() - cmd := fmt.Sprintf("fetch %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s --plain-http", + cmd := fmt.Sprintf("fetch %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s --content-cache %s --plain-http", tt.args, outdir, filepath.Join(outdir, "repositories.yaml"), outdir, filepath.Join(outdir, "config.json"), + contentCache, ) // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182 if tt.existFile != "" { diff --git a/pkg/cmd/show_test.go b/pkg/cmd/show_test.go index ab8cafc37..5ccb4bcad 100644 --- a/pkg/cmd/show_test.go +++ b/pkg/cmd/show_test.go @@ -64,14 +64,17 @@ func TestShowPreReleaseChart(t *testing.T) { }, } + contentTmp := t.TempDir() + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { outdir := srv.Root() - cmd := fmt.Sprintf("show all '%s' %s --repository-config %s --repository-cache %s", 
+ cmd := fmt.Sprintf("show all '%s' %s --repository-config %s --repository-cache %s --content-cache %s", tt.args, tt.flags, filepath.Join(outdir, "repositories.yaml"), outdir, + contentTmp, ) //_, out, err := executeActionCommand(cmd) _, _, err := executeActionCommand(cmd) diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index c3288286b..4f204037a 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -210,6 +210,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { Getters: p, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + ContentCache: settings.ContentCache, Debug: settings.Debug, } if err := man.Update(); err != nil { diff --git a/pkg/downloader/cache.go b/pkg/downloader/cache.go index d9b925756..cecfc8bd7 100644 --- a/pkg/downloader/cache.go +++ b/pkg/downloader/cache.go @@ -17,6 +17,7 @@ package downloader import ( "crypto/sha256" + "errors" "fmt" "io" "log/slog" @@ -31,11 +32,17 @@ import ( // digests in index files providing a common key for checking content. type Cache interface { // Get returns a reader for the given key. - Get(key [sha256.Size]byte, prov bool) (string, error) + Get(key [sha256.Size]byte, cacheType string) (string, error) // Put stores the given reader for the given key. - Put(key [sha256.Size]byte, data io.Reader, prov bool) (string, error) + Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error) } +// CacheChart specifies the content is a chart +var CacheChart = ".chart" + +// CacheProv specifies the content is a provenance file +var CacheProv = ".prov" + // TODO: The cache assumes files because much of Helm assumes files. Convert // Helm to pass content around instead of file locations. @@ -45,8 +52,8 @@ type DiskCache struct { } // Get returns a reader for the given key. -func (c *DiskCache) Get(key [sha256.Size]byte, prov bool) (string, error) { - p := c.fileName(key, prov) +func (c *DiskCache) Get(key [sha256.Size]byte, cacheType string) (string, error) { + p := c.fileName(key, cacheType) fi, err := os.Stat(p) if err != nil { return "", err @@ -58,16 +65,16 @@ func (c *DiskCache) Get(key [sha256.Size]byte, prov bool) (string, error) { // directories should never happen unless something outside helm is operating // on this content. if fi.IsDir() { - return p, os.ErrInvalid + return p, errors.New("is a directory") } return p, nil } // Put stores the given reader for the given key. // It returns the path to the stored file. -func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, prov bool) (string, error) { +func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error) { // TODO: verify the key and digest of the key are the same. - p := c.fileName(key, prov) + p := c.fileName(key, cacheType) if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil { slog.Error("failed to create cache directory") return p, err @@ -77,10 +84,6 @@ func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, prov bool) (strin // fileName generates the filename in a structured manner where the first part is the // directory and the full hash is the filename. 
-func (c *DiskCache) fileName(id [sha256.Size]byte, prov bool) string { - suffix := ".tgz" - if prov { - suffix = ".prov" - } - return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+suffix) +func (c *DiskCache) fileName(id [sha256.Size]byte, cacheType string) string { + return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+cacheType) } diff --git a/pkg/downloader/cache_test.go b/pkg/downloader/cache_test.go new file mode 100644 index 000000000..340c77aba --- /dev/null +++ b/pkg/downloader/cache_test.go @@ -0,0 +1,122 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "bytes" + "crypto/sha256" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// compiler check to ensure DiskCache implements the Cache interface. +var _ Cache = (*DiskCache)(nil) + +func TestDiskCache_PutAndGet(t *testing.T) { + // Setup a temporary directory for the cache + tmpDir := t.TempDir() + cache := &DiskCache{Root: tmpDir} + + // Test data + content := []byte("hello world") + key := sha256.Sum256(content) + + // --- Test case 1: Put and Get a regular file (prov=false) --- + t.Run("PutAndGetTgz", func(t *testing.T) { + // Put the data into the cache + path, err := cache.Put(key, bytes.NewReader(content), CacheChart) + require.NoError(t, err, "Put should not return an error") + + // Verify the file exists at the returned path + _, err = os.Stat(path) + require.NoError(t, err, "File should exist after Put") + + // Get the file from the cache + retrievedPath, err := cache.Get(key, CacheChart) + require.NoError(t, err, "Get should not return an error for existing file") + assert.Equal(t, path, retrievedPath, "Get should return the same path as Put") + + // Verify content + data, err := os.ReadFile(retrievedPath) + require.NoError(t, err) + assert.Equal(t, content, data, "Content of retrieved file should match original content") + }) + + // --- Test case 2: Put and Get a provenance file (prov=true) --- + t.Run("PutAndGetProv", func(t *testing.T) { + provContent := []byte("provenance data") + provKey := sha256.Sum256(provContent) + + path, err := cache.Put(provKey, bytes.NewReader(provContent), CacheProv) + require.NoError(t, err) + + retrievedPath, err := cache.Get(provKey, CacheProv) + require.NoError(t, err) + assert.Equal(t, path, retrievedPath) + + data, err := os.ReadFile(retrievedPath) + require.NoError(t, err) + assert.Equal(t, provContent, data) + }) + + // --- Test case 3: Get a non-existent file --- + t.Run("GetNonExistent", func(t *testing.T) { + nonExistentKey := sha256.Sum256([]byte("does not exist")) + _, err := cache.Get(nonExistentKey, CacheChart) + assert.ErrorIs(t, err, os.ErrNotExist, "Get for a non-existent key should return os.ErrNotExist") + }) + + // --- Test case 4: Put an empty file --- + t.Run("PutEmptyFile", func(t *testing.T) { + emptyContent := []byte{} + emptyKey := sha256.Sum256(emptyContent) + + path, err := 
cache.Put(emptyKey, bytes.NewReader(emptyContent), CacheChart) + require.NoError(t, err) + + // Get should return ErrNotExist for empty files + _, err = cache.Get(emptyKey, CacheChart) + assert.ErrorIs(t, err, os.ErrNotExist, "Get for an empty file should return os.ErrNotExist") + + // But the file should exist + _, err = os.Stat(path) + require.NoError(t, err, "Empty file should still exist on disk") + }) + + // --- Test case 5: Get a directory --- + t.Run("GetDirectory", func(t *testing.T) { + dirKey := sha256.Sum256([]byte("i am a directory")) + dirPath := cache.fileName(dirKey, CacheChart) + err := os.MkdirAll(dirPath, 0755) + require.NoError(t, err) + + _, err = cache.Get(dirKey, CacheChart) + assert.EqualError(t, err, "is a directory") + }) +} + +func TestDiskCache_fileName(t *testing.T) { + cache := &DiskCache{Root: "/tmp/cache"} + key := sha256.Sum256([]byte("some data")) + + assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.chart"), cache.fileName(key, CacheChart)) + assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.prov"), cache.fileName(key, CacheProv)) +} diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index bdf65011c..693e6b009 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -23,12 +23,14 @@ import ( "fmt" "io" "io/fs" + "log/slog" "net/url" "os" "path/filepath" "strings" "helm.sh/helm/v4/internal/fileutil" + ifs "helm.sh/helm/v4/internal/third_party/dep/fs" "helm.sh/helm/v4/internal/urlutil" "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/helmpath" @@ -76,6 +78,11 @@ type ChartDownloader struct { RepositoryConfig string RepositoryCache string + // ContentCache is the location where Cache stores its files by default + // In previous versions of Helm the charts were put in the RepositoryCache. The + // repositories and charts are stored in 2 difference caches. + ContentCache string + // Cache specifies the cache implementation to use. Cache Cache } @@ -93,7 +100,11 @@ type ChartDownloader struct { // (if provenance was verified), or an error if something bad happened. 
func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { if c.Cache == nil { - c.Cache = &DiskCache{Root: c.RepositoryCache} + if c.ContentCache == "" { + return "", nil, errors.New("content cache must be set") + } + c.Cache = &DiskCache{Root: c.ContentCache} + slog.Debug("setup up default downloader cache") } hash, u, err := c.ResolveChartVersion(ref, version) if err != nil { @@ -119,11 +130,12 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven return "", nil, err } copy(digest32[:], digest) - if pth, err := c.Cache.Get(digest32, false); err == nil { + if pth, err := c.Cache.Get(digest32, CacheChart); err == nil { fdata, err := os.ReadFile(pth) if err == nil { found = true data = bytes.NewBuffer(fdata) + slog.Debug("found chart in cache", "id", hash) } } } @@ -154,11 +166,12 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven found = false var body *bytes.Buffer if hash != "" { - if pth, err := c.Cache.Get(digest32, true); err == nil { + if pth, err := c.Cache.Get(digest32, CacheProv); err == nil { fdata, err := os.ReadFile(pth) if err == nil { found = true body = bytes.NewBuffer(fdata) + slog.Debug("found provenance in cache", "id", hash) } } } @@ -192,7 +205,11 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven // DownloadToCache retrieves resources while using a content based cache. func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provenance.Verification, error) { if c.Cache == nil { - c.Cache = &DiskCache{Root: c.RepositoryCache} + if c.ContentCache == "" { + return "", nil, errors.New("content cache must be set") + } + c.Cache = &DiskCache{Root: c.ContentCache} + slog.Debug("setup up default downloader cache") } digestString, u, err := c.ResolveChartVersion(ref, version) @@ -221,9 +238,13 @@ func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provena var pth string // only fetch from the cache if we have a digest if len(digest) > 0 { - pth, err = c.Cache.Get(digest32, false) + pth, err = c.Cache.Get(digest32, CacheChart) + if err == nil { + slog.Debug("found chart in cache", "id", digestString) + } } if len(digest) == 0 || err != nil { + slog.Debug("attempting to download chart", "ref", ref, "version", version) if err != nil && !os.IsNotExist(err) { return "", nil, err } @@ -236,21 +257,24 @@ func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provena // Generate the digest if len(digest) == 0 { - h := sha256.New() - digest32 = [sha256.Size]byte(h.Sum(data.Bytes())) + digest32 = sha256.Sum256(data.Bytes()) } - pth, err = c.Cache.Put(digest32, data, false) + pth, err = c.Cache.Put(digest32, data, CacheChart) if err != nil { return "", nil, err } + slog.Debug("put downloaded chart in cache", "id", hex.EncodeToString(digest32[:])) } // If provenance is requested, verify it. 
ver := &provenance.Verification{} if c.Verify > VerifyNever { - ppth, err := c.Cache.Get(digest32, true) - if err != nil { + + ppth, err := c.Cache.Get(digest32, CacheProv) + if err == nil { + slog.Debug("found provenance in cache", "id", digestString) + } else { if !os.IsNotExist(err) { return pth, ver, err } @@ -264,14 +288,41 @@ func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provena return pth, ver, nil } - ppth, err = c.Cache.Put(digest32, body, true) + ppth, err = c.Cache.Put(digest32, body, CacheProv) if err != nil { return "", nil, err } + slog.Debug("put downloaded provenance file in cache", "id", hex.EncodeToString(digest32[:])) } if c.Verify != VerifyLater { - ver, err = VerifyChart(pth, ppth, c.Keyring) + + // provenance files pin to a specific name so this needs to be accounted for + // when verifying. + // Note, this does make an assumption that the name/version is unique to a + // hash when a provenance file is used. If this isn't true, this section of code + // will need to be reworked. + name := filepath.Base(u.Path) + if u.Scheme == registry.OCIScheme { + idx := strings.LastIndexByte(name, ':') + name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:]) + } + + // Copy chart to a known location with the right name for verification and then + // clean it up. + tmpdir := filepath.Dir(filepath.Join(c.ContentCache, "tmp")) + if err := os.MkdirAll(tmpdir, 0755); err != nil { + return pth, ver, err + } + tmpfile := filepath.Join(tmpdir, name) + err = ifs.CopyFile(pth, tmpfile) + if err != nil { + return pth, ver, err + } + // Not removing the tmp dir itself because a concurrent process may be using it + defer os.RemoveAll(tmpfile) + + ver, err = VerifyChart(tmpfile, ppth, c.Keyring) if err != nil { // Fail always in this case, since it means the verification step // failed. diff --git a/pkg/downloader/chart_downloader_test.go b/pkg/downloader/chart_downloader_test.go index 5b5f96751..649448fef 100644 --- a/pkg/downloader/chart_downloader_test.go +++ b/pkg/downloader/chart_downloader_test.go @@ -16,10 +16,14 @@ limitations under the License. 
package downloader import ( + "crypto/sha256" + "encoding/hex" "os" "path/filepath" "testing" + "github.com/stretchr/testify/require" + "helm.sh/helm/v4/internal/test/ensure" "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/getter" @@ -198,15 +202,19 @@ func TestDownloadTo(t *testing.T) { t.Fatal(err) } + contentCache := t.TempDir() + c := ChartDownloader{ Out: os.Stderr, Verify: VerifyAlways, Keyring: "testdata/helm-test-key.pub", RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, Getters: getter.All(&cli.EnvSettings{ RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, }), Options: []getter.Option{ getter.WithBasicAuth("username", "password"), @@ -250,6 +258,7 @@ func TestDownloadTo_TLS(t *testing.T) { repoConfig := filepath.Join(srv.Root(), "repositories.yaml") repoCache := srv.Root() + contentCache := t.TempDir() c := ChartDownloader{ Out: os.Stderr, @@ -257,9 +266,11 @@ func TestDownloadTo_TLS(t *testing.T) { Keyring: "testdata/helm-test-key.pub", RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, Getters: getter.All(&cli.EnvSettings{ RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, }), Options: []getter.Option{ getter.WithTLSClientConfig( @@ -304,15 +315,18 @@ func TestDownloadTo_VerifyLater(t *testing.T) { if err := srv.LinkIndices(); err != nil { t.Fatal(err) } + contentCache := t.TempDir() c := ChartDownloader{ Out: os.Stderr, Verify: VerifyLater, RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, Getters: getter.All(&cli.EnvSettings{ RepositoryConfig: repoConfig, RepositoryCache: repoCache, + ContentCache: contentCache, }), } cname := "/signtest-0.1.0.tgz" @@ -366,3 +380,108 @@ func TestScanReposForURL(t *testing.T) { t.Fatalf("expected ErrNoOwnerRepo, got %v", err) } } + +func TestDownloadToCache(t *testing.T) { + srv := repotest.NewTempServer(t, + repotest.WithChartSourceGlob("testdata/*.tgz*"), + ) + defer srv.Stop() + if err := srv.CreateIndex(); err != nil { + t.Fatal(err) + } + if err := srv.LinkIndices(); err != nil { + t.Fatal(err) + } + + // The repo file needs to point to our server. + repoFile := filepath.Join(srv.Root(), "repositories.yaml") + repoCache := srv.Root() + contentCache := t.TempDir() + + c := ChartDownloader{ + Out: os.Stderr, + Verify: VerifyNever, + RepositoryConfig: repoFile, + RepositoryCache: repoCache, + Getters: getter.All(&cli.EnvSettings{ + RepositoryConfig: repoFile, + RepositoryCache: repoCache, + ContentCache: contentCache, + }), + Cache: &DiskCache{Root: contentCache}, + } + + // Case 1: Chart not in cache, download it. 
+ t.Run("download and cache chart", func(t *testing.T) { + // Clear cache for this test + os.RemoveAll(contentCache) + os.MkdirAll(contentCache, 0755) + c.Cache = &DiskCache{Root: contentCache} + + pth, v, err := c.DownloadToCache("test/signtest", "0.1.0") + require.NoError(t, err) + require.NotNil(t, v) + + // Check that the file exists at the returned path + _, err = os.Stat(pth) + require.NoError(t, err, "chart should exist at returned path") + + // Check that it's in the cache + digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0") + require.NoError(t, err) + digestBytes, err := hex.DecodeString(digest) + require.NoError(t, err) + var digestArray [sha256.Size]byte + copy(digestArray[:], digestBytes) + + cachePath, err := c.Cache.Get(digestArray, CacheChart) + require.NoError(t, err, "chart should now be in cache") + require.Equal(t, pth, cachePath) + }) + + // Case 2: Chart is in cache, get from cache. + t.Run("get chart from cache", func(t *testing.T) { + // The cache should be populated from the previous test. + // To prove it's coming from cache, we can stop the server. + // But repotest doesn't support restarting. + // Let's just call it again and assume it works if it's fast and doesn't error. + pth, v, err := c.DownloadToCache("test/signtest", "0.1.0") + require.NoError(t, err) + require.NotNil(t, v) + + _, err = os.Stat(pth) + require.NoError(t, err, "chart should exist at returned path") + }) + + // Case 3: Download with verification + t.Run("download and verify", func(t *testing.T) { + // Clear cache + os.RemoveAll(contentCache) + os.MkdirAll(contentCache, 0755) + c.Cache = &DiskCache{Root: contentCache} + c.Verify = VerifyAlways + c.Keyring = "testdata/helm-test-key.pub" + + _, v, err := c.DownloadToCache("test/signtest", "0.1.0") + require.NoError(t, err) + require.NotNil(t, v) + require.NotEmpty(t, v.FileHash, "verification should have a file hash") + + // Check that both chart and prov are in cache + digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0") + require.NoError(t, err) + digestBytes, err := hex.DecodeString(digest) + require.NoError(t, err) + var digestArray [sha256.Size]byte + copy(digestArray[:], digestBytes) + + _, err = c.Cache.Get(digestArray, CacheChart) + require.NoError(t, err, "chart should be in cache") + _, err = c.Cache.Get(digestArray, CacheProv) + require.NoError(t, err, "provenance file should be in cache") + + // Reset for other tests + c.Verify = VerifyNever + c.Keyring = "" + }) +} diff --git a/pkg/downloader/manager.go b/pkg/downloader/manager.go index b43165975..8b77a77c0 100644 --- a/pkg/downloader/manager.go +++ b/pkg/downloader/manager.go @@ -75,6 +75,9 @@ type Manager struct { RegistryClient *registry.Client RepositoryConfig string RepositoryCache string + + // ContentCache is a location where a cache of charts can be stored + ContentCache string } // Build rebuilds a local charts directory from a lockfile. 
@@ -331,6 +334,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error { Keyring: m.Keyring, RepositoryConfig: m.RepositoryConfig, RepositoryCache: m.RepositoryCache, + ContentCache: m.ContentCache, RegistryClient: m.RegistryClient, Getters: m.Getters, Options: []getter.Option{ diff --git a/pkg/downloader/manager_test.go b/pkg/downloader/manager_test.go index f01a5d7ad..b7121a4ce 100644 --- a/pkg/downloader/manager_test.go +++ b/pkg/downloader/manager_test.go @@ -488,12 +488,14 @@ func checkBuildWithOptionalFields(t *testing.T, chartName string, dep chart.Depe Schemes: []string{"http", "https"}, New: getter.NewHTTPGetter, }} + contentCache := t.TempDir() m := &Manager{ ChartPath: dir(chartName), Out: b, Getters: g, RepositoryConfig: dir("repositories.yaml"), RepositoryCache: dir(), + ContentCache: contentCache, } // First build will update dependencies and create Chart.lock file. From be74ab72a06c2525fa833d3f118a2d4cf46e3c49 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Fri, 22 Aug 2025 16:12:49 -0400 Subject: [PATCH 49/88] [HIP-0026] Plugin runtime interface (#31145) * Runtime abstraction to encapsulate subprocess code and enable future runtimes Also fix race condition in TestPrepareCommandExtraArgs by replacing the shared variable modification with a local copy Co-authored-by: George Jenkins Signed-off-by: Scott Rigby * Remove commented out code Co-authored-by: Joe Julian Signed-off-by: Scott Rigby * Check test failure string Co-authored-by: Jesse Simpson Signed-off-by: Scott Rigby --------- Signed-off-by: Scott Rigby Co-authored-by: George Jenkins Co-authored-by: Joe Julian Co-authored-by: Jesse Simpson --- go.mod | 4 +- internal/plugin/config.go | 66 +++ internal/plugin/descriptor.go | 24 + internal/plugin/doc.go | 89 +++ internal/plugin/error.go | 29 + .../plugin/installer/local_installer_test.go | 6 +- .../plugin/installer/vcs_installer_test.go | 2 +- internal/plugin/loader.go | 224 ++++++++ internal/plugin/loader_test.go | 197 +++++++ internal/plugin/metadata.go | 155 +++++ internal/plugin/metadata_legacy.go | 113 ++++ internal/plugin/metadata_test.go | 141 +++++ internal/plugin/plugin.go | 370 ++---------- internal/plugin/plugin_test.go | 533 +----------------- internal/plugin/runtime.go | 33 ++ internal/plugin/runtime_subprocess.go | 229 ++++++++ internal/plugin/runtime_subprocess_getter.go | 92 +++ .../{hooks.go => runtime_subprocess_hooks.go} | 0 internal/plugin/runtime_subprocess_test.go | 64 +++ internal/plugin/schema/cli.go | 29 + internal/plugin/schema/getter.go | 47 ++ internal/plugin/subprocess_commands.go | 111 ++++ internal/plugin/subprocess_commands_test.go | 259 +++++++++ .../plugin.yaml | 0 .../plugdir/good/downloader/plugin.yaml | 1 + .../good/{echo => echo-legacy}/plugin.yaml | 3 +- .../good/{hello => hello-legacy}/hello.ps1 | 0 .../good/{hello => hello-legacy}/hello.sh | 0 .../good/{hello => hello-legacy}/plugin.yaml | 9 +- pkg/action/action.go | 2 +- pkg/action/install.go | 2 +- pkg/cmd/flags.go | 2 + pkg/cmd/helpers_test.go | 6 +- pkg/cmd/load_plugins.go | 159 ++++-- pkg/cmd/plugin.go | 37 +- pkg/cmd/plugin_install.go | 2 +- pkg/cmd/plugin_list.go | 44 +- pkg/cmd/plugin_test.go | 64 +-- pkg/cmd/plugin_uninstall.go | 11 +- pkg/cmd/plugin_update.go | 6 +- pkg/cmd/root.go | 4 +- pkg/cmd/testdata/testplugin/plugin.yaml | 4 - pkg/getter/getter.go | 35 +- pkg/getter/httpgetter.go | 2 +- pkg/getter/httpgetter_test.go | 2 +- pkg/getter/ocigetter.go | 4 +- pkg/getter/ocigetter_test.go | 2 +- pkg/getter/plugingetter.go | 147 ++--- 
pkg/getter/plugingetter_test.go | 120 ++-- pkg/getter/testdata/plugins/testgetter/get.sh | 8 - .../testdata/plugins/testgetter/plugin.yaml | 15 +- .../testdata/plugins/testgetter2/get.sh | 8 - .../testdata/plugins/testgetter2/plugin.yaml | 10 +- 53 files changed, 2346 insertions(+), 1180 deletions(-) create mode 100644 internal/plugin/config.go create mode 100644 internal/plugin/descriptor.go create mode 100644 internal/plugin/doc.go create mode 100644 internal/plugin/error.go create mode 100644 internal/plugin/loader.go create mode 100644 internal/plugin/loader_test.go create mode 100644 internal/plugin/metadata.go create mode 100644 internal/plugin/metadata_legacy.go create mode 100644 internal/plugin/metadata_test.go create mode 100644 internal/plugin/runtime.go create mode 100644 internal/plugin/runtime_subprocess.go create mode 100644 internal/plugin/runtime_subprocess_getter.go rename internal/plugin/{hooks.go => runtime_subprocess_hooks.go} (100%) create mode 100644 internal/plugin/runtime_subprocess_test.go create mode 100644 internal/plugin/schema/cli.go create mode 100644 internal/plugin/schema/getter.go create mode 100644 internal/plugin/subprocess_commands.go create mode 100644 internal/plugin/subprocess_commands_test.go rename internal/plugin/testdata/plugdir/bad/{duplicate-entries => duplicate-entries-legacy}/plugin.yaml (100%) rename internal/plugin/testdata/plugdir/good/{echo => echo-legacy}/plugin.yaml (85%) rename internal/plugin/testdata/plugdir/good/{hello => hello-legacy}/hello.ps1 (100%) rename internal/plugin/testdata/plugdir/good/{hello => hello-legacy}/hello.sh (100%) rename internal/plugin/testdata/plugdir/good/{hello => hello-legacy}/plugin.yaml (84%) delete mode 100644 pkg/cmd/testdata/testplugin/plugin.yaml delete mode 100755 pkg/getter/testdata/plugins/testgetter/get.sh delete mode 100755 pkg/getter/testdata/plugins/testgetter2/get.sh diff --git a/go.mod b/go.mod index f3a3ebd33..6557d7663 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/mitchellh/copystructure v1.2.0 github.com/moby/term v0.5.2 + github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 github.com/rubenv/sql-migrate v1.8.0 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 @@ -35,6 +36,7 @@ require ( golang.org/x/crypto v0.41.0 golang.org/x/term v0.34.0 golang.org/x/text v0.28.0 + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.33.4 k8s.io/apiextensions-apiserver v0.33.4 k8s.io/apimachinery v0.33.4 @@ -114,7 +116,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/onsi/gomega v1.37.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -169,7 +170,6 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.33.4 // indirect k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 // indirect k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect diff --git a/internal/plugin/config.go b/internal/plugin/config.go new file mode 100644 index 000000000..f308e7ae9 --- /dev/null +++ b/internal/plugin/config.go @@ -0,0 +1,66 @@ +/* +Copyright 
The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "fmt" +) + +// Config interface defines the methods that all plugin type configurations must implement +type Config interface { + GetType() string + Validate() error +} + +// ConfigCLI represents the configuration for CLI plugins +type ConfigCLI struct { + // Usage is the single-line usage text shown in help + // For recommended syntax, see [spf13/cobra.command.Command] Use field comment: + // https://pkg.go.dev/github.com/spf13/cobra#Command + Usage string `yaml:"usage"` + // ShortHelp is the short description shown in the 'helm help' output + ShortHelp string `yaml:"shortHelp"` + // LongHelp is the long message shown in the 'helm help ' output + LongHelp string `yaml:"longHelp"` + // IgnoreFlags ignores any flags passed in from Helm + IgnoreFlags bool `yaml:"ignoreFlags"` +} + +// ConfigGetter represents the configuration for download plugins +type ConfigGetter struct { + // Protocols are the list of URL schemes supported by this downloader + Protocols []string `yaml:"protocols"` +} + +func (c *ConfigCLI) GetType() string { return "cli/v1" } +func (c *ConfigGetter) GetType() string { return "getter/v1" } + +func (c *ConfigCLI) Validate() error { + // Config validation for CLI plugins + return nil +} + +func (c *ConfigGetter) Validate() error { + if len(c.Protocols) == 0 { + return fmt.Errorf("getter has no protocols") + } + for i, protocol := range c.Protocols { + if protocol == "" { + return fmt.Errorf("getter has empty protocol at index %d", i) + } + } + return nil +} diff --git a/internal/plugin/descriptor.go b/internal/plugin/descriptor.go new file mode 100644 index 000000000..ba92b3c55 --- /dev/null +++ b/internal/plugin/descriptor.go @@ -0,0 +1,24 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +// Descriptor describes a plugin to find +type Descriptor struct { + // Name is the name of the plugin + Name string + // Type is the type of the plugin (cli, getter, postrenderer) + Type string +} diff --git a/internal/plugin/doc.go b/internal/plugin/doc.go new file mode 100644 index 000000000..f150358bd --- /dev/null +++ b/internal/plugin/doc.go @@ -0,0 +1,89 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +--- +TODO: move this section to public plugin package + +Package plugin provides the implementation of the Helm plugin system. + +Conceptually, "plugins" enable extending Helm's functionality external to Helm's core codebase. The plugin system allows +code to fetch plugins by type, then invoke the plugin with an input as required by that plugin type. The plugin +returning an output for the caller to consume. + +An example of a plugin invocation: +``` +d := plugin.Descriptor{ + Type: "example/v1", // +} +plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d) + +for _, plg := range plgs { + input := &plugin.Input{ + Message: schema.InputMessageExampleV1{ // The type of the input message is defined by the plugin's "type" (example/v1 here) + ... + }, + } + output, err := plg.Invoke(context.Background(), input) + if err != nil { + ... + } + + // consume the output, using type assertion to convert to the expected output type (as defined by the plugin's "type") + outputMessage, ok := output.Message.(schema.OutputMessageExampleV1) +} + +--- + +Package `plugin` provides the implementation of the Helm plugin system. + +Helm plugins are exposed to uses as the "Plugin" type, the basic interface that primarily support the "Invoke" method. + +# Plugin Runtimes +Internally, plugins must be implemented by a "runtime" that is responsible for creating the plugin instance, and dispatching the plugin's invocation to the plugin's implementation. +For example: +- forming environment variables and command line args for subprocess execution +- converting input to JSON and invoking a function in a future runtime (eg, Wasm) + +Internally, the code structure is: +Runtime.CreatePlugin() + | + | (creates) + | + \---> PluginRuntime + | + | (implements) + v + Plugin.Invoke() + +# Plugin Types +Each plugin implements a specific functionality, denoted by the plugin's "type" e.g. "getter/v1". The "type" includes a version, in order to allow a given types messaging schema and invocation options to evolve. + +Specifically, the plugin's "type" specifies the contract for the input and output messages that are expected to be passed to the plugin, and returned from the plugin. The plugin's "type" also defines the options that can be passed to the plugin when invoking it. + +# Metadata +Each plugin must have a `plugin.yaml`, that defines the plugin's metadata. The metadata includes the plugin's name, version, and other information. + +For legacy plugins, the type is inferred by which fields are set on the plugin: a downloader plugin is inferred when metadata contains a "downloaders" yaml node, otherwise it is assumed to define a Helm CLI subcommand. + +For future plugin api versions, the metadata will include explicit apiVersion and type fields. It will also contain type and runtime specific Config and RuntimeConfig fields. + +# Runtime and type cardinality +From a cardinality perspective, this means there a "few" runtimes, and "many" plugins types. It is also expected that the subprocess runtime will not be extended to support extra plugin types, and deprecated in a future version of Helm. 
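+
+A code-level sketch of the CreatePlugin/Invoke structure outlined above (illustrative only;
+pluginDir, md, ctx and input are placeholder names, and the concrete message types are
+whatever the plugin's "type" selects):
+
+```
+rt := &RuntimeSubprocess{}                 // one of the "few" runtimes
+plg, err := rt.CreatePlugin(pluginDir, md) // md is the parsed *Metadata for the plugin
+if err != nil {
+	...
+}
+out, err := plg.Invoke(ctx, input) // input's Message must match the schema for md.Type, as in the example above
+```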
+ +Future ideas that are intended to be implemented include extending the plugin system to support future Wasm standards. Or allowing Helm SDK user's to inject "plugins" that are actually implemented as native go modules. Or even moving Helm's internal functionality e.g. yaml rendering engine to be used as an "in-built" plugin, along side other plugins that may implement other (non-go template) rendering engines. +*/ + +package plugin diff --git a/internal/plugin/error.go b/internal/plugin/error.go new file mode 100644 index 000000000..5ace680cb --- /dev/null +++ b/internal/plugin/error.go @@ -0,0 +1,29 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +// InvokeExecError is returned when a plugin invocation returns a non-zero status/exit code +// - subprocess plugin: child process exit code +// - extism plugin: wasm function return code +type InvokeExecError struct { + Err error // Underlying error + Code int // Exeit code from plugin code execution +} + +// Error implements the error interface +func (e *InvokeExecError) Error() string { + return e.Err.Error() +} diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index ef5660d7d..3b1c0f680 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -34,7 +34,7 @@ func TestLocalInstaller(t *testing.T) { t.Fatal(err) } - source := "../testdata/plugdir/good/echo" + source := "../testdata/plugdir/good/echo-legacy" i, err := NewForSource(source, "") if err != nil { t.Fatalf("unexpected error: %s", err) @@ -44,14 +44,14 @@ func TestLocalInstaller(t *testing.T) { t.Fatal(err) } - if i.Path() != helmpath.DataPath("plugins", "echo") { + if i.Path() != helmpath.DataPath("plugins", "echo-legacy") { t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path()) } defer os.RemoveAll(filepath.Dir(helmpath.DataPath())) // helmpath.DataPath is like /tmp/helm013130971/helm } func TestLocalInstallerNotAFolder(t *testing.T) { - source := "../testdata/plugdir/good/echo/plugin.yaml" + source := "../testdata/plugdir/good/echo-legacy/plugin.yaml" i, err := NewForSource(source, "") if err != nil { t.Fatalf("unexpected error: %s", err) diff --git a/internal/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go index 76b337a2f..9c65d244c 100644 --- a/internal/plugin/installer/vcs_installer_test.go +++ b/internal/plugin/installer/vcs_installer_test.go @@ -57,7 +57,7 @@ func TestVCSInstaller(t *testing.T) { } source := "https://github.com/adamreese/helm-env" - testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo") + testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo-legacy") repo := &testRepo{ local: testRepoPath, tags: []string{"0.1.0", "0.1.1"}, diff --git a/internal/plugin/loader.go b/internal/plugin/loader.go new file mode 100644 index 000000000..b47b15d34 --- /dev/null +++ b/internal/plugin/loader.go @@ -0,0 +1,224 @@ +/* 
+Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + + "go.yaml.in/yaml/v3" +) + +func peekAPIVersion(r io.Reader) (string, error) { + type apiVersion struct { + APIVersion string `yaml:"apiVersion"` + } + + var v apiVersion + d := yaml.NewDecoder(r) + if err := d.Decode(&v); err != nil { + return "", err + } + + return v.APIVersion, nil +} + +func loadMetadataLegacy(metadataData []byte) (*Metadata, error) { + + var ml MetadataLegacy + d := yaml.NewDecoder(bytes.NewReader(metadataData)) + if err := d.Decode(&ml); err != nil { + return nil, err + } + + if err := ml.Validate(); err != nil { + return nil, err + } + + m := fromMetadataLegacy(ml) + if err := m.Validate(); err != nil { + return nil, err + } + return m, nil +} + +func loadMetadata(metadataData []byte) (*Metadata, error) { + apiVersion, err := peekAPIVersion(bytes.NewReader(metadataData)) + if err != nil { + return nil, fmt.Errorf("failed to peek %s API version: %w", PluginFileName, err) + } + + switch apiVersion { + case "": // legacy + return loadMetadataLegacy(metadataData) + } + + return nil, fmt.Errorf("invalid plugin apiVersion: %q", apiVersion) +} + +type prototypePluginManager struct { + runtimes map[string]Runtime +} + +func newPrototypePluginManager() *prototypePluginManager { + return &prototypePluginManager{ + runtimes: map[string]Runtime{ + "subprocess": &RuntimeSubprocess{}, + }, + } +} + +func (pm *prototypePluginManager) RegisterRuntime(runtimeName string, runtime Runtime) { + pm.runtimes[runtimeName] = runtime +} + +func (pm *prototypePluginManager) CreatePlugin(pluginPath string, metadata *Metadata) (Plugin, error) { + rt, ok := pm.runtimes[metadata.Runtime] + if !ok { + return nil, fmt.Errorf("unsupported plugin runtime type: %q", metadata.Runtime) + } + + return rt.CreatePlugin(pluginPath, metadata) +} + +// LoadDir loads a plugin from the given directory. +func LoadDir(dirname string) (Plugin, error) { + pluginfile := filepath.Join(dirname, PluginFileName) + metadataData, err := os.ReadFile(pluginfile) + if err != nil { + return nil, fmt.Errorf("failed to read plugin at %q: %w", pluginfile, err) + } + + m, err := loadMetadata(metadataData) + if err != nil { + return nil, fmt.Errorf("failed to load plugin %q: %w", dirname, err) + } + + pm := newPrototypePluginManager() + return pm.CreatePlugin(dirname, m) +} + +// LoadAll loads all plugins found beneath the base directory. +// +// This scans only one directory level. 
+func LoadAll(basedir string) ([]Plugin, error) { + var plugins []Plugin + // We want basedir/*/plugin.yaml + scanpath := filepath.Join(basedir, "*", PluginFileName) + matches, err := filepath.Glob(scanpath) + if err != nil { + return nil, fmt.Errorf("failed to search for plugins in %q: %w", scanpath, err) + } + + // empty dir should load + if len(matches) == 0 { + return plugins, nil + } + + for _, yamlFile := range matches { + dir := filepath.Dir(yamlFile) + p, err := LoadDir(dir) + if err != nil { + return plugins, err + } + plugins = append(plugins, p) + } + return plugins, detectDuplicates(plugins) +} + +// findFunc is a function that finds plugins in a directory +type findFunc func(pluginsDir string) ([]Plugin, error) + +// filterFunc is a function that filters plugins +type filterFunc func(Plugin) bool + +// FindPlugins returns a list of plugins that match the descriptor +func FindPlugins(pluginsDirs []string, descriptor Descriptor) ([]Plugin, error) { + return findPlugins(pluginsDirs, LoadAll, makeDescriptorFilter(descriptor)) +} + +// findPlugins is the internal implementation that uses the find and filter functions +func findPlugins(pluginsDirs []string, findFn findFunc, filterFn filterFunc) ([]Plugin, error) { + var found []Plugin + for _, pluginsDir := range pluginsDirs { + ps, err := findFn(pluginsDir) + + if err != nil { + return nil, err + } + + for _, p := range ps { + if filterFn(p) { + found = append(found, p) + } + } + + } + + return found, nil +} + +// makeDescriptorFilter creates a filter function from a descriptor +// Additional plugin filter criteria we wish to support can be added here +func makeDescriptorFilter(descriptor Descriptor) filterFunc { + return func(p Plugin) bool { + // If name is specified, it must match + if descriptor.Name != "" && p.Metadata().Name != descriptor.Name { + return false + + } + // If type is specified, it must match + if descriptor.Type != "" && p.Metadata().Type != descriptor.Type { + return false + } + return true + } +} + +// FindPlugin returns a single plugin that matches the descriptor +func FindPlugin(dirs []string, descriptor Descriptor) (Plugin, error) { + plugins, err := FindPlugins(dirs, descriptor) + if err != nil { + return nil, err + } + + if len(plugins) > 0 { + return plugins[0], nil + } + + return nil, fmt.Errorf("plugin: %+v not found", descriptor) +} + +func detectDuplicates(plugs []Plugin) error { + names := map[string]string{} + + for _, plug := range plugs { + if oldpath, ok := names[plug.Metadata().Name]; ok { + return fmt.Errorf( + "two plugins claim the name %q at %q and %q", + plug.Metadata().Name, + oldpath, + plug.Dir(), + ) + } + names[plug.Metadata().Name] = plug.Dir() + } + + return nil +} diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go new file mode 100644 index 000000000..b80d6a096 --- /dev/null +++ b/internal/plugin/loader_test.go @@ -0,0 +1,197 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPeekAPIVersion(t *testing.T) { + testCases := map[string]struct { + data []byte + expected string + }{ + "legacy": { // No apiVersion field + data: []byte(`--- +name: "test-plugin" +`), + expected: "", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + version, err := peekAPIVersion(bytes.NewReader(tc.data)) + require.NoError(t, err) + assert.Equal(t, tc.expected, version) + }) + } + + // invalid yaml + { + data := []byte(`bad yaml`) + _, err := peekAPIVersion(bytes.NewReader(data)) + assert.Error(t, err) + } +} + +func TestLoadDir(t *testing.T) { + + makeMetadata := func(apiVersion string) Metadata { + usage := "hello [params]..." + if apiVersion == "legacy" { + usage = "" // Legacy plugins don't have Usage field for command syntax + } + return Metadata{ + APIVersion: apiVersion, + Name: fmt.Sprintf("hello-%s", apiVersion), + Version: "0.1.0", + Type: "cli/v1", + Runtime: "subprocess", + Config: &ConfigCLI{ + Usage: usage, + ShortHelp: "echo hello message", + LongHelp: "description", + IgnoreFlags: true, + }, + RuntimeConfig: &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{ + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}}, + {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}}, + }, + PlatformHooks: map[string][]PlatformCommand{ + Install: { + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, + {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}}, + }, + }, + }, + } + } + + testCases := map[string]struct { + dirname string + apiVersion string + expect Metadata + }{ + "legacy": { + dirname: "testdata/plugdir/good/hello-legacy", + apiVersion: "legacy", + expect: makeMetadata("legacy"), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + plug, err := LoadDir(tc.dirname) + require.NoError(t, err, "error loading plugin from %s", tc.dirname) + + assert.Equal(t, tc.dirname, plug.Dir()) + assert.EqualValues(t, tc.expect, plug.Metadata()) + }) + } +} + +func TestLoadDirDuplicateEntries(t *testing.T) { + testCases := map[string]string{ + "legacy": "testdata/plugdir/bad/duplicate-entries-legacy", + } + for name, dirname := range testCases { + t.Run(name, func(t *testing.T) { + _, err := LoadDir(dirname) + assert.Error(t, err) + }) + } +} + +func TestDetectDuplicates(t *testing.T) { + plugs := []Plugin{ + mockSubprocessCLIPlugin(t, "foo"), + mockSubprocessCLIPlugin(t, "bar"), + } + if err := detectDuplicates(plugs); err != nil { + t.Error("no duplicates in the first set") + } + plugs = append(plugs, mockSubprocessCLIPlugin(t, "foo")) + if err := detectDuplicates(plugs); err == nil { + t.Error("duplicates in the second set") + } +} + +func TestLoadAll(t *testing.T) { + // Verify that empty dir loads: + { + plugs, err := LoadAll("testdata") + require.NoError(t, err) + assert.Len(t, plugs, 0) + } + + basedir := "testdata/plugdir/good" + plugs, err := LoadAll(basedir) + require.NoError(t, err) + require.NotEmpty(t, plugs, "expected plugins to be loaded from %s", basedir) + + plugsMap := map[string]Plugin{} + for _, p := range plugs { + plugsMap[p.Metadata().Name] = p + } + + assert.Len(t, plugsMap, 3) + assert.Contains(t, 
plugsMap, "downloader") + assert.Contains(t, plugsMap, "echo-legacy") + assert.Contains(t, plugsMap, "hello-legacy") +} + +func TestFindPlugins(t *testing.T) { + cases := []struct { + name string + plugdirs string + expected int + }{ + { + name: "plugdirs is empty", + plugdirs: "", + expected: 0, + }, + { + name: "plugdirs isn't dir", + plugdirs: "./plugin_test.go", + expected: 0, + }, + { + name: "plugdirs doesn't have plugin", + plugdirs: ".", + expected: 0, + }, + { + name: "normal", + plugdirs: "./testdata/plugdir/good", + expected: 3, + }, + } + for _, c := range cases { + t.Run(t.Name(), func(t *testing.T) { + plugin, err := LoadAll(c.plugdirs) + require.NoError(t, err) + assert.Len(t, plugin, c.expected, "expected %d plugins, got %d", c.expected, len(plugin)) + }) + } +} diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go new file mode 100644 index 000000000..b899ef336 --- /dev/null +++ b/internal/plugin/metadata.go @@ -0,0 +1,155 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "errors" + "fmt" +) + +// Metadata of a plugin, converted from the "on-disk" plugin.yaml +// Specifically, Config and RuntimeConfig are converted to their respective types based on the plugin type and runtime +type Metadata struct { + // APIVersion specifies the plugin API version + APIVersion string + + // Name is the name of the plugin + Name string + + // Type of plugin (eg, cli/v1, getter/v1) + Type string + + // Runtime specifies the runtime type (subprocess, wasm) + Runtime string + + // Version is the SemVer 2 version of the plugin. + Version string + + // SourceURL is the URL where this plugin can be found + SourceURL string + + // Config contains the type-specific configuration for this plugin + Config Config + + // RuntimeConfig contains the runtime-specific configuration + RuntimeConfig RuntimeConfig +} + +func (m Metadata) Validate() error { + var errs []error + + if !validPluginName.MatchString(m.Name) { + errs = append(errs, fmt.Errorf("invalid name")) + } + + if m.APIVersion == "" { + errs = append(errs, fmt.Errorf("empty APIVersion")) + } + + if m.Type == "" { + errs = append(errs, fmt.Errorf("empty type field")) + } + + if m.Runtime == "" { + errs = append(errs, fmt.Errorf("empty runtime field")) + } + + if m.Config == nil { + errs = append(errs, fmt.Errorf("missing config field")) + } + + if m.RuntimeConfig == nil { + errs = append(errs, fmt.Errorf("missing runtimeConfig field")) + } + + // Validate the config itself + if m.Config != nil { + if err := m.Config.Validate(); err != nil { + errs = append(errs, fmt.Errorf("config validation failed: %w", err)) + } + } + + // Validate the runtime config itself + if m.RuntimeConfig != nil { + if err := m.RuntimeConfig.Validate(); err != nil { + errs = append(errs, fmt.Errorf("runtime config validation failed: %w", err)) + } + } + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + + return nil +} + +func fromMetadataLegacy(m MetadataLegacy) *Metadata { + pluginType := "cli/v1" + + if len(m.Downloaders) > 0 { + pluginType = "getter/v1" + } + + return &Metadata{ + APIVersion: "legacy", + Name: m.Name, + Version: m.Version, + Type: pluginType, + Runtime: "subprocess", + Config: buildLegacyConfig(m, pluginType), + RuntimeConfig: buildLegacyRuntimeConfig(m), + } +} + +func buildLegacyConfig(m MetadataLegacy, pluginType string) Config { + switch pluginType { + case "getter/v1": + var protocols []string + for _, d := range m.Downloaders { + protocols = append(protocols, d.Protocols...) + } + return &ConfigGetter{ + Protocols: protocols, + } + case "cli/v1": + return &ConfigCLI{ + Usage: "", // Legacy plugins don't have Usage field for command syntax + ShortHelp: m.Usage, // Map legacy usage to shortHelp + LongHelp: m.Description, // Map legacy description to longHelp + IgnoreFlags: m.IgnoreFlags, + } + default: + return nil + } +} + +func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig { + var protocolCommands []SubprocessProtocolCommand + if len(m.Downloaders) > 0 { + protocolCommands = + make([]SubprocessProtocolCommand, 0, len(m.Downloaders)) + for _, d := range m.Downloaders { + protocolCommands = append(protocolCommands, SubprocessProtocolCommand(d)) + } + } + return &RuntimeConfigSubprocess{ + PlatformCommands: m.PlatformCommands, + Command: m.Command, + PlatformHooks: m.PlatformHooks, + Hooks: m.Hooks, + ProtocolCommands: protocolCommands, + } +} diff --git a/internal/plugin/metadata_legacy.go b/internal/plugin/metadata_legacy.go new file mode 100644 index 000000000..ce9c2f580 --- /dev/null +++ b/internal/plugin/metadata_legacy.go @@ -0,0 +1,113 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "fmt" + "strings" + "unicode" +) + +// Downloaders represents the plugins capability if it can retrieve +// charts from special sources +type Downloaders struct { + // Protocols are the list of schemes from the charts URL. + Protocols []string `yaml:"protocols"` + // Command is the executable path with which the plugin performs + // the actual download for the corresponding Protocols + Command string `yaml:"command"` +} + +// MetadataLegacy is the legacy plugin.yaml format +type MetadataLegacy struct { + // Name is the name of the plugin + Name string `yaml:"name"` + + // Version is a SemVer 2 version of the plugin. + Version string `yaml:"version"` + + // Usage is the single-line usage text shown in help + Usage string `yaml:"usage"` + + // Description is a long description shown in places like `helm help` + Description string `yaml:"description"` + + // PlatformCommands is the plugin command, with a platform selector and support for args. + PlatformCommands []PlatformCommand `yaml:"platformCommand"` + + // Command is the plugin command, as a single string. + // DEPRECATED: Use PlatformCommand instead. Removed in subprocess/v1 plugins. 
+ Command string `yaml:"command"` + + // IgnoreFlags ignores any flags passed in from Helm + IgnoreFlags bool `yaml:"ignoreFlags"` + + // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args. + PlatformHooks PlatformHooks `yaml:"platformHooks"` + + // Hooks are commands that will run on plugin events, as a single string. + // DEPRECATED: Use PlatformHooks instead. Removed in subprocess/v1 plugins. + Hooks Hooks `yaml:"hooks"` + + // Downloaders field is used if the plugin supply downloader mechanism + // for special protocols. + Downloaders []Downloaders `yaml:"downloaders"` +} + +func (m *MetadataLegacy) Validate() error { + if !validPluginName.MatchString(m.Name) { + return fmt.Errorf("invalid plugin name") + } + m.Usage = sanitizeString(m.Usage) + + if len(m.PlatformCommands) > 0 && len(m.Command) > 0 { + return fmt.Errorf("both platformCommand and command are set") + } + + if len(m.PlatformHooks) > 0 && len(m.Hooks) > 0 { + return fmt.Errorf("both platformHooks and hooks are set") + } + + // Validate downloader plugins + for i, downloader := range m.Downloaders { + if downloader.Command == "" { + return fmt.Errorf("downloader %d has empty command", i) + } + if len(downloader.Protocols) == 0 { + return fmt.Errorf("downloader %d has no protocols", i) + } + for j, protocol := range downloader.Protocols { + if protocol == "" { + return fmt.Errorf("downloader %d has empty protocol at index %d", i, j) + } + } + } + + return nil +} + +// sanitizeString normalize spaces and removes non-printable characters. +func sanitizeString(str string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + if unicode.IsPrint(r) { + return r + } + return -1 + }, str) +} diff --git a/internal/plugin/metadata_test.go b/internal/plugin/metadata_test.go new file mode 100644 index 000000000..810020a67 --- /dev/null +++ b/internal/plugin/metadata_test.go @@ -0,0 +1,141 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "strings" + "testing" +) + +func TestValidatePluginData(t *testing.T) { + + // A mock plugin with no commands + mockNoCommand := mockSubprocessCLIPlugin(t, "foo") + mockNoCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{}, + PlatformHooks: map[string][]PlatformCommand{}, + } + + // A mock plugin with legacy commands + mockLegacyCommand := mockSubprocessCLIPlugin(t, "foo") + mockLegacyCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{}, + Command: "echo \"mock plugin\"", + PlatformHooks: map[string][]PlatformCommand{}, + Hooks: map[string]string{ + Install: "echo installing...", + }, + } + + // A mock plugin with a command also set + mockWithCommand := mockSubprocessCLIPlugin(t, "foo") + mockWithCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{ + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, + }, + Command: "echo \"mock plugin\"", + } + + // A mock plugin with a hooks also set + mockWithHooks := mockSubprocessCLIPlugin(t, "foo") + mockWithHooks.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{ + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, + }, + PlatformHooks: map[string][]PlatformCommand{ + Install: { + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, + }, + }, + Hooks: map[string]string{ + Install: "echo installing...", + }, + } + + for i, item := range []struct { + pass bool + plug Plugin + errString string + }{ + {true, mockSubprocessCLIPlugin(t, "abcdefghijklmnopqrstuvwxyz0123456789_-ABC"), ""}, + {true, mockSubprocessCLIPlugin(t, "foo-bar-FOO-BAR_1234"), ""}, + {false, mockSubprocessCLIPlugin(t, "foo -bar"), "invalid name"}, + {false, mockSubprocessCLIPlugin(t, "$foo -bar"), "invalid name"}, // Test leading chars + {false, mockSubprocessCLIPlugin(t, "foo -bar "), "invalid name"}, // Test trailing chars + {false, mockSubprocessCLIPlugin(t, "foo\nbar"), "invalid name"}, // Test newline + {true, mockNoCommand, ""}, // Test no command metadata works + {true, mockLegacyCommand, ""}, // Test legacy command metadata works + {false, mockWithCommand, "runtime config validation failed: both platformCommand and command are set"}, // Test platformCommand and command both set fails + {false, mockWithHooks, "runtime config validation failed: both platformHooks and hooks are set"}, // Test platformHooks and hooks both set fails + } { + err := item.plug.Metadata().Validate() + if item.pass && err != nil { + t.Errorf("failed to validate case %d: %s", i, err) + } else if !item.pass && err == nil { + t.Errorf("expected case %d to fail", i) + } + if !item.pass && err.Error() != item.errString { + t.Errorf("index [%d]: expected the following error: %s, but got: %s", i, item.errString, err.Error()) + } + } +} + +func TestMetadataValidateMultipleErrors(t *testing.T) { + // Create metadata with multiple validation issues + metadata := Metadata{ + Name: "invalid name with spaces", // Invalid name + APIVersion: "", // Empty API version + Type: "", // Empty type + Runtime: "", // Empty runtime + Config: nil, // Missing config + RuntimeConfig: nil, // Missing runtime config + } + + err := metadata.Validate() + if err == nil { + t.Fatal("expected validation to fail with multiple errors") + } + + errStr := err.Error() 
+ + // Check that all expected errors are present in the joined error + expectedErrors := []string{ + "invalid name", + "empty APIVersion", + "empty type field", + "empty runtime field", + "missing config field", + "missing runtimeConfig field", + } + + for _, expectedErr := range expectedErrors { + if !strings.Contains(errStr, expectedErr) { + t.Errorf("expected error to contain %q, but got: %v", expectedErr, errStr) + } + } + + // Verify that the error contains the correct number of error messages + errorCount := 0 + for _, expectedErr := range expectedErrors { + if strings.Contains(errStr, expectedErr) { + errorCount++ + } + } + + if errorCount < len(expectedErrors) { + t.Errorf("expected %d errors, but only found %d in: %v", len(expectedErrors), errorCount, errStr) + } +} diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go index 11ab71352..132b1739e 100644 --- a/internal/plugin/plugin.go +++ b/internal/plugin/plugin.go @@ -13,359 +13,69 @@ See the License for the specific language governing permissions and limitations under the License. */ -package plugin // import "helm.sh/helm/v4/pkg/plugin" +package plugin // import "helm.sh/helm/v4/internal/plugin" import ( - "fmt" - "log/slog" - "os" - "path/filepath" + "context" + "io" "regexp" - "runtime" - "strings" - "unicode" - - "sigs.k8s.io/yaml" - - "helm.sh/helm/v4/pkg/cli" ) const PluginFileName = "plugin.yaml" -// Downloaders represents the plugins capability if it can retrieve -// charts from special sources -type Downloaders struct { - // Protocols are the list of schemes from the charts URL. - Protocols []string `json:"protocols"` - // Command is the executable path with which the plugin performs - // the actual download for the corresponding Protocols - Command string `json:"command"` -} - -// PlatformCommand represents a command for a particular operating system and architecture -type PlatformCommand struct { - OperatingSystem string `json:"os"` - Architecture string `json:"arch"` - Command string `json:"command"` - Args []string `json:"args"` -} - -// Metadata describes a plugin. -// -// This is the plugin equivalent of a chart.Metadata. -type Metadata struct { - // Name is the name of the plugin - Name string `json:"name"` - - // Version is a SemVer 2 version of the plugin. - Version string `json:"version"` +// Plugin defines a plugin instance. The client (Helm codebase) facing type that can be used to introspect and invoke a plugin +type Plugin interface { + // Dir return the plugin directory (as an absolute path) on the filesystem + Dir() string - // Usage is the single-line usage text shown in help - Usage string `json:"usage"` + // Metadata describes the plugin's type, version, etc. + // (This metadata type is the converted and plugin version independented in-memory representation of the plugin.yaml file) + Metadata() Metadata - // Description is a long description shown in places like `helm help` - Description string `json:"description"` - - // PlatformCommand is the plugin command, with a platform selector and support for args. - // - // The command and args will be passed through environment expansion, so env vars can - // be present in this command. Unless IgnoreFlags is set, this will - // also merge the flags passed from Helm. - // - // Note that the command is not executed in a shell. To do so, we suggest - // pointing the command to a shell script. 
- // - // The following rules will apply to processing platform commands: - // - If PlatformCommand is present, it will be used - // - If both OS and Arch match the current platform, search will stop and the command will be executed - // - If OS matches and Arch is empty, the command will be executed - // - If no OS/Arch match is found, the default command will be executed - // - If no matches are found in platformCommand, Helm will exit with an error - PlatformCommand []PlatformCommand `json:"platformCommand"` - - // Command is the plugin command, as a single string. - // Providing Command and PlatformCommand will result in a warning being emitted (PlatformCommand takes precedence). - // - // The command will be passed through environment expansion, so env vars can - // be present in this command. Unless IgnoreFlags is set, this will - // also merge the flags passed from Helm. + // Invoke takes the given input, and dispatches the contents to plugin instance + // The input is expected to be a JSON-serializable object, which the plugin will interpret according to its type + // The plugin is expected to return a JSON-serializable object, which the invoker + // will interpret according to the plugin's type // - // Note that command is not executed in a shell. To do so, we suggest - // pointing the command to a shell script. - // - // DEPRECATED: Use PlatformCommand instead - Command string `json:"command"` - - // IgnoreFlags ignores any flags passed in from Helm + // Invoke can be thought of as a request/response mechanism. Similar to e.g. http.RoundTripper // - // For example, if the plugin is invoked as `helm --debug myplugin`, if this - // is false, `--debug` will be appended to `--command`. If this is true, - // the `--debug` flag will be discarded. - IgnoreFlags bool `json:"ignoreFlags"` - - // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args. - // - // The command and args will be passed through environment expansion, so env vars can - // be present in the command. - // - // Note that the command is not executed in a shell. To do so, we suggest - // pointing the command to a shell script. - // - // The following rules will apply to processing platform hooks: - // - If PlatformHooks is present, it will be used - // - If both OS and Arch match the current platform, search will stop and the command will be executed - // - If OS matches and Arch is empty, the command will be executed - // - If no OS/Arch match is found, the default command will be executed - // - If no matches are found in platformHooks, Helm will skip the event - PlatformHooks PlatformHooks `json:"platformHooks"` - - // Hooks are commands that will run on plugin events, as a single string. - // Providing Hook and PlatformHooks will result in a warning being emitted (PlatformHooks takes precedence). - // - // The command will be passed through environment expansion, so env vars can - // be present in this command. - // - // Note that the command is executed in the sh shell. - // - // DEPRECATED: Use PlatformHooks instead - Hooks Hooks - - // Downloaders field is used if the plugin supply downloader mechanism - // for special protocols. - Downloaders []Downloaders `json:"downloaders"` -} - -// Plugin represents a plugin. -type Plugin struct { - // Metadata is a parsed representation of a plugin.yaml - Metadata *Metadata - // Dir is the string path to the directory that holds the plugin. 
- Dir string + // If plugin's execution fails with a non-zero "return code" (this is plugin runtime implementation specific) + // an InvokeExecError is returned + Invoke(ctx context.Context, input *Input) (*Output, error) } -// Returns command and args strings based on the following rules in priority order: -// - From the PlatformCommand where OS and Arch match the current platform -// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified -// - From the PlatformCommand where OS is empty/unspecified and Arch matches the current platform -// - From the PlatformCommand where OS and Arch are both empty/unspecified -// - Return nil, nil -func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) { - var command, args []string - found := false - foundOs := false - - eq := strings.EqualFold - for _, c := range cmds { - if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) { - // Return early for an exact match - return strings.Split(c.Command, " "), c.Args - } - - if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 { - // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match - continue - } - - if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) { - // First OS match with empty arch, can only be overridden by a direct match - command = strings.Split(c.Command, " ") - args = c.Args - found = true - foundOs = true - } else if !found { - // First empty match, can be overridden by a direct match or an OS match - command = strings.Split(c.Command, " ") - args = c.Args - found = true - } - } - - return command, args +// PluginHook allows plugins to implement hooks that are invoked on plugin management events (install, upgrade, etc) +type PluginHook interface { //nolint:revive + InvokeHook(event string) error } -// PrepareCommands takes a []Plugin.PlatformCommand -// and prepares the command and arguments for execution. -// -// It merges extraArgs into any arguments supplied in the plugin. It -// returns the main command and an args array. -// -// The result is suitable to pass to exec.Command. -func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) { - cmdParts, args := getPlatformCommand(cmds) - if len(cmdParts) == 0 || cmdParts[0] == "" { - return "", nil, fmt.Errorf("no plugin command is applicable") - } +// Input defines the input message and parameters to be passed to the plugin +type Input struct { + // Message represents the type-elided value to be passed to the plugin. + // The plugin is expected to interpret the message according to its type + // The message object must be JSON-serializable + Message any - main := os.ExpandEnv(cmdParts[0]) - baseArgs := []string{} - if len(cmdParts) > 1 { - for _, cmdPart := range cmdParts[1:] { - if expandArgs { - baseArgs = append(baseArgs, os.ExpandEnv(cmdPart)) - } else { - baseArgs = append(baseArgs, cmdPart) - } - } - } + // Optional: Reader to be consumed plugin's "stdin" + Stdin io.Reader - for _, arg := range args { - if expandArgs { - baseArgs = append(baseArgs, os.ExpandEnv(arg)) - } else { - baseArgs = append(baseArgs, arg) - } - } + // Optional: Writers to consume the plugin's "stdout" and "stderr" + Stdout, Stderr io.Writer - if len(extraArgs) > 0 { - baseArgs = append(baseArgs, extraArgs...) 
- } - - return main, baseArgs, nil + // Optional: Env represents the environment as a list of "key=value" strings + // see os.Environ + Env []string } -// PrepareCommand gets the correct command and arguments for a plugin. -// -// It merges extraArgs into any arguments supplied in the plugin. It returns the name of the command and an args array. -// -// The result is suitable to pass to exec.Command. -func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) { - var extraArgsIn []string - - if !p.Metadata.IgnoreFlags { - extraArgsIn = extraArgs - } - - cmds := p.Metadata.PlatformCommand - if len(cmds) == 0 && len(p.Metadata.Command) > 0 { - cmds = []PlatformCommand{{Command: p.Metadata.Command}} - } - - return PrepareCommands(cmds, true, extraArgsIn) +// Output defines the output message and parameters the passed from the plugin +type Output struct { + // Message represents the type-elided value returned from the plugin + // The invoker is expected to interpret the message according to the plugin's type + // The message object must be JSON-serializable + Message any } // validPluginName is a regular expression that validates plugin names. // // Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, ​_​ and ​-. var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$") - -// validatePluginData validates a plugin's YAML data. -func validatePluginData(plug *Plugin, filepath string) error { - // When metadata section missing, initialize with no data - if plug.Metadata == nil { - plug.Metadata = &Metadata{} - } - if !validPluginName.MatchString(plug.Metadata.Name) { - return fmt.Errorf("invalid plugin name at %q", filepath) - } - plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage) - - if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 { - slog.Warn("both 'platformCommand' and 'command' are set (this will become an error in a future Helm version)", slog.String("filepath", filepath)) - } - - if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 { - slog.Warn("both 'platformHooks' and 'hooks' are set (this will become an error in a future Helm version)", slog.String("filepath", filepath)) - } - - // We could also validate SemVer, executable, and other fields should we so choose. - return nil -} - -// sanitizeString normalize spaces and removes non-printable characters. -func sanitizeString(str string) string { - return strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return ' ' - } - if unicode.IsPrint(r) { - return r - } - return -1 - }, str) -} - -func detectDuplicates(plugs []*Plugin) error { - names := map[string]string{} - - for _, plug := range plugs { - if oldpath, ok := names[plug.Metadata.Name]; ok { - return fmt.Errorf( - "two plugins claim the name %q at %q and %q", - plug.Metadata.Name, - oldpath, - plug.Dir, - ) - } - names[plug.Metadata.Name] = plug.Dir - } - - return nil -} - -// LoadDir loads a plugin from the given directory. -func LoadDir(dirname string) (*Plugin, error) { - pluginfile := filepath.Join(dirname, PluginFileName) - data, err := os.ReadFile(pluginfile) - if err != nil { - return nil, fmt.Errorf("failed to read plugin at %q: %w", pluginfile, err) - } - - plug := &Plugin{Dir: dirname} - if err := yaml.UnmarshalStrict(data, &plug.Metadata); err != nil { - return nil, fmt.Errorf("failed to load plugin at %q: %w", pluginfile, err) - } - return plug, validatePluginData(plug, pluginfile) -} - -// LoadAll loads all plugins found beneath the base directory. 
-// -// This scans only one directory level. -func LoadAll(basedir string) ([]*Plugin, error) { - plugins := []*Plugin{} - // We want basedir/*/plugin.yaml - scanpath := filepath.Join(basedir, "*", PluginFileName) - matches, err := filepath.Glob(scanpath) - if err != nil { - return plugins, fmt.Errorf("failed to find plugins in %q: %w", scanpath, err) - } - - if matches == nil { - return plugins, nil - } - - for _, yaml := range matches { - dir := filepath.Dir(yaml) - p, err := LoadDir(dir) - if err != nil { - return plugins, err - } - plugins = append(plugins, p) - } - return plugins, detectDuplicates(plugins) -} - -// FindPlugins returns a list of YAML files that describe plugins. -func FindPlugins(plugdirs string) ([]*Plugin, error) { - found := []*Plugin{} - // Let's get all UNIXy and allow path separators - for _, p := range filepath.SplitList(plugdirs) { - matches, err := LoadAll(p) - if err != nil { - return matches, err - } - found = append(found, matches...) - } - return found, nil -} - -// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because -// the plugin subsystem itself needs access to the environment variables -// created here. -func SetupPluginEnv(settings *cli.EnvSettings, name, base string) { - env := settings.EnvVars() - env["HELM_PLUGIN_NAME"] = name - env["HELM_PLUGIN_DIR"] = base - for key, val := range env { - os.Setenv(key, val) - } -} diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index 20bd2f737..3c78006b7 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -13,290 +13,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -package plugin // import "helm.sh/helm/v4/pkg/plugin" +package plugin import ( - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" "testing" - - "helm.sh/helm/v4/pkg/cli" ) -func TestPrepareCommand(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - - p := &Plugin{ - Dir: "/tmp", // Unused - Metadata: &Metadata{ - Name: "test", - Command: "echo \"error\"", - PlatformCommand: []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, - }, - }, - } - - cmd, args, err := p.PrepareCommand([]string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestPrepareCommandExtraArgs(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - extraArgs := []string{"--debug", "--foo", "bar"} - - p := &Plugin{ - Dir: "/tmp", // Unused - Metadata: &Metadata{ - Name: "test", - Command: "echo \"error\"", - PlatformCommand: []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: 
"pwsh", Args: []string{"-c", "echo \"error\""}}, - }, - }, - } - - expectedArgs := append(cmdArgs, extraArgs...) - - cmd, args, err := p.PrepareCommand(extraArgs) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, expectedArgs) { - t.Fatalf("Expected %v, got %v", expectedArgs, args) - } -} - -func TestPrepareCommandExtraArgsIgnored(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - extraArgs := []string{"--debug", "--foo", "bar"} - - p := &Plugin{ - Dir: "/tmp", // Unused - Metadata: &Metadata{ - Name: "test", - Command: "echo \"error\"", - PlatformCommand: []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - }, - IgnoreFlags: true, - }, - } - - cmd, args, err := p.PrepareCommand(extraArgs) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestPrepareCommands(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - - cmds := []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - } - - cmd, args, err := PrepareCommands(cmds, true, []string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestPrepareCommandsExtraArgs(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - extraArgs := []string{"--debug", "--foo", "bar"} - - cmds := []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - } - - expectedArgs := append(cmdArgs, extraArgs...) 
- - cmd, args, err := PrepareCommands(cmds, true, extraArgs) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, expectedArgs) { - t.Fatalf("Expected %v, got %v", expectedArgs, args) - } -} - -func TestPrepareCommandsNoArch(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - - cmds := []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - } - - cmd, args, err := PrepareCommands(cmds, true, []string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestPrepareCommandsNoOsNoArch(t *testing.T) { - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"test\""} - - cmds := []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - {OperatingSystem: "", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, - } - - cmd, args, err := PrepareCommands(cmds, true, []string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestPrepareCommandsNoMatch(t *testing.T) { - cmds := []PlatformCommand{ - {OperatingSystem: "no-os", Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}}, - {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}}, - {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}}, - } - - if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { - t.Fatalf("Expected error to be returned") - } -} - -func TestPrepareCommandsNoCommands(t *testing.T) { - cmds := []PlatformCommand{} - - if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { - t.Fatalf("Expected error to be returned") - } -} - -func TestPrepareCommandsExpand(t *testing.T) { - t.Setenv("TEST", "test") - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"${TEST}\""} - cmds := []PlatformCommand{ - {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs}, - } - - expectedArgs := []string{"-c", "echo \"test\""} - - cmd, args, err := PrepareCommands(cmds, true, []string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if !reflect.DeepEqual(args, expectedArgs) { - t.Fatalf("Expected %v, got %v", expectedArgs, args) - } -} - -func TestPrepareCommandsNoExpand(t *testing.T) { - t.Setenv("TEST", "test") - cmdMain := "sh" - cmdArgs := []string{"-c", "echo \"${TEST}\""} - cmds := []PlatformCommand{ - {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs}, - } - - cmd, args, err := PrepareCommands(cmds, false, []string{}) - if err != nil { - t.Fatal(err) - } - if cmd != cmdMain { - t.Fatalf("Expected %q, got %q", cmdMain, cmd) - } - if 
!reflect.DeepEqual(args, cmdArgs) { - t.Fatalf("Expected %v, got %v", cmdArgs, args) - } -} - -func TestLoadDir(t *testing.T) { - dirname := "testdata/plugdir/good/hello" - plug, err := LoadDir(dirname) - if err != nil { - t.Fatalf("error loading Hello plugin: %s", err) - } - - if plug.Dir != dirname { - t.Fatalf("Expected dir %q, got %q", dirname, plug.Dir) - } +func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginRuntime { + t.Helper() - expect := &Metadata{ - Name: "hello", - Version: "0.1.0", - Usage: "usage", - Description: "description", - PlatformCommand: []PlatformCommand{ - {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}}, - {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}}, + rc := RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{ + {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, + {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}}, }, - IgnoreFlags: true, PlatformHooks: map[string][]PlatformCommand{ Install: { {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, @@ -305,241 +35,24 @@ func TestLoadDir(t *testing.T) { }, } - if !reflect.DeepEqual(expect, plug.Metadata) { - t.Fatalf("Expected plugin metadata %v, got %v", expect, plug.Metadata) - } -} - -func TestLoadDirDuplicateEntries(t *testing.T) { - dirname := "testdata/plugdir/bad/duplicate-entries" - if _, err := LoadDir(dirname); err == nil { - t.Errorf("successfully loaded plugin with duplicate entries when it should've failed") - } -} + pluginDir := t.TempDir() -func TestDownloader(t *testing.T) { - dirname := "testdata/plugdir/good/downloader" - plug, err := LoadDir(dirname) - if err != nil { - t.Fatalf("error loading Hello plugin: %s", err) - } - - if plug.Dir != dirname { - t.Fatalf("Expected dir %q, got %q", dirname, plug.Dir) - } - - expect := &Metadata{ - Name: "downloader", - Version: "1.2.3", - Usage: "usage", - Description: "download something", - Command: "echo Hello", - Downloaders: []Downloaders{ - { - Protocols: []string{"myprotocol", "myprotocols"}, - Command: "echo Download", - }, - }, - } - - if !reflect.DeepEqual(expect, plug.Metadata) { - t.Fatalf("Expected metadata %v, got %v", expect, plug.Metadata) - } -} - -func TestLoadAll(t *testing.T) { - // Verify that empty dir loads: - if plugs, err := LoadAll("testdata"); err != nil { - t.Fatalf("error loading dir with no plugins: %s", err) - } else if len(plugs) > 0 { - t.Fatalf("expected empty dir to have 0 plugins") - } - - basedir := "testdata/plugdir/good" - plugs, err := LoadAll(basedir) - if err != nil { - t.Fatalf("Could not load %q: %s", basedir, err) - } - - if l := len(plugs); l != 3 { - t.Fatalf("expected 3 plugins, found %d", l) - } - - if plugs[0].Metadata.Name != "downloader" { - t.Errorf("Expected first plugin to be echo, got %q", plugs[0].Metadata.Name) - } - if plugs[1].Metadata.Name != "echo" { - t.Errorf("Expected first plugin to be echo, got %q", plugs[0].Metadata.Name) - } - if plugs[2].Metadata.Name != "hello" { - t.Errorf("Expected second plugin to be hello, got %q", plugs[1].Metadata.Name) - } -} - -func TestFindPlugins(t *testing.T) { - cases := []struct { - name string - plugdirs string - expected int - }{ - { - name: "plugdirs is empty", - plugdirs: "", - expected: 0, - }, - { - name: "plugdirs 
isn't dir", - plugdirs: "./plugin_test.go", - expected: 0, - }, - { - name: "plugdirs doesn't have plugin", - plugdirs: ".", - expected: 0, - }, - { - name: "normal", - plugdirs: "./testdata/plugdir/good", - expected: 3, - }, - } - for _, c := range cases { - t.Run(t.Name(), func(t *testing.T) { - plugin, _ := FindPlugins(c.plugdirs) - if len(plugin) != c.expected { - t.Errorf("expected: %v, got: %v", c.expected, len(plugin)) - } - }) - } -} - -func TestSetupEnv(t *testing.T) { - name := "pequod" - base := filepath.Join("testdata/helmhome/helm/plugins", name) - - s := cli.New() - s.PluginsDirectory = "testdata/helmhome/helm/plugins" - - SetupPluginEnv(s, name, base) - for _, tt := range []struct { - name, expect string - }{ - {"HELM_PLUGIN_NAME", name}, - {"HELM_PLUGIN_DIR", base}, - } { - if got := os.Getenv(tt.name); got != tt.expect { - t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) - } - } -} - -func TestSetupEnvWithSpace(t *testing.T) { - name := "sureshdsk" - base := filepath.Join("testdata/helm home/helm/plugins", name) - - s := cli.New() - s.PluginsDirectory = "testdata/helm home/helm/plugins" - - SetupPluginEnv(s, name, base) - for _, tt := range []struct { - name, expect string - }{ - {"HELM_PLUGIN_NAME", name}, - {"HELM_PLUGIN_DIR", base}, - } { - if got := os.Getenv(tt.name); got != tt.expect { - t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) - } - } -} - -func TestValidatePluginData(t *testing.T) { - // A mock plugin missing any metadata. - mockMissingMeta := &Plugin{ - Dir: "no-such-dir", - } - - // A mock plugin with no commands - mockNoCommand := mockPlugin("foo") - mockNoCommand.Metadata.PlatformCommand = []PlatformCommand{} - mockNoCommand.Metadata.PlatformHooks = map[string][]PlatformCommand{} - - // A mock plugin with legacy commands - mockLegacyCommand := mockPlugin("foo") - mockLegacyCommand.Metadata.PlatformCommand = []PlatformCommand{} - mockLegacyCommand.Metadata.Command = "echo \"mock plugin\"" - mockLegacyCommand.Metadata.PlatformHooks = map[string][]PlatformCommand{} - mockLegacyCommand.Metadata.Hooks = map[string]string{ - Install: "echo installing...", - } - - // A mock plugin with a command also set - mockWithCommand := mockPlugin("foo") - mockWithCommand.Metadata.Command = "echo \"mock plugin\"" - - // A mock plugin with a hooks also set - mockWithHooks := mockPlugin("foo") - mockWithHooks.Metadata.Hooks = map[string]string{ - Install: "echo installing...", - } - - for i, item := range []struct { - pass bool - plug *Plugin - }{ - {true, mockPlugin("abcdefghijklmnopqrstuvwxyz0123456789_-ABC")}, - {true, mockPlugin("foo-bar-FOO-BAR_1234")}, - {false, mockPlugin("foo -bar")}, - {false, mockPlugin("$foo -bar")}, // Test leading chars - {false, mockPlugin("foo -bar ")}, // Test trailing chars - {false, mockPlugin("foo\nbar")}, // Test newline - {false, mockMissingMeta}, // Test if the metadata section missing - {true, mockNoCommand}, // Test no command metadata works - {true, mockLegacyCommand}, // Test legacy command metadata works - {true, mockWithCommand}, // Test platformCommand and command both set works - {true, mockWithHooks}, // Test platformHooks and hooks both set works - } { - err := validatePluginData(item.plug, fmt.Sprintf("test-%d", i)) - if item.pass && err != nil { - t.Errorf("failed to validate case %d: %s", i, err) - } else if !item.pass && err == nil { - t.Errorf("expected case %d to fail", i) - } - } -} - -func TestDetectDuplicates(t *testing.T) { - plugs := []*Plugin{ - mockPlugin("foo"), - mockPlugin("bar"), - } - 
if err := detectDuplicates(plugs); err != nil { - t.Error("no duplicates in the first set") - } - plugs = append(plugs, mockPlugin("foo")) - if err := detectDuplicates(plugs); err == nil { - t.Error("duplicates in the second set") - } -} - -func mockPlugin(name string) *Plugin { - return &Plugin{ - Metadata: &Metadata{ - Name: name, - Version: "v0.1.2", - Usage: "Mock plugin", - Description: "Mock plugin for testing", - PlatformCommand: []PlatformCommand{ - {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, - {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}}, - }, - PlatformHooks: map[string][]PlatformCommand{ - Install: { - {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, - {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}}, - }, + return &SubprocessPluginRuntime{ + metadata: Metadata{ + Name: pluginName, + Version: "v0.1.2", + Type: "cli/v1", + APIVersion: "legacy", + Runtime: "subprocess", + Config: &ConfigCLI{ + Usage: "Mock plugin", + ShortHelp: "Mock plugin", + LongHelp: "Mock plugin for testing", + IgnoreFlags: false, }, + RuntimeConfig: &rc, }, - Dir: "no-such-dir", + pluginDir: pluginDir, // NOTE: dir is empty (ie. plugin.yaml is not present) + RuntimeConfig: rc, } } diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go new file mode 100644 index 000000000..87f068724 --- /dev/null +++ b/internal/plugin/runtime.go @@ -0,0 +1,33 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +// Runtime represents a plugin runtime (subprocess, extism, etc) ie. how a plugin should be executed +// Runtime is responsible for instantiating plugins that implement the runtime +// TODO: could call this something more like "PluginRuntimeCreator"? +type Runtime interface { + // CreatePlugin creates a plugin instance from the given metadata + CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) + + // TODO: move config unmarshalling to the runtime? + // UnmarshalConfig(runtimeConfigRaw map[string]any) (RuntimeConfig, error) +} + +// RuntimeConfig represents the assertable type for a plugin's runtime configuration. +// It is expected to type assert (cast) the a RuntimeConfig to its expected type +type RuntimeConfig interface { + Validate() error +} diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go new file mode 100644 index 000000000..286c1abeb --- /dev/null +++ b/internal/plugin/runtime_subprocess.go @@ -0,0 +1,229 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"syscall"
+
+	"helm.sh/helm/v4/internal/plugin/schema"
+	"helm.sh/helm/v4/pkg/cli"
+)
+
+// SubprocessProtocolCommand maps a given protocol to the getter command used to retrieve artifacts for that protocol
+type SubprocessProtocolCommand struct {
+	// Protocols are the list of schemes from the charts URL.
+	Protocols []string `yaml:"protocols"`
+	// Command is the executable path with which the plugin performs
+	// the actual download for the corresponding Protocols
+	Command string `yaml:"command"`
+}
+
+// RuntimeConfigSubprocess represents the configuration for the subprocess runtime
+type RuntimeConfigSubprocess struct {
+	// PlatformCommands is a list containing the plugin command, with a platform selector and support for args.
+	PlatformCommands []PlatformCommand `yaml:"platformCommand"`
+	// Command is the plugin command, as a single string.
+	// DEPRECATED: Use PlatformCommand instead. Remove in Helm 4.
+	Command string `yaml:"command"`
+	// PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+	PlatformHooks PlatformHooks `yaml:"platformHooks"`
+	// Hooks are commands that will run on plugin events, as a single string.
+	// DEPRECATED: Use PlatformHooks instead. Remove in Helm 4.
+	Hooks Hooks `yaml:"hooks"`
+	// ProtocolCommands field is used if the plugin supplies a downloader mechanism
+	// for special protocols.
+	// (This is a compatibility hangover from the old plugin downloader mechanism, which was extended to support multiple
+	// protocols in a given plugin)
+	ProtocolCommands []SubprocessProtocolCommand `yaml:"protocolCommands,omitempty"`
+}
+
+var _ RuntimeConfig = (*RuntimeConfigSubprocess)(nil)
+
+func (r *RuntimeConfigSubprocess) GetType() string { return "subprocess" }
+
+func (r *RuntimeConfigSubprocess) Validate() error {
+	if len(r.PlatformCommands) > 0 && len(r.Command) > 0 {
+		return fmt.Errorf("both platformCommand and command are set")
+	}
+	if len(r.PlatformHooks) > 0 && len(r.Hooks) > 0 {
+		return fmt.Errorf("both platformHooks and hooks are set")
+	}
+	return nil
+}
+
+type RuntimeSubprocess struct{}
+
+var _ Runtime = (*RuntimeSubprocess)(nil)
+
+// CreatePlugin implements the Runtime interface for the subprocess runtime
+func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) {
+	return &SubprocessPluginRuntime{
+		metadata:      *metadata,
+		pluginDir:     pluginDir,
+		RuntimeConfig: *(metadata.RuntimeConfig.(*RuntimeConfigSubprocess)),
+	}, nil
+}
+
+// SubprocessPluginRuntime implements the Plugin interface for subprocess execution
+type SubprocessPluginRuntime struct {
+	metadata      Metadata
+	pluginDir     string
+	RuntimeConfig RuntimeConfigSubprocess
+}
+
+var _ Plugin = (*SubprocessPluginRuntime)(nil)
+
+func (r *SubprocessPluginRuntime) Dir() string {
+	return r.pluginDir
+}
+
+func (r *SubprocessPluginRuntime) Metadata() Metadata {
+	return r.metadata
+}
+
+func (r *SubprocessPluginRuntime) Invoke(_ context.Context, input *Input) (*Output, error) {
+	switch input.Message.(type) {
+	case schema.InputMessageCLIV1:
+		return r.runCLI(input)
+	case schema.InputMessageGetterV1:
+		return r.runGetter(input)
+	default:
+		return nil, fmt.Errorf("unsupported subprocess plugin type %q", r.metadata.Type)
+	}
+}
+
+// InvokeWithEnv executes a plugin command with custom environment and I/O streams.
+// This method allows execution with different command/args than the plugin's default.
+func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env []string, stdin io.Reader, stdout, stderr io.Writer) error {
+	mainCmdExp := os.ExpandEnv(main)
+	prog := exec.Command(mainCmdExp, argv...)
+	prog.Env = env
+	prog.Stdin = stdin
+	prog.Stdout = stdout
+	prog.Stderr = stderr
+
+	if err := prog.Run(); err != nil {
+		if eerr, ok := err.(*exec.ExitError); ok {
+			os.Stderr.Write(eerr.Stderr)
+			status := eerr.Sys().(syscall.WaitStatus)
+			return &InvokeExecError{
+				Err:  fmt.Errorf("plugin %q exited with error", r.metadata.Name),
+				Code: status.ExitStatus(),
+			}
+		}
+		// Propagate non-exit errors (e.g. command not found) to the caller
+		return err
+	}
+	return nil
+}
+
+func (r *SubprocessPluginRuntime) InvokeHook(event string) error {
+	// Get hook commands for the event
+	var cmds []PlatformCommand
+	expandArgs := true
+
+	cmds = r.RuntimeConfig.PlatformHooks[event]
+	if len(cmds) == 0 && len(r.RuntimeConfig.Hooks) > 0 {
+		cmd := r.RuntimeConfig.Hooks[event]
+		if len(cmd) > 0 {
+			cmds = []PlatformCommand{{Command: "sh", Args: []string{"-c", cmd}}}
+			expandArgs = false
+		}
+	}
+
+	// If no hook commands are defined, just return successfully
+	if len(cmds) == 0 {
+		return nil
+	}
+
+	main, argv, err := PrepareCommands(cmds, expandArgs, []string{})
+	if err != nil {
+		return err
+	}
+
+	prog := exec.Command(main, argv...)
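+	// Note: hook output is not captured; stdout and stderr are wired straight to the
+	// Helm process's own streams so the user sees hook output directly.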
+ prog.Stdout, prog.Stderr = os.Stdout, os.Stderr + + if err := prog.Run(); err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + os.Stderr.Write(eerr.Stderr) + return fmt.Errorf("plugin %s hook for %q exited with error", event, r.metadata.Name) + } + return err + } + return nil +} + +// TODO decide the best way to handle this code +// right now we implement status and error return in 3 slightly different ways in this file +// then replace the other three with a call to this func +func executeCmd(prog *exec.Cmd, pluginName string) error { + if err := prog.Run(); err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + os.Stderr.Write(eerr.Stderr) + return &InvokeExecError{ + Err: fmt.Errorf("plugin %q exited with error", pluginName), + Code: eerr.ExitCode(), + } + } + + return err + } + + return nil +} + +func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { + if _, ok := input.Message.(schema.InputMessageCLIV1); !ok { + return nil, fmt.Errorf("plugin %q input message does not implement InputMessageCLIV1", r.metadata.Name) + } + + extraArgs := input.Message.(schema.InputMessageCLIV1).ExtraArgs + + cmds := r.RuntimeConfig.PlatformCommands + if len(cmds) == 0 && len(r.RuntimeConfig.Command) > 0 { + cmds = []PlatformCommand{{Command: r.RuntimeConfig.Command}} + } + + command, args, err := PrepareCommands(cmds, true, extraArgs) + if err != nil { + return nil, fmt.Errorf("failed to prepare plugin command: %w", err) + } + + err2 := r.InvokeWithEnv(command, args, input.Env, input.Stdin, input.Stdout, input.Stderr) + if err2 != nil { + return nil, err2 + } + + return &Output{ + Message: &schema.OutputMessageCLIV1{}, + }, nil +} + +// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because +// the plugin subsystem itself needs access to the environment variables +// created here. +func SetupPluginEnv(settings *cli.EnvSettings, name, base string) { // TODO: remove + env := settings.EnvVars() + env["HELM_PLUGIN_NAME"] = name + env["HELM_PLUGIN_DIR"] = base + for key, val := range env { + os.Setenv(key, val) + } +} diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go new file mode 100644 index 000000000..6f9bfea91 --- /dev/null +++ b/internal/plugin/runtime_subprocess_getter.go @@ -0,0 +1,92 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "slices" + "strings" + + "helm.sh/helm/v4/internal/plugin/schema" +) + +func getProtocolCommand(commands []SubprocessProtocolCommand, protocol string) *SubprocessProtocolCommand { + for _, c := range commands { + if slices.Contains(c.Protocols, protocol) { + return &c + } + } + + return nil +} + +// TODO can we replace a lot of this func with RuntimeSubprocess.invokeWithEnv? 
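+//
+// runGetter resolves the protocol-specific download command declared by the plugin,
+// runs it as a subprocess with the request's credentials exposed through HELM_PLUGIN_*
+// environment variables, and returns whatever the command writes to stdout as the
+// downloaded payload.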
+func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { + msg, ok := (input.Message).(schema.InputMessageGetterV1) + if !ok { + return nil, fmt.Errorf("expected input type schema.InputMessageGetterV1, got %T", input) + } + + tmpDir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("helm-plugin-%s-", r.metadata.Name)) + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + + d := getProtocolCommand(r.RuntimeConfig.ProtocolCommands, msg.Protocol) + if d == nil { + return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol) + } + + commands := strings.Split(d.Command, " ") + args := append( + commands[1:], + msg.Options.CertFile, + msg.Options.KeyFile, + msg.Options.CAFile, + msg.Href) + + // TODO should we append to input.Env too? + env := append( + os.Environ(), + fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", msg.Options.Username), + fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", msg.Options.Password), + fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", msg.Options.PassCredentialsAll)) + + // TODO should we pass along input.Stdout? + buf := bytes.Buffer{} // subprocess getters are expected to write content to stdout + + pluginCommand := filepath.Join(r.pluginDir, commands[0]) + prog := exec.Command( + pluginCommand, + args...) + prog.Env = env + prog.Stdout = &buf + prog.Stderr = os.Stderr + if err := executeCmd(prog, r.metadata.Name); err != nil { + return nil, err + } + + return &Output{ + Message: &schema.OutputMessageGetterV1{ + Data: buf.Bytes(), + }, + }, nil +} diff --git a/internal/plugin/hooks.go b/internal/plugin/runtime_subprocess_hooks.go similarity index 100% rename from internal/plugin/hooks.go rename to internal/plugin/runtime_subprocess_hooks.go diff --git a/internal/plugin/runtime_subprocess_test.go b/internal/plugin/runtime_subprocess_test.go new file mode 100644 index 000000000..9d932816d --- /dev/null +++ b/internal/plugin/runtime_subprocess_test.go @@ -0,0 +1,64 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "os" + "path/filepath" + "testing" + + "helm.sh/helm/v4/pkg/cli" +) + +func TestSetupEnv(t *testing.T) { + name := "pequod" + base := filepath.Join("testdata/helmhome/helm/plugins", name) + + s := cli.New() + s.PluginsDirectory = "testdata/helmhome/helm/plugins" + + SetupPluginEnv(s, name, base) + for _, tt := range []struct { + name, expect string + }{ + {"HELM_PLUGIN_NAME", name}, + {"HELM_PLUGIN_DIR", base}, + } { + if got := os.Getenv(tt.name); got != tt.expect { + t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) + } + } +} + +func TestSetupEnvWithSpace(t *testing.T) { + name := "sureshdsk" + base := filepath.Join("testdata/helm home/helm/plugins", name) + + s := cli.New() + s.PluginsDirectory = "testdata/helm home/helm/plugins" + + SetupPluginEnv(s, name, base) + for _, tt := range []struct { + name, expect string + }{ + {"HELM_PLUGIN_NAME", name}, + {"HELM_PLUGIN_DIR", base}, + } { + if got := os.Getenv(tt.name); got != tt.expect { + t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) + } + } +} diff --git a/internal/plugin/schema/cli.go b/internal/plugin/schema/cli.go new file mode 100644 index 000000000..3976d3737 --- /dev/null +++ b/internal/plugin/schema/cli.go @@ -0,0 +1,29 @@ +/* + Copyright The Helm Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package schema + +import ( + "bytes" + + "helm.sh/helm/v4/pkg/cli" +) + +type InputMessageCLIV1 struct { + ExtraArgs []string `json:"extraArgs"` + Settings *cli.EnvSettings `json:"settings"` +} + +type OutputMessageCLIV1 struct { + Data *bytes.Buffer `json:"data"` +} diff --git a/internal/plugin/schema/getter.go b/internal/plugin/schema/getter.go new file mode 100644 index 000000000..f9840008e --- /dev/null +++ b/internal/plugin/schema/getter.go @@ -0,0 +1,47 @@ +/* + Copyright The Helm Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package schema + +import ( + "time" +) + +// TODO: can we generate these plugin input/outputs? 
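+//
+// GetterOptionsV1, InputMessageGetterV1 and OutputMessageGetterV1 together form the
+// request/response payload for a getter-type plugin: Helm supplies the target href,
+// protocol and transport options, and the plugin answers with the raw bytes it fetched.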
+ +type GetterOptionsV1 struct { + URL string + CertFile string + KeyFile string + CAFile string + UNTar bool + InsecureSkipVerifyTLS bool + PlainHTTP bool + AcceptHeader string + Username string + Password string + PassCredentialsAll bool + UserAgent string + Version string + Timeout time.Duration +} + +type InputMessageGetterV1 struct { + Href string `json:"href"` + Protocol string `json:"protocol"` + Options GetterOptionsV1 `json:"options"` +} + +type OutputMessageGetterV1 struct { + Data []byte `json:"data"` +} diff --git a/internal/plugin/subprocess_commands.go b/internal/plugin/subprocess_commands.go new file mode 100644 index 000000000..d979f98e3 --- /dev/null +++ b/internal/plugin/subprocess_commands.go @@ -0,0 +1,111 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "fmt" + "os" + "runtime" + "strings" +) + +// PlatformCommand represents a command for a particular operating system and architecture +type PlatformCommand struct { + OperatingSystem string `yaml:"os"` + Architecture string `yaml:"arch"` + Command string `yaml:"command"` + Args []string `yaml:"args"` +} + +// Returns command and args strings based on the following rules in priority order: +// - From the PlatformCommand where OS and Arch match the current platform +// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified +// - From the PlatformCommand where OS is empty/unspecified and Arch matches the current platform +// - From the PlatformCommand where OS and Arch are both empty/unspecified +// - Return nil, nil +func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) { + var command, args []string + found := false + foundOs := false + + eq := strings.EqualFold + for _, c := range cmds { + if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) { + // Return early for an exact match + return strings.Split(c.Command, " "), c.Args + } + + if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 { + // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match + continue + } + + if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) { + // First OS match with empty arch, can only be overridden by a direct match + command = strings.Split(c.Command, " ") + args = c.Args + found = true + foundOs = true + } else if !found { + // First empty match, can be overridden by a direct match or an OS match + command = strings.Split(c.Command, " ") + args = c.Args + found = true + } + } + + return command, args +} + +// PrepareCommands takes a []Plugin.PlatformCommand +// and prepares the command and arguments for execution. +// +// It merges extraArgs into any arguments supplied in the plugin. It +// returns the main command and an args array. +// +// The result is suitable to pass to exec.Command. 
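+//
+// A minimal sketch of the intended call pattern (the values shown are hypothetical):
+//
+//	cmds := []PlatformCommand{
+//		{OperatingSystem: "linux", Architecture: "amd64", Command: "sh", Args: []string{"-c", "echo hello"}},
+//	}
+//	main, args, err := PrepareCommands(cmds, true, []string{"--debug"})
+//	if err != nil {
+//		return err
+//	}
+//	cmd := exec.Command(main, args...)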
+func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) { + cmdParts, args := getPlatformCommand(cmds) + if len(cmdParts) == 0 || cmdParts[0] == "" { + return "", nil, fmt.Errorf("no plugin command is applicable") + } + + main := os.ExpandEnv(cmdParts[0]) + baseArgs := []string{} + if len(cmdParts) > 1 { + for _, cmdPart := range cmdParts[1:] { + if expandArgs { + baseArgs = append(baseArgs, os.ExpandEnv(cmdPart)) + } else { + baseArgs = append(baseArgs, cmdPart) + } + } + } + + for _, arg := range args { + if expandArgs { + baseArgs = append(baseArgs, os.ExpandEnv(arg)) + } else { + baseArgs = append(baseArgs, arg) + } + } + + if len(extraArgs) > 0 { + baseArgs = append(baseArgs, extraArgs...) + } + + return main, baseArgs, nil +} diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go new file mode 100644 index 000000000..3879a4bd0 --- /dev/null +++ b/internal/plugin/subprocess_commands_test.go @@ -0,0 +1,259 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "reflect" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPrepareCommand(t *testing.T) { + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + + platformCommands := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, + } + + cmd, args, err := PrepareCommands(platformCommands, true, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, cmdArgs) { + t.Fatalf("Expected %v, got %v", cmdArgs, args) + } +} + +func TestPrepareCommandExtraArgs(t *testing.T) { + + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + platformCommands := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + } + + extraArgs := []string{"--debug", "--foo", "bar"} + + type testCaseExpected struct { + cmdMain string + args []string + } + + testCases := map[string]struct { + ignoreFlags bool + expected testCaseExpected + }{ + "ignoreFlags false": { + ignoreFlags: false, + expected: testCaseExpected{ + cmdMain: cmdMain, + args: []string{"-c", "echo \"test\"", "--debug", "--foo", 
"bar"}, + }, + }, + "ignoreFlags true": { + ignoreFlags: true, + expected: testCaseExpected{ + cmdMain: cmdMain, + args: []string{"-c", "echo \"test\""}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + //expectedArgs := append(cmdArgs, extraArgs...) + + // extra args are expected when ignoreFlags is unset or false + testExtraArgs := extraArgs + if tc.ignoreFlags { + testExtraArgs = []string{} + } + cmd, args, err := PrepareCommands(platformCommands, true, testExtraArgs) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, tc.expected.cmdMain, cmd, "Expected command to match") + assert.Equal(t, tc.expected.args, args, "Expected args to match") + }) + } +} + +func TestPrepareCommands(t *testing.T) { + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + + cmds := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + } + + cmd, args, err := PrepareCommands(cmds, true, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, cmdArgs) { + t.Fatalf("Expected %v, got %v", cmdArgs, args) + } +} + +func TestPrepareCommandsExtraArgs(t *testing.T) { + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + extraArgs := []string{"--debug", "--foo", "bar"} + + cmds := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + } + + expectedArgs := append(cmdArgs, extraArgs...) 
+ + cmd, args, err := PrepareCommands(cmds, true, extraArgs) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, expectedArgs) { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } +} + +func TestPrepareCommandsNoArch(t *testing.T) { + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + + cmds := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + } + + cmd, args, err := PrepareCommands(cmds, true, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, cmdArgs) { + t.Fatalf("Expected %v, got %v", cmdArgs, args) + } +} + +func TestPrepareCommandsNoOsNoArch(t *testing.T) { + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"test\""} + + cmds := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + {OperatingSystem: "", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, + } + + cmd, args, err := PrepareCommands(cmds, true, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, cmdArgs) { + t.Fatalf("Expected %v, got %v", cmdArgs, args) + } +} + +func TestPrepareCommandsNoMatch(t *testing.T) { + cmds := []PlatformCommand{ + {OperatingSystem: "no-os", Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}}, + {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}}, + {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}}, + } + + if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { + t.Fatalf("Expected error to be returned") + } +} + +func TestPrepareCommandsNoCommands(t *testing.T) { + cmds := []PlatformCommand{} + + if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { + t.Fatalf("Expected error to be returned") + } +} + +func TestPrepareCommandsExpand(t *testing.T) { + t.Setenv("TEST", "test") + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"${TEST}\""} + cmds := []PlatformCommand{ + {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs}, + } + + expectedArgs := []string{"-c", "echo \"test\""} + + cmd, args, err := PrepareCommands(cmds, true, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if !reflect.DeepEqual(args, expectedArgs) { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } +} + +func TestPrepareCommandsNoExpand(t *testing.T) { + t.Setenv("TEST", "test") + cmdMain := "sh" + cmdArgs := []string{"-c", "echo \"${TEST}\""} + cmds := []PlatformCommand{ + {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs}, + } + + cmd, args, err := PrepareCommands(cmds, false, []string{}) + if err != nil { + t.Fatal(err) + } + if cmd != cmdMain { + t.Fatalf("Expected %q, got %q", cmdMain, cmd) + } + if 
!reflect.DeepEqual(args, cmdArgs) { + t.Fatalf("Expected %v, got %v", cmdArgs, args) + } +} diff --git a/internal/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml similarity index 100% rename from internal/plugin/testdata/plugdir/bad/duplicate-entries/plugin.yaml rename to internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml diff --git a/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml b/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml index c0b90379b..4e85f1f79 100644 --- a/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml +++ b/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml @@ -1,3 +1,4 @@ +--- name: "downloader" version: "1.2.3" usage: "usage" diff --git a/internal/plugin/testdata/plugdir/good/echo/plugin.yaml b/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml similarity index 85% rename from internal/plugin/testdata/plugdir/good/echo/plugin.yaml rename to internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml index 8baa35b6d..ef84a4d8f 100644 --- a/internal/plugin/testdata/plugdir/good/echo/plugin.yaml +++ b/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml @@ -1,4 +1,5 @@ -name: "echo" +--- +name: "echo-legacy" version: "1.2.3" usage: "echo something" description: |- diff --git a/internal/plugin/testdata/plugdir/good/hello/hello.ps1 b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1 similarity index 100% rename from internal/plugin/testdata/plugdir/good/hello/hello.ps1 rename to internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1 diff --git a/internal/plugin/testdata/plugdir/good/hello/hello.sh b/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh similarity index 100% rename from internal/plugin/testdata/plugdir/good/hello/hello.sh rename to internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh diff --git a/internal/plugin/testdata/plugdir/good/hello/plugin.yaml b/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml similarity index 84% rename from internal/plugin/testdata/plugdir/good/hello/plugin.yaml rename to internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml index 71dc88259..bf37e0626 100644 --- a/internal/plugin/testdata/plugdir/good/hello/plugin.yaml +++ b/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml @@ -1,25 +1,22 @@ -name: "hello" +--- +name: "hello-legacy" version: "0.1.0" -usage: "usage" +usage: "echo hello message" description: |- description platformCommand: - os: linux - arch: command: "sh" args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"] - os: windows - arch: command: "pwsh" args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"] ignoreFlags: true platformHooks: install: - os: linux - arch: "" command: "sh" args: ["-c", 'echo "installing..."'] - os: windows - arch: "" command: "pwsh" args: ["-c", 'echo "installing..."'] diff --git a/pkg/action/action.go b/pkg/action/action.go index 69bcf4da2..42dc56c96 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -177,7 +177,7 @@ func splitAndDeannotate(postrendered string) (map[string]string, error) { // // This code has to do with writing files to disk. 
func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { - hs := []*release.Hook{} + var hs []*release.Hook b := bytes.NewBuffer(nil) caps, err := cfg.getCapabilities() diff --git a/pkg/action/install.go b/pkg/action/install.go index 78c86cdc0..8f76eee7b 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -237,7 +237,7 @@ func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release. return i.RunWithContext(ctx, chrt, vals) } -// Run executes the installation with Context +// RunWithContext executes the installation with Context // // When the task is cancelled through ctx, the function returns and the install // proceeds in the background. diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 74c3c8352..420631264 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -27,6 +27,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog/v2" "helm.sh/helm/v4/pkg/action" @@ -163,6 +164,7 @@ func (o *outputValue) Set(s string) error { return nil } +// TODO there is probably a better way to pass cobra settings than as a param func bindPostRenderFlag(cmd *cobra.Command, varRef *postrender.PostRenderer) { p := &postRendererOptions{varRef, "", []string{}} cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the path to an executable to be used for post rendering. If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path") diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go index 8c06db4ae..40478c30e 100644 --- a/pkg/cmd/helpers_test.go +++ b/pkg/cmd/helpers_test.go @@ -104,6 +104,10 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) root.SetArgs(args) oldStdin := os.Stdin + defer func() { + os.Stdin = oldStdin + }() + if in != nil { root.SetIn(in) os.Stdin = in @@ -116,8 +120,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) result := buf.String() - os.Stdin = oldStdin - return c, result, err } diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index e340ba1b6..5057c1033 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -17,16 +17,17 @@ package cmd import ( "bytes" + "context" "fmt" "io" "log" "os" - "os/exec" "path/filepath" "slices" "strconv" "strings" - "syscall" + + "helm.sh/helm/v4/internal/plugin/schema" "github.com/spf13/cobra" "sigs.k8s.io/yaml" @@ -34,6 +35,12 @@ import ( "helm.sh/helm/v4/internal/plugin" ) +// TODO: move pluginDynamicCompletionExecutable pkg/plugin/runtime_subprocess.go +// any references to executables should be for [plugin.SubprocessPluginRuntime] only +// this should also be for backwards compatibility in [plugin.Legacy] only +// +// TODO: for v1 make this configurable with a new CompletionCommand field for +// [plugin.RuntimeConfigSubprocess] const ( pluginStaticCompletionFile = "completion.yaml" pluginDynamicCompletionExecutable = "plugin.complete" @@ -44,18 +51,22 @@ type PluginError struct { Code int } -// loadPlugins loads plugins into the command list. +// loadCLIPlugins loads CLI plugins into the command list. // // This follows a different pattern than the other commands because it has // to inspect its environment and then add commands to the base command // as it finds them. 
-func loadPlugins(baseCmd *cobra.Command, out io.Writer) { +func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { // If HELM_NO_PLUGINS is set to 1, do not load plugins. if os.Getenv("HELM_NO_PLUGINS") == "1" { return } - found, err := plugin.FindPlugins(settings.PluginsDirectory) + dirs := filepath.SplitList(settings.PluginsDirectory) + descriptor := plugin.Descriptor{ + Type: "cli/v1", + } + found, err := plugin.FindPlugins(dirs, descriptor) if err != nil { fmt.Fprintf(os.Stderr, "failed to load plugins: %s\n", err) return @@ -63,32 +74,69 @@ func loadPlugins(baseCmd *cobra.Command, out io.Writer) { // Now we create commands for all of these. for _, plug := range found { - md := plug.Metadata - if md.Usage == "" { - md.Usage = fmt.Sprintf("the %q plugin", md.Name) + var use, short, long string + var ignoreFlags bool + if cliConfig, ok := plug.Metadata().Config.(*plugin.ConfigCLI); ok { + use = cliConfig.Usage + short = cliConfig.ShortHelp + long = cliConfig.LongHelp + ignoreFlags = cliConfig.IgnoreFlags + } + + // Set defaults + if use == "" { + use = plug.Metadata().Name + } + if short == "" { + short = fmt.Sprintf("the %q plugin", plug.Metadata().Name) } + // long has no default, empty is ok c := &cobra.Command{ - Use: md.Name, - Short: md.Usage, - Long: md.Description, + Use: use, + Short: short, + Long: long, RunE: func(cmd *cobra.Command, args []string) error { u, err := processParent(cmd, args) if err != nil { return err } + // Setup plugin environment + plugin.SetupPluginEnv(settings, plug.Metadata().Name, plug.Dir()) + + // For CLI plugin types runtime, set extra args and settings + extraArgs := []string{} + if !ignoreFlags { + extraArgs = u + } - // Call setupEnv before PrepareCommand because - // PrepareCommand uses os.ExpandEnv and expects the - // setupEnv vars. - plugin.SetupPluginEnv(settings, md.Name, plug.Dir) - main, argv, prepCmdErr := plug.PrepareCommand(u) - if prepCmdErr != nil { - os.Stderr.WriteString(prepCmdErr.Error()) - return fmt.Errorf("plugin %q exited with error", md.Name) + // Prepare environment + env := os.Environ() + for k, v := range settings.EnvVars() { + env = append(env, fmt.Sprintf("%s=%s", k, v)) } - return callPluginExecutable(md.Name, main, argv, out) + // Invoke plugin + input := &plugin.Input{ + Message: schema.InputMessageCLIV1{ + ExtraArgs: extraArgs, + Settings: settings, + }, + Env: env, + Stdin: os.Stdin, + Stdout: out, + Stderr: os.Stderr, + } + _, err = plug.Invoke(context.Background(), input) + // TODO do we want to keep execErr here? + if execErr, ok := err.(*plugin.InvokeExecError); ok { + // TODO can we replace cmd.PluginError with plugin.Error? + return PluginError{ + error: execErr.Err, + Code: execErr.Code, + } + } + return err }, // This passes all the flags to the subcommand. DisableFlagParsing: true, @@ -118,34 +166,6 @@ func processParent(cmd *cobra.Command, args []string) ([]string, error) { return u, nil } -// This function is used to setup the environment for the plugin and then -// call the executable specified by the parameter 'main' -func callPluginExecutable(pluginName string, main string, argv []string, out io.Writer) error { - env := os.Environ() - for k, v := range settings.EnvVars() { - env = append(env, fmt.Sprintf("%s=%s", k, v)) - } - - mainCmdExp := os.ExpandEnv(main) - prog := exec.Command(mainCmdExp, argv...) 
- prog.Env = env - prog.Stdin = os.Stdin - prog.Stdout = out - prog.Stderr = os.Stderr - if err := prog.Run(); err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) - status := eerr.Sys().(syscall.WaitStatus) - return PluginError{ - error: fmt.Errorf("plugin %q exited with error", pluginName), - Code: status.ExitStatus(), - } - } - return err - } - return nil -} - // manuallyProcessArgs processes an arg array, removing special args. // // Returns two sets of args: known and unknown (in that order) @@ -200,10 +220,10 @@ type pluginCommand struct { // loadCompletionForPlugin will load and parse any completion.yaml provided by the plugin // and add the dynamic completion hook to call the optional plugin.complete -func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) { +func loadCompletionForPlugin(pluginCmd *cobra.Command, plug plugin.Plugin) { // Parse the yaml file providing the plugin's sub-commands and flags cmds, err := loadFile(strings.Join( - []string{plugin.Dir, pluginStaticCompletionFile}, string(filepath.Separator))) + []string{plug.Dir(), pluginStaticCompletionFile}, string(filepath.Separator))) if err != nil { // The file could be missing or invalid. No static completion for this plugin. @@ -217,12 +237,12 @@ func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) { // Preserve the Usage string specified for the plugin cmds.Name = pluginCmd.Use - addPluginCommands(plugin, pluginCmd, cmds) + addPluginCommands(plug, pluginCmd, cmds) } // addPluginCommands is a recursive method that adds each different level // of sub-commands and flags for the plugins that have provided such information -func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) { +func addPluginCommands(plug plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) { if cmds == nil { return } @@ -245,7 +265,7 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug // calling plugin.complete at every completion, which greatly simplifies // development of plugin.complete for plugin developers. baseCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return pluginDynamicComp(plugin, cmd, args, toComplete) + return pluginDynamicComp(plug, cmd, args, toComplete) } } @@ -300,7 +320,7 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug Run: func(_ *cobra.Command, _ []string) {}, } baseCmd.AddCommand(subCmd) - addPluginCommands(plugin, subCmd, &cmd) + addPluginCommands(plug, subCmd, &cmd) } } @@ -319,8 +339,19 @@ func loadFile(path string) (*pluginCommand, error) { // pluginDynamicComp call the plugin.complete script of the plugin (if available) // to obtain the dynamic completion choices. 
It must pass all the flags and sub-commands // specified in the command-line to the plugin.complete executable (except helm's global flags) -func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - md := plug.Metadata +func pluginDynamicComp(plug plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + + subprocessPlug, ok := plug.(*plugin.SubprocessPluginRuntime) + if !ok { + // Completion only supported for subprocess plugins (TODO: fix this) + cobra.CompDebugln(fmt.Sprintf("Unsupported plugin runtime: %q", plug.Metadata().Runtime), settings.Debug) + return nil, cobra.ShellCompDirectiveDefault + } + + var ignoreFlags bool + if cliConfig, ok := subprocessPlug.Metadata().Config.(*plugin.ConfigCLI); ok { + ignoreFlags = cliConfig.IgnoreFlags + } u, err := processParent(cmd, args) if err != nil { @@ -328,21 +359,29 @@ func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, t } // We will call the dynamic completion script of the plugin - main := strings.Join([]string{plug.Dir, pluginDynamicCompletionExecutable}, string(filepath.Separator)) + main := strings.Join([]string{plug.Dir(), pluginDynamicCompletionExecutable}, string(filepath.Separator)) // We must include all sub-commands passed on the command-line. // To do that, we pass-in the entire CommandPath, except the first two elements // which are 'helm' and 'pluginName'. argv := strings.Split(cmd.CommandPath(), " ")[2:] - if !md.IgnoreFlags { + if !ignoreFlags { argv = append(argv, u...) argv = append(argv, toComplete) } - plugin.SetupPluginEnv(settings, md.Name, plug.Dir) + plugin.SetupPluginEnv(settings, plug.Metadata().Name, plug.Dir()) cobra.CompDebugln(fmt.Sprintf("calling %s with args %v", main, argv), settings.Debug) buf := new(bytes.Buffer) - if err := callPluginExecutable(md.Name, main, argv, buf); err != nil { + + // Prepare environment + env := os.Environ() + for k, v := range settings.EnvVars() { + env = append(env, fmt.Sprintf("%s=%s", k, v)) + } + + // For subprocess runtime, use InvokeWithEnv for dynamic completion + if err := subprocessPlug.InvokeWithEnv(main, argv, env, nil, buf, buf); err != nil { // The dynamic completion file is optional for a plugin, so this error is ok. cobra.CompDebugln(fmt.Sprintf("Unable to call %s: %v", main, err.Error()), settings.Debug) return nil, cobra.ShellCompDirectiveDefault diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index 76bc99915..b03000ad4 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -16,11 +16,7 @@ limitations under the License. package cmd import ( - "fmt" "io" - "log/slog" - "os" - "os/exec" "github.com/spf13/cobra" @@ -47,35 +43,12 @@ func newPluginCmd(out io.Writer) *cobra.Command { } // runHook will execute a plugin hook. 
-func runHook(p *plugin.Plugin, event string) error { - plugin.SetupPluginEnv(settings, p.Metadata.Name, p.Dir) - - cmds := p.Metadata.PlatformHooks[event] - expandArgs := true - if len(cmds) == 0 && len(p.Metadata.Hooks) > 0 { - cmd := p.Metadata.Hooks[event] - if len(cmd) > 0 { - cmds = []plugin.PlatformCommand{{Command: "sh", Args: []string{"-c", cmd}}} - expandArgs = false - } - } - - main, argv, err := plugin.PrepareCommands(cmds, expandArgs, []string{}) - if err != nil { - return nil +func runHook(p plugin.Plugin, event string) error { + pluginHook, ok := p.(plugin.PluginHook) + if ok { + plugin.SetupPluginEnv(settings, p.Metadata().Name, p.Dir()) + return pluginHook.InvokeHook(event) } - prog := exec.Command(main, argv...) - - slog.Debug("running hook", "event", event, "program", prog) - - prog.Stdout, prog.Stderr = os.Stdout, os.Stderr - if err := prog.Run(); err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) - return fmt.Errorf("plugin %s hook for %q exited with error", event, p.Metadata.Name) - } - return err - } return nil } diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index 7dd1623e7..7dae39505 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -89,6 +89,6 @@ func (o *pluginInstallOptions) run(out io.Writer) error { return err } - fmt.Fprintf(out, "Installed plugin: %s\n", p.Metadata.Name) + fmt.Fprintf(out, "Installed plugin: %s\n", p.Metadata().Name) return nil } diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index faf41b91e..31a76330d 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "log/slog" + "path/filepath" "slices" "github.com/gosuri/uitable" @@ -28,6 +29,7 @@ import ( ) func newPluginListCmd(out io.Writer) *cobra.Command { + var pluginType string cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, @@ -35,33 +37,46 @@ func newPluginListCmd(out io.Writer) *cobra.Command { ValidArgsFunction: noMoreArgsCompFunc, RunE: func(_ *cobra.Command, _ []string) error { slog.Debug("pluginDirs", "directory", settings.PluginsDirectory) - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + dirs := filepath.SplitList(settings.PluginsDirectory) + descriptor := plugin.Descriptor{ + Type: pluginType, + } + plugins, err := plugin.FindPlugins(dirs, descriptor) if err != nil { return err } table := uitable.New() - table.AddRow("NAME", "VERSION", "DESCRIPTION") + table.AddRow("NAME", "VERSION", "TYPE", "APIVERSION", "SOURCE") for _, p := range plugins { - table.AddRow(p.Metadata.Name, p.Metadata.Version, p.Metadata.Description) + m := p.Metadata() + sourceURL := m.SourceURL + if sourceURL == "" { + sourceURL = "unknown" + } + table.AddRow(m.Name, m.Version, m.Type, m.APIVersion, sourceURL) } fmt.Fprintln(out, table) return nil }, } + + f := cmd.Flags() + f.StringVar(&pluginType, "type", "", "Plugin type") + return cmd } // Returns all plugins from plugins, except those with names matching ignoredPluginNames -func filterPlugins(plugins []*plugin.Plugin, ignoredPluginNames []string) []*plugin.Plugin { - // if ignoredPluginNames is nil, just return plugins - if ignoredPluginNames == nil { +func filterPlugins(plugins []plugin.Plugin, ignoredPluginNames []string) []plugin.Plugin { + // if ignoredPluginNames is nil or empty, just return plugins + if len(ignoredPluginNames) == 0 { return plugins } - var filteredPlugins []*plugin.Plugin + var filteredPlugins []plugin.Plugin for _, plugin := range plugins { - found := 
slices.Contains(ignoredPluginNames, plugin.Metadata.Name) + found := slices.Contains(ignoredPluginNames, plugin.Metadata().Name) if !found { filteredPlugins = append(filteredPlugins, plugin) } @@ -73,11 +88,20 @@ func filterPlugins(plugins []*plugin.Plugin, ignoredPluginNames []string) []*plu // Provide dynamic auto-completion for plugin names func compListPlugins(_ string, ignoredPluginNames []string) []string { var pNames []string - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + dirs := filepath.SplitList(settings.PluginsDirectory) + descriptor := plugin.Descriptor{ + Type: "cli/v1", + } + plugins, err := plugin.FindPlugins(dirs, descriptor) if err == nil && len(plugins) > 0 { filteredPlugins := filterPlugins(plugins, ignoredPluginNames) for _, p := range filteredPlugins { - pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata.Name, p.Metadata.Usage)) + m := p.Metadata() + var shortHelp string + if config, ok := m.Config.(*plugin.ConfigCLI); ok { + shortHelp = config.ShortHelp + } + pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata().Name, shortHelp)) } } return pNames diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index 74f7a276a..b476b80d2 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -19,12 +19,13 @@ import ( "bytes" "os" "runtime" - "sort" "strings" "testing" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -81,7 +82,7 @@ func TestManuallyProcessArgs(t *testing.T) { } } -func TestLoadPlugins(t *testing.T) { +func TestLoadCLIPlugins(t *testing.T) { settings.PluginsDirectory = "testdata/helmhome/helm/plugins" settings.RepositoryConfig = "testdata/helmhome/helm/repositories.yaml" settings.RepositoryCache = "testdata/helmhome/helm/repository" @@ -90,7 +91,7 @@ func TestLoadPlugins(t *testing.T) { out bytes.Buffer cmd cobra.Command ) - loadPlugins(&cmd, &out) + loadCLIPlugins(&cmd, &out) envs := strings.Join([]string{ "fullenv", @@ -119,9 +120,7 @@ func TestLoadPlugins(t *testing.T) { plugins := cmd.Commands() - if len(plugins) != len(tests) { - t.Fatalf("Expected %d plugins, got %d", len(tests), len(plugins)) - } + require.Len(t, plugins, len(tests), "Expected %d plugins, got %d", len(tests), len(plugins)) for i := 0; i < len(plugins); i++ { out.Reset() @@ -153,9 +152,7 @@ func TestLoadPlugins(t *testing.T) { t.Errorf("Error running %s: %+v", tt.use, err) } } - if out.String() != tt.expect { - t.Errorf("Expected %s to output:\n%s\ngot\n%s", tt.use, tt.expect, out.String()) - } + assert.Equal(t, tt.expect, out.String(), "expected output for %s", tt.use) } } } @@ -169,7 +166,7 @@ func TestLoadPluginsWithSpace(t *testing.T) { out bytes.Buffer cmd cobra.Command ) - loadPlugins(&cmd, &out) + loadCLIPlugins(&cmd, &out) envs := strings.Join([]string{ "fullenv", @@ -228,9 +225,7 @@ func TestLoadPluginsWithSpace(t *testing.T) { t.Errorf("Error running %s: %+v", tt.use, err) } } - if out.String() != tt.expect { - t.Errorf("Expected %s to output:\n%s\ngot\n%s", tt.use, tt.expect, out.String()) - } + assert.Equal(t, tt.expect, out.String(), "expected output for %s", tt.use) } } } @@ -242,7 +237,7 @@ type staticCompletionDetails struct { next []staticCompletionDetails } -func TestLoadPluginsForCompletion(t *testing.T) { +func TestLoadCLIPluginsForCompletion(t *testing.T) { settings.PluginsDirectory = "testdata/helmhome/helm/plugins" var out bytes.Buffer @@ -250,8 +245,7 @@ func TestLoadPluginsForCompletion(t 
*testing.T) { cmd := &cobra.Command{ Use: "completion", } - - loadPlugins(cmd, &out) + loadCLIPlugins(cmd, &out) tests := []staticCompletionDetails{ {"args", []string{}, []string{}, []staticCompletionDetails{}}, @@ -276,30 +270,17 @@ func TestLoadPluginsForCompletion(t *testing.T) { func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompletionDetails) { t.Helper() - if len(plugins) != len(tests) { - t.Fatalf("Expected commands %v, got %v", tests, plugins) - } + require.Len(t, plugins, len(tests), "Expected commands %v, got %v", tests, plugins) - for i := 0; i < len(plugins); i++ { + is := assert.New(t) + for i := range plugins { pp := plugins[i] tt := tests[i] - if pp.Use != tt.use { - t.Errorf("%s: Expected Use=%q, got %q", pp.Name(), tt.use, pp.Use) - } + is.Equal(pp.Use, tt.use, "Expected Use=%q, got %q", tt.use, pp.Use) targs := tt.validArgs pargs := pp.ValidArgs - if len(targs) != len(pargs) { - t.Fatalf("%s: expected args %v, got %v", pp.Name(), targs, pargs) - } - - sort.Strings(targs) - sort.Strings(pargs) - for j := range targs { - if targs[j] != pargs[j] { - t.Errorf("%s: expected validArg=%q, got %q", pp.Name(), targs[j], pargs[j]) - } - } + is.ElementsMatch(targs, pargs) tflags := tt.flags var pflags []string @@ -309,17 +290,8 @@ func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompleti pflags = append(pflags, flag.Shorthand) } }) - if len(tflags) != len(pflags) { - t.Fatalf("%s: expected flags %v, got %v", pp.Name(), tflags, pflags) - } + is.ElementsMatch(tflags, pflags) - sort.Strings(tflags) - sort.Strings(pflags) - for j := range tflags { - if tflags[j] != pflags[j] { - t.Errorf("%s: expected flag=%q, got %q", pp.Name(), tflags[j], pflags[j]) - } - } // Check the next level checkCommand(t, pp.Commands(), tt.next) } @@ -358,7 +330,7 @@ func TestPluginDynamicCompletion(t *testing.T) { } } -func TestLoadPlugins_HelmNoPlugins(t *testing.T) { +func TestLoadCLIPlugins_HelmNoPlugins(t *testing.T) { settings.PluginsDirectory = "testdata/helmhome/helm/plugins" settings.RepositoryConfig = "testdata/helmhome/helm/repository" @@ -366,7 +338,7 @@ func TestLoadPlugins_HelmNoPlugins(t *testing.T) { out := bytes.NewBuffer(nil) cmd := &cobra.Command{} - loadPlugins(cmd, out) + loadCLIPlugins(cmd, out) plugins := cmd.Commands() if len(plugins) != 0 { diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index 808cad92f..a925c66dd 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -61,7 +61,7 @@ func (o *pluginUninstallOptions) complete(args []string) error { func (o *pluginUninstallOptions) run(out io.Writer) error { slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory) - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + plugins, err := plugin.LoadAll(settings.PluginsDirectory) if err != nil { return err } @@ -83,16 +83,17 @@ func (o *pluginUninstallOptions) run(out io.Writer) error { return nil } -func uninstallPlugin(p *plugin.Plugin) error { - if err := os.RemoveAll(p.Dir); err != nil { +func uninstallPlugin(p plugin.Plugin) error { + if err := os.RemoveAll(p.Dir()); err != nil { return err } return runHook(p, plugin.Delete) } -func findPlugin(plugins []*plugin.Plugin, name string) *plugin.Plugin { +// TODO should this be in pkg/plugin/loader.go? 
+func findPlugin(plugins []plugin.Plugin, name string) plugin.Plugin { for _, p := range plugins { - if p.Metadata.Name == name { + if p.Metadata().Name == name { return p } } diff --git a/pkg/cmd/plugin_update.go b/pkg/cmd/plugin_update.go index 4fed3772d..c6d4b8530 100644 --- a/pkg/cmd/plugin_update.go +++ b/pkg/cmd/plugin_update.go @@ -63,7 +63,7 @@ func (o *pluginUpdateOptions) complete(args []string) error { func (o *pluginUpdateOptions) run(out io.Writer) error { installer.Debug = settings.Debug slog.Debug("loading installed plugins", "path", settings.PluginsDirectory) - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + plugins, err := plugin.LoadAll(settings.PluginsDirectory) if err != nil { return err } @@ -86,8 +86,8 @@ func (o *pluginUpdateOptions) run(out io.Writer) error { return nil } -func updatePlugin(p *plugin.Plugin) error { - exactLocation, err := filepath.EvalSymlinks(p.Dir) +func updatePlugin(p plugin.Plugin) error { + exactLocation, err := filepath.EvalSymlinks(p.Dir()) if err != nil { return err } diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index f43ce7abe..836df834d 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -291,8 +291,8 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg newPushCmd(actionConfig, out), ) - // Find and add plugins - loadPlugins(cmd, out) + // Find and add CLI plugins + loadCLIPlugins(cmd, out) // Check for expired repositories checkForExpiredRepos(settings.RepositoryConfig) diff --git a/pkg/cmd/testdata/testplugin/plugin.yaml b/pkg/cmd/testdata/testplugin/plugin.yaml deleted file mode 100644 index 890292cbf..000000000 --- a/pkg/cmd/testdata/testplugin/plugin.yaml +++ /dev/null @@ -1,4 +0,0 @@ -name: testplugin -usage: "echo test" -description: "This echos test" -command: "echo test" diff --git a/pkg/getter/getter.go b/pkg/getter/getter.go index 5605e043f..8585ac449 100644 --- a/pkg/getter/getter.go +++ b/pkg/getter/getter.go @@ -27,10 +27,11 @@ import ( "helm.sh/helm/v4/pkg/registry" ) -// options are generic parameters to be provided to the getter during instantiation. +// getterOptions are generic parameters to be provided to the getter during instantiation. // // Getters may or may not ignore these parameters as they are passed in. -type options struct { +// TODO what is the difference between this and schema.GetterOptionsV1? +type getterOptions struct { url string certFile string keyFile string @@ -51,54 +52,54 @@ type options struct { // Option allows specifying various settings configurable by the user for overriding the defaults // used when performing Get operations with the Getter. -type Option func(*options) +type Option func(*getterOptions) // WithURL informs the getter the server name that will be used when fetching objects. Used in conjunction with // WithTLSClientConfig to set the TLSClientConfig's server name. 
func WithURL(url string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.url = url } } // WithAcceptHeader sets the request's Accept header as some REST APIs serve multiple content types func WithAcceptHeader(header string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.acceptHeader = header } } // WithBasicAuth sets the request's Authorization header to use the provided credentials func WithBasicAuth(username, password string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.username = username opts.password = password } } func WithPassCredentialsAll(pass bool) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.passCredentialsAll = pass } } // WithUserAgent sets the request's User-Agent header to use the provided agent name. func WithUserAgent(userAgent string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.userAgent = userAgent } } // WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS } } // WithTLSClientConfig sets the client auth with the provided credentials. func WithTLSClientConfig(certFile, keyFile, caFile string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.certFile = certFile opts.keyFile = keyFile opts.caFile = caFile @@ -106,39 +107,39 @@ func WithTLSClientConfig(certFile, keyFile, caFile string) Option { } func WithPlainHTTP(plainHTTP bool) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.plainHTTP = plainHTTP } } // WithTimeout sets the timeout for requests func WithTimeout(timeout time.Duration) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.timeout = timeout } } func WithTagName(tagname string) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.version = tagname } } func WithRegistryClient(client *registry.Client) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.registryClient = client } } func WithUntar() Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.unTar = true } } // WithTransport sets the http.Transport to allow overwriting the HTTPGetter default. func WithTransport(transport *http.Transport) Option { - return func(opts *options) { + return func(opts *getterOptions) { opts.transport = transport } } @@ -217,7 +218,7 @@ func Getters(extraOpts ...Option) Providers { // notations are collected. func All(settings *cli.EnvSettings, opts ...Option) Providers { result := Getters(opts...) - pluginDownloaders, _ := collectPlugins(settings) + pluginDownloaders, _ := collectGetterPlugins(settings) result = append(result, pluginDownloaders...) 
return result } diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go index 4cf528797..110f45c54 100644 --- a/pkg/getter/httpgetter.go +++ b/pkg/getter/httpgetter.go @@ -30,7 +30,7 @@ import ( // HTTPGetter is the default HTTP(/S) backend handler type HTTPGetter struct { - opts options + opts getterOptions transport *http.Transport once sync.Once } diff --git a/pkg/getter/httpgetter_test.go b/pkg/getter/httpgetter_test.go index a997c7f03..f87d71877 100644 --- a/pkg/getter/httpgetter_test.go +++ b/pkg/getter/httpgetter_test.go @@ -50,7 +50,7 @@ func TestHTTPGetter(t *testing.T) { timeout := time.Second * 5 transport := &http.Transport{} - // Test with options + // Test with getterOptions g, err = NewHTTPGetter( WithBasicAuth("I", "Am"), WithPassCredentialsAll(false), diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go index 7e8bcfcfb..45e7263fe 100644 --- a/pkg/getter/ocigetter.go +++ b/pkg/getter/ocigetter.go @@ -33,7 +33,7 @@ import ( // OCIGetter is the default HTTP(/S) backend handler type OCIGetter struct { - opts options + opts getterOptions transport *http.Transport once sync.Once } @@ -63,6 +63,8 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") { ref = fmt.Sprintf("%s:%s", ref, version) } + + // Default to chart behavior for backward compatibility var pullOpts []registry.PullOption requestingProv := strings.HasSuffix(ref, ".prov") if requestingProv { diff --git a/pkg/getter/ocigetter_test.go b/pkg/getter/ocigetter_test.go index e3d9278a5..ef196afcc 100644 --- a/pkg/getter/ocigetter_test.go +++ b/pkg/getter/ocigetter_test.go @@ -42,7 +42,7 @@ func TestOCIGetter(t *testing.T) { insecureSkipVerifyTLS := false plainHTTP := false - // Test with options + // Test with getterOptions g, err = NewOCIGetter( WithBasicAuth("I", "Am"), WithTLSClientConfig(pub, priv, ca), diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index 1893e8327..2b7669f23 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -17,92 +17,109 @@ package getter import ( "bytes" + "context" "fmt" - "os" - "os/exec" - "path/filepath" - "strings" + + "net/url" "helm.sh/helm/v4/internal/plugin" + + "helm.sh/helm/v4/internal/plugin/schema" "helm.sh/helm/v4/pkg/cli" ) -// collectPlugins scans for getter plugins. +// collectGetterPlugins scans for getter plugins. // This will load plugins according to the cli. 
-func collectPlugins(settings *cli.EnvSettings) (Providers, error) { - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) +func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) { + d := plugin.Descriptor{ + Type: "getter/v1", + } + plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d) if err != nil { return nil, err } - var result Providers - for _, plugin := range plugins { - for _, downloader := range plugin.Metadata.Downloaders { - result = append(result, Provider{ - Schemes: downloader.Protocols, - New: NewPluginGetter( - downloader.Command, - settings, - plugin.Metadata.Name, - plugin.Dir, - ), + pluginConstructorBuilder := func(plg plugin.Plugin) Constructor { + return func(option ...Option) (Getter, error) { + + return &getterPlugin{ + options: append([]Option{}, option...), + plg: plg, + }, nil + } + } + results := make([]Provider, 0, len(plgs)) + for _, plg := range plgs { + if c, ok := plg.Metadata().Config.(*plugin.ConfigGetter); ok { + results = append(results, Provider{ + Schemes: c.Protocols, + New: pluginConstructorBuilder(plg), }) } } - return result, nil + return results, nil } -// pluginGetter is a generic type to invoke custom downloaders, -// implemented in plugins. -type pluginGetter struct { - command string - settings *cli.EnvSettings - name string - base string - opts options +func convertOptions(globalOptions, options []Option) schema.GetterOptionsV1 { + opts := getterOptions{} + for _, opt := range globalOptions { + opt(&opts) + } + for _, opt := range options { + opt(&opts) + } + + result := schema.GetterOptionsV1{ + URL: opts.url, + CertFile: opts.certFile, + KeyFile: opts.keyFile, + CAFile: opts.caFile, + UNTar: opts.unTar, + InsecureSkipVerifyTLS: opts.insecureSkipVerifyTLS, + PlainHTTP: opts.plainHTTP, + AcceptHeader: opts.acceptHeader, + Username: opts.username, + Password: opts.password, + PassCredentialsAll: opts.passCredentialsAll, + UserAgent: opts.userAgent, + Version: opts.version, + Timeout: opts.timeout, + } + + return result } -func (p *pluginGetter) setupOptionsEnv(env []string) []string { - env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username)) - env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password)) - env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll)) - return env +type getterPlugin struct { + options []Option + plg plugin.Plugin } -// Get runs downloader plugin command -func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { - for _, opt := range options { - opt(&p.opts) - } - commands := strings.Split(p.command, " ") - argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href) - prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) 
- plugin.SetupPluginEnv(p.settings, p.name, p.base) - prog.Env = p.setupOptionsEnv(os.Environ()) - buf := bytes.NewBuffer(nil) - prog.Stdout = buf - prog.Stderr = os.Stderr - if err := prog.Run(); err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) - return nil, fmt.Errorf("plugin %q exited with error", p.command) - } +func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error) { + opts := convertOptions(g.options, options) + + // TODO optimization: pass this along to Get() instead of re-parsing here + u, err := url.Parse(href) + if err != nil { return nil, err } - return buf, nil -} -// NewPluginGetter constructs a valid plugin getter -func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor { - return func(options ...Option) (Getter, error) { - result := &pluginGetter{ - command: command, - settings: settings, - name: name, - base: base, - } - for _, opt := range options { - opt(&result.opts) - } - return result, nil + input := &plugin.Input{ + Message: schema.InputMessageGetterV1{ + Href: href, + Options: opts, + Protocol: u.Scheme, + }, + // TODO should we pass Stdin, Stdout, and Stderr through Input here to getter plugins? + //Stdout: os.Stdout, + } + output, err := g.plg.Invoke(context.Background(), input) + if err != nil { + return nil, fmt.Errorf("plugin %q failed to invoke: %w", g.plg, err) } + + outputMessage, ok := output.Message.(*schema.OutputMessageGetterV1) + if !ok { + return nil, fmt.Errorf("invalid output message type from plugin %q", g.plg.Metadata().Name) + } + + return bytes.NewBuffer(outputMessage.Data), nil } diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 310ab9e07..e7354819b 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -16,9 +16,16 @@ limitations under the License. 
package getter import ( - "runtime" - "strings" + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/plugin/schema" "helm.sh/helm/v4/pkg/cli" ) @@ -27,7 +34,7 @@ func TestCollectPlugins(t *testing.T) { env := cli.New() env.PluginsDirectory = pluginDir - p, err := collectPlugins(env) + p, err := collectGetterPlugins(env) if err != nil { t.Fatal(err) } @@ -49,53 +56,88 @@ func TestCollectPlugins(t *testing.T) { } } -func TestPluginGetter(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TODO: refactor this test to work on windows") +func TestConvertOptions(t *testing.T) { + opts := convertOptions( + []Option{ + WithURL("example://foo"), + WithAcceptHeader("Accept-Header"), + WithBasicAuth("username", "password"), + WithPassCredentialsAll(true), + WithUserAgent("User-agent"), + WithInsecureSkipVerifyTLS(true), + WithTLSClientConfig("certFile.pem", "keyFile.pem", "caFile.pem"), + WithPlainHTTP(true), + WithTimeout(10), + WithTagName("1.2.3"), + WithUntar(), + }, + []Option{ + WithTimeout(20), + }, + ) + + expected := schema.GetterOptionsV1{ + URL: "example://foo", + CertFile: "certFile.pem", + KeyFile: "keyFile.pem", + CAFile: "caFile.pem", + UNTar: true, + Timeout: 20, + InsecureSkipVerifyTLS: true, + PlainHTTP: true, + AcceptHeader: "Accept-Header", + Username: "username", + Password: "password", + PassCredentialsAll: true, + UserAgent: "User-agent", + Version: "1.2.3", } + assert.Equal(t, expected, opts) +} - env := cli.New() - env.PluginsDirectory = pluginDir - pg := NewPluginGetter("echo", env, "test", ".") - g, err := pg() - if err != nil { - t.Fatal(err) - } +type TestPlugin struct { + t *testing.T + dir string +} - data, err := g.Get("test://foo/bar") - if err != nil { - t.Fatal(err) - } +func (t *TestPlugin) Dir() string { + return t.dir +} - expect := "test://foo/bar" - got := strings.TrimSpace(data.String()) - if got != expect { - t.Errorf("Expected %q, got %q", expect, got) +func (t *TestPlugin) Metadata() plugin.Metadata { + return plugin.Metadata{ + Name: "fake-plugin", + Config: &plugin.ConfigGetter{}, + RuntimeConfig: &plugin.RuntimeConfigSubprocess{ + PlatformCommands: []plugin.PlatformCommand{ + { + Command: "echo fake-plugin", + }, + }, + }, } } -func TestPluginSubCommands(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TODO: refactor this test to work on windows") +func (t *TestPlugin) Invoke(_ context.Context, _ *plugin.Input) (*plugin.Output, error) { + // Simulate a plugin invocation + output := &plugin.Output{ + Message: &schema.OutputMessageGetterV1{ + Data: []byte("fake-plugin output"), + }, } + return output, nil +} - env := cli.New() - env.PluginsDirectory = pluginDir +var _ plugin.Plugin = (*TestPlugin)(nil) - pg := NewPluginGetter("echo -n", env, "test", ".") - g, err := pg() - if err != nil { - t.Fatal(err) +func TestGetterPlugin(t *testing.T) { + gp := getterPlugin{ + options: []Option{}, + plg: &TestPlugin{t: t, dir: "fake/dir"}, } - data, err := g.Get("test://foo/bar") - if err != nil { - t.Fatal(err) - } + buf, err := gp.Get("test://example.com", WithTimeout(5*time.Second)) + require.NoError(t, err) - expect := " test://foo/bar" - got := data.String() - if got != expect { - t.Errorf("Expected %q, got %q", expect, got) - } + assert.Equal(t, "fake-plugin output", buf.String()) } diff --git a/pkg/getter/testdata/plugins/testgetter/get.sh b/pkg/getter/testdata/plugins/testgetter/get.sh deleted file mode 100755 index 
cdd992369..000000000 --- a/pkg/getter/testdata/plugins/testgetter/get.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo ENVIRONMENT -env - -echo "" -echo ARGUMENTS -echo $@ diff --git a/pkg/getter/testdata/plugins/testgetter/plugin.yaml b/pkg/getter/testdata/plugins/testgetter/plugin.yaml index d1b929e3f..625b8b462 100644 --- a/pkg/getter/testdata/plugins/testgetter/plugin.yaml +++ b/pkg/getter/testdata/plugins/testgetter/plugin.yaml @@ -1,15 +1,6 @@ name: "testgetter" version: "0.1.0" -usage: "Fetch a package from a test:// source" -description: |- - Print the environment that the plugin was given, then exit. - - This registers the test:// protocol. - -command: "$HELM_PLUGIN_DIR/get.sh" -ignoreFlags: true downloaders: -#- command: "$HELM_PLUGIN_DIR/get.sh" -- command: "echo" - protocols: - - "test" + - command: "echo" + protocols: + - "test" diff --git a/pkg/getter/testdata/plugins/testgetter2/get.sh b/pkg/getter/testdata/plugins/testgetter2/get.sh deleted file mode 100755 index cdd992369..000000000 --- a/pkg/getter/testdata/plugins/testgetter2/get.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo ENVIRONMENT -env - -echo "" -echo ARGUMENTS -echo $@ diff --git a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml index f1a527ef9..4657bc9c1 100644 --- a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml +++ b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml @@ -1,10 +1,6 @@ name: "testgetter2" version: "0.1.0" -usage: "Fetch a different package from a test2:// source" -description: "Handle test2 scheme" -command: "$HELM_PLUGIN_DIR/get.sh" -ignoreFlags: true downloaders: -- command: "echo" - protocols: - - "test2" + - command: "echo" + protocols: + - "test2" From a7578fec748de1d747aa220cd13e7237670d5c84 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Sun, 17 Aug 2025 18:18:05 -0400 Subject: [PATCH 50/88] Plugin types and plugin apiVersion v1 Co-authored-by: George Jenkins Signed-off-by: Scott Rigby --- internal/plugin/config.go | 16 +++++ internal/plugin/doc.go | 4 +- .../plugin/installer/local_installer_test.go | 6 +- .../plugin/installer/vcs_installer_test.go | 2 +- internal/plugin/loader.go | 25 +++++++ internal/plugin/loader_test.go | 48 ++++++++++++- internal/plugin/metadata.go | 63 ++++++++++++++++- internal/plugin/metadata_v1.go | 67 +++++++++++++++++++ internal/plugin/plugin_test.go | 2 +- internal/plugin/runtime.go | 16 +++++ internal/plugin/subprocess_commands_test.go | 2 - .../bad/duplicate-entries-v1/plugin.yaml | 16 +++++ .../testdata/plugdir/good/echo-v1/plugin.yaml | 15 +++++ .../testdata/plugdir/good/getter/plugin.yaml | 16 +++++ .../testdata/plugdir/good/hello-v1/hello.ps1 | 3 + .../testdata/plugdir/good/hello-v1/hello.sh | 9 +++ .../plugdir/good/hello-v1/plugin.yaml | 32 +++++++++ pkg/cmd/flags.go | 1 - .../helm/plugins/fullenv/plugin.yaml | 12 +++- .../helmhome/helm/plugins/args/plugin.yaml | 12 +++- .../helmhome/helm/plugins/echo/plugin.yaml | 12 +++- .../helmhome/helm/plugins/env/plugin.yaml | 12 +++- .../helm/plugins/exitwith/plugin.yaml | 12 +++- .../helmhome/helm/plugins/fullenv/plugin.yaml | 12 +++- pkg/getter/plugingetter_test.go | 6 +- .../testdata/plugins/testgetter/plugin.yaml | 15 +++-- .../testdata/plugins/testgetter2/plugin.yaml | 15 +++-- 27 files changed, 411 insertions(+), 40 deletions(-) create mode 100644 internal/plugin/metadata_v1.go create mode 100644 internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml create mode 100644 
internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml create mode 100644 internal/plugin/testdata/plugdir/good/getter/plugin.yaml create mode 100644 internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 create mode 100755 internal/plugin/testdata/plugdir/good/hello-v1/hello.sh create mode 100644 internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml diff --git a/internal/plugin/config.go b/internal/plugin/config.go index f308e7ae9..812dba7f6 100644 --- a/internal/plugin/config.go +++ b/internal/plugin/config.go @@ -17,6 +17,8 @@ package plugin import ( "fmt" + + "go.yaml.in/yaml/v3" ) // Config interface defines the methods that all plugin type configurations must implement @@ -64,3 +66,17 @@ func (c *ConfigGetter) Validate() error { } return nil } + +func remarshalConfig[T Config](configData map[string]any) (Config, error) { + data, err := yaml.Marshal(configData) + if err != nil { + return nil, err + } + + var config T + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + + return config, nil +} diff --git a/internal/plugin/doc.go b/internal/plugin/doc.go index f150358bd..39ba6300b 100644 --- a/internal/plugin/doc.go +++ b/internal/plugin/doc.go @@ -55,7 +55,7 @@ Helm plugins are exposed to uses as the "Plugin" type, the basic interface that Internally, plugins must be implemented by a "runtime" that is responsible for creating the plugin instance, and dispatching the plugin's invocation to the plugin's implementation. For example: - forming environment variables and command line args for subprocess execution -- converting input to JSON and invoking a function in a future runtime (eg, Wasm) +- converting input to JSON and invoking a function in a Wasm runtime Internally, the code structure is: Runtime.CreatePlugin() @@ -78,7 +78,7 @@ Each plugin must have a `plugin.yaml`, that defines the plugin's metadata. The m For legacy plugins, the type is inferred by which fields are set on the plugin: a downloader plugin is inferred when metadata contains a "downloaders" yaml node, otherwise it is assumed to define a Helm CLI subcommand. -For future plugin api versions, the metadata will include explicit apiVersion and type fields. It will also contain type and runtime specific Config and RuntimeConfig fields. +For v1 plugins, the metadata includes explicit apiVersion and type fields. It will also contain type-specific Config, and RuntimeConfig fields. # Runtime and type cardinality From a cardinality perspective, this means there a "few" runtimes, and "many" plugins types. It is also expected that the subprocess runtime will not be extended to support extra plugin types, and deprecated in a future version of Helm. 
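
The config.go change above decodes the untyped "config" mapping from a v1 plugin.yaml into the plugin type's concrete Config struct by round-tripping it through YAML. A minimal, self-contained sketch of that round trip, using a local stand-in for ConfigGetter so it can run outside the Helm module (the names here are illustrative, not part of the patch):

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

// configGetter is a local stand-in for the patch's ConfigGetter type,
// carrying the "protocols" list from a getter/v1 plugin.yaml.
type configGetter struct {
	Protocols []string `yaml:"protocols"`
}

// remarshal mirrors what remarshalConfig[T] does: marshal the generic
// map back to YAML, then unmarshal it into the typed config.
func remarshal(configData map[string]any) (*configGetter, error) {
	data, err := yaml.Marshal(configData)
	if err != nil {
		return nil, err
	}
	var cfg configGetter
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

func main() {
	raw := map[string]any{"protocols": []any{"myprotocol", "myprotocols"}}
	cfg, err := remarshal(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Protocols) // [myprotocol myprotocols]
}
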
diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index 3b1c0f680..fdb669314 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -34,7 +34,7 @@ func TestLocalInstaller(t *testing.T) { t.Fatal(err) } - source := "../testdata/plugdir/good/echo-legacy" + source := "../testdata/plugdir/good/echo-v1" i, err := NewForSource(source, "") if err != nil { t.Fatalf("unexpected error: %s", err) @@ -44,14 +44,14 @@ func TestLocalInstaller(t *testing.T) { t.Fatal(err) } - if i.Path() != helmpath.DataPath("plugins", "echo-legacy") { + if i.Path() != helmpath.DataPath("plugins", "echo-v1") { t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path()) } defer os.RemoveAll(filepath.Dir(helmpath.DataPath())) // helmpath.DataPath is like /tmp/helm013130971/helm } func TestLocalInstallerNotAFolder(t *testing.T) { - source := "../testdata/plugdir/good/echo-legacy/plugin.yaml" + source := "../testdata/plugdir/good/echo-v1/plugin.yaml" i, err := NewForSource(source, "") if err != nil { t.Fatalf("unexpected error: %s", err) diff --git a/internal/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go index 9c65d244c..f024b4b40 100644 --- a/internal/plugin/installer/vcs_installer_test.go +++ b/internal/plugin/installer/vcs_installer_test.go @@ -57,7 +57,7 @@ func TestVCSInstaller(t *testing.T) { } source := "https://github.com/adamreese/helm-env" - testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo-legacy") + testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo-v1") repo := &testRepo{ local: testRepoPath, tags: []string{"0.1.0", "0.1.1"}, diff --git a/internal/plugin/loader.go b/internal/plugin/loader.go index b47b15d34..eb05cb722 100644 --- a/internal/plugin/loader.go +++ b/internal/plugin/loader.go @@ -58,6 +58,29 @@ func loadMetadataLegacy(metadataData []byte) (*Metadata, error) { return m, nil } +func loadMetadataV1(metadataData []byte) (*Metadata, error) { + + var mv1 MetadataV1 + d := yaml.NewDecoder(bytes.NewReader(metadataData)) + if err := d.Decode(&mv1); err != nil { + return nil, err + } + + if err := mv1.Validate(); err != nil { + return nil, err + } + + m, err := fromMetadataV1(mv1) + if err != nil { + return nil, fmt.Errorf("failed to convert MetadataV1 to Metadata: %w", err) + } + + if err := m.Validate(); err != nil { + return nil, err + } + return m, nil +} + func loadMetadata(metadataData []byte) (*Metadata, error) { apiVersion, err := peekAPIVersion(bytes.NewReader(metadataData)) if err != nil { @@ -67,6 +90,8 @@ func loadMetadata(metadataData []byte) (*Metadata, error) { switch apiVersion { case "": // legacy return loadMetadataLegacy(metadataData) + case "v1": + return loadMetadataV1(metadataData) } return nil, fmt.Errorf("invalid plugin apiVersion: %q", apiVersion) diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go index b80d6a096..81ef26e02 100644 --- a/internal/plugin/loader_test.go +++ b/internal/plugin/loader_test.go @@ -29,6 +29,13 @@ func TestPeekAPIVersion(t *testing.T) { data []byte expected string }{ + "v1": { + data: []byte(`--- +apiVersion: v1 +name: "test-plugin" +`), + expected: "v1", + }, "legacy": { // No apiVersion field data: []byte(`--- name: "test-plugin" @@ -97,6 +104,11 @@ func TestLoadDir(t *testing.T) { apiVersion: "legacy", expect: makeMetadata("legacy"), }, + "v1": { + dirname: "testdata/plugdir/good/hello-v1", + apiVersion: "v1", + 
expect: makeMetadata("v1"), + }, } for name, tc := range testCases { @@ -113,6 +125,7 @@ func TestLoadDir(t *testing.T) { func TestLoadDirDuplicateEntries(t *testing.T) { testCases := map[string]string{ "legacy": "testdata/plugdir/bad/duplicate-entries-legacy", + "v1": "testdata/plugdir/bad/duplicate-entries-v1", } for name, dirname := range testCases { t.Run(name, func(t *testing.T) { @@ -122,6 +135,34 @@ func TestLoadDirDuplicateEntries(t *testing.T) { } } +func TestLoadDirGetter(t *testing.T) { + dirname := "testdata/plugdir/good/getter" + + expect := Metadata{ + Name: "getter", + Version: "1.2.3", + Type: "getter/v1", + APIVersion: "v1", + Runtime: "subprocess", + Config: &ConfigGetter{ + Protocols: []string{"myprotocol", "myprotocols"}, + }, + RuntimeConfig: &RuntimeConfigSubprocess{ + ProtocolCommands: []SubprocessProtocolCommand{ + { + Protocols: []string{"myprotocol", "myprotocols"}, + Command: "echo getter", + }, + }, + }, + } + + plug, err := LoadDir(dirname) + require.NoError(t, err) + assert.Equal(t, dirname, plug.Dir()) + assert.Equal(t, expect, plug.Metadata()) +} + func TestDetectDuplicates(t *testing.T) { plugs := []Plugin{ mockSubprocessCLIPlugin(t, "foo"), @@ -154,10 +195,13 @@ func TestLoadAll(t *testing.T) { plugsMap[p.Metadata().Name] = p } - assert.Len(t, plugsMap, 3) + assert.Len(t, plugsMap, 6) assert.Contains(t, plugsMap, "downloader") assert.Contains(t, plugsMap, "echo-legacy") + assert.Contains(t, plugsMap, "echo-v1") + assert.Contains(t, plugsMap, "getter") assert.Contains(t, plugsMap, "hello-legacy") + assert.Contains(t, plugsMap, "hello-v1") } func TestFindPlugins(t *testing.T) { @@ -184,7 +228,7 @@ func TestFindPlugins(t *testing.T) { { name: "normal", plugdirs: "./testdata/plugdir/good", - expected: 3, + expected: 6, }, } for _, c := range cases { diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index b899ef336..48741474e 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -20,7 +20,7 @@ import ( "fmt" ) -// Metadata of a plugin, converted from the "on-disk" plugin.yaml +// Metadata of a plugin, converted from the "on-disk" legacy or v1 plugin.yaml // Specifically, Config and RuntimeConfig are converted to their respective types based on the plugin type and runtime type Metadata struct { // APIVersion specifies the plugin API version @@ -153,3 +153,64 @@ func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig { ProtocolCommands: protocolCommands, } } + +func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) { + + config, err := convertMetadataConfig(mv1.Type, mv1.Config) + if err != nil { + return nil, err + } + + runtimeConfig, err := convertMetdataRuntimeConfig(mv1.Runtime, mv1.RuntimeConfig) + if err != nil { + return nil, err + } + + return &Metadata{ + APIVersion: mv1.APIVersion, + Name: mv1.Name, + Type: mv1.Type, + Runtime: mv1.Runtime, + Version: mv1.Version, + SourceURL: mv1.SourceURL, + Config: config, + RuntimeConfig: runtimeConfig, + }, nil +} + +func convertMetadataConfig(pluginType string, configRaw map[string]any) (Config, error) { + var err error + var config Config + + switch pluginType { + case "cli/v1": + config, err = remarshalConfig[*ConfigCLI](configRaw) + case "getter/v1": + config, err = remarshalConfig[*ConfigGetter](configRaw) + default: + return nil, fmt.Errorf("unsupported plugin type: %s", pluginType) + } + + if err != nil { + return nil, fmt.Errorf("failed to unmarshal config for %s plugin type: %w", pluginType, err) + } + + return config, nil +} + +func 
convertMetdataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) { + var runtimeConfig RuntimeConfig + var err error + + switch runtimeType { + case "subprocess": + runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigSubprocess](runtimeConfigRaw) + default: + return nil, fmt.Errorf("unsupported plugin runtime type: %q", runtimeType) + } + + if err != nil { + return nil, fmt.Errorf("failed to unmarshal runtimeConfig for %s runtime: %w", runtimeType, err) + } + return runtimeConfig, nil +} diff --git a/internal/plugin/metadata_v1.go b/internal/plugin/metadata_v1.go new file mode 100644 index 000000000..654aa8900 --- /dev/null +++ b/internal/plugin/metadata_v1.go @@ -0,0 +1,67 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "fmt" +) + +// MetadataV1 is the APIVersion V1 plugin.yaml format +type MetadataV1 struct { + // APIVersion specifies the plugin API version + APIVersion string `yaml:"apiVersion"` + + // Name is the name of the plugin + Name string `yaml:"name"` + + // Type of plugin (eg, cli/v1, getter/v1) + Type string `yaml:"type"` + + // Runtime specifies the runtime type (subprocess, wasm) + Runtime string `yaml:"runtime"` + + // Version is a SemVer 2 version of the plugin. + Version string `yaml:"version"` + + // SourceURL is the URL where this plugin can be found + SourceURL string `yaml:"sourceURL,omitempty"` + + // Config contains the type-specific configuration for this plugin + Config map[string]any `yaml:"config"` + + // RuntimeConfig contains the runtime-specific configuration + RuntimeConfig map[string]any `yaml:"runtimeConfig"` +} + +func (m *MetadataV1) Validate() error { + if !validPluginName.MatchString(m.Name) { + return fmt.Errorf("invalid plugin `name`") + } + + if m.APIVersion != "v1" { + return fmt.Errorf("invalid `apiVersion`: %q", m.APIVersion) + } + + if m.Type == "" { + return fmt.Errorf("`type` missing") + } + + if m.Runtime == "" { + return fmt.Errorf("`runtime` missing") + } + + return nil +} diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index 3c78006b7..fbebecac4 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -42,7 +42,7 @@ func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginR Name: pluginName, Version: "v0.1.2", Type: "cli/v1", - APIVersion: "legacy", + APIVersion: "v1", Runtime: "subprocess", Config: &ConfigCLI{ Usage: "Mock plugin", diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go index 87f068724..8add92dea 100644 --- a/internal/plugin/runtime.go +++ b/internal/plugin/runtime.go @@ -15,6 +15,8 @@ limitations under the License. package plugin +import "go.yaml.in/yaml/v3" + // Runtime represents a plugin runtime (subprocess, extism, etc) ie. how a plugin should be executed // Runtime is responsible for instantiating plugins that implement the runtime // TODO: could call this something more like "PluginRuntimeCreator"? 
@@ -31,3 +33,17 @@ type Runtime interface { type RuntimeConfig interface { Validate() error } + +func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (RuntimeConfig, error) { + data, err := yaml.Marshal(runtimeData) + if err != nil { + return nil, err + } + + var config T + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + + return config, nil +} diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go index 3879a4bd0..3cb9325ab 100644 --- a/internal/plugin/subprocess_commands_test.go +++ b/internal/plugin/subprocess_commands_test.go @@ -86,8 +86,6 @@ func TestPrepareCommandExtraArgs(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - //expectedArgs := append(cmdArgs, extraArgs...) - // extra args are expected when ignoreFlags is unset or false testExtraArgs := extraArgs if tc.ignoreFlags { diff --git a/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml new file mode 100644 index 000000000..030ae6aca --- /dev/null +++ b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml @@ -0,0 +1,16 @@ +name: "duplicate-entries" +version: "0.1.0" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "test duplicate entries" + longHelp: |- + description + ignoreFlags: true +runtimeConfig: + command: "echo hello" + hooks: + install: "echo installing..." + hooks: + install: "echo installing something different" diff --git a/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml new file mode 100644 index 000000000..8bbef9c0f --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml @@ -0,0 +1,15 @@ +--- +name: "echo-v1" +version: "1.2.3" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "echo something" + longHelp: |- + This is a testing fixture. + ignoreFlags: false +runtimeConfig: + command: "echo Hello" + hooks: + install: "echo Installing" diff --git a/internal/plugin/testdata/plugdir/good/getter/plugin.yaml b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml new file mode 100644 index 000000000..cfe80fbdc --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml @@ -0,0 +1,16 @@ +--- +name: "getter" +version: "1.2.3" +type: getter/v1 +apiVersion: v1 +runtime: subprocess +config: + protocols: + - "myprotocol" + - "myprotocols" +runtimeConfig: + protocolCommands: + - command: "echo getter" + protocols: + - "myprotocol" + - "myprotocols" diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 b/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 new file mode 100644 index 000000000..bee61f27d --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 @@ -0,0 +1,3 @@ +#!/usr/bin/env pwsh + +Write-Host "Hello, world!" 
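For reference, remarshalConfig — called by convertMetadataConfig in metadata.go above but not shown in this excerpt — presumably mirrors the remarshalRuntimeConfig generic added to runtime.go earlier in this patch. A minimal sketch under that assumption (reusing the same go.yaml.in/yaml/v3 import); the actual implementation may differ:

// Hypothetical sketch, inferred from remarshalRuntimeConfig above.
func remarshalConfig[T Config](configData map[string]any) (Config, error) {
	// Re-encode the raw map parsed from plugin.yaml, then decode it into the typed Config struct.
	data, err := yaml.Marshal(configData)
	if err != nil {
		return nil, err
	}

	var config T
	if err := yaml.Unmarshal(data, &config); err != nil {
		return nil, err
	}

	return config, nil
}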
diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh new file mode 100755 index 000000000..dcfd58876 --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "Hello from a Helm plugin" + +echo "PARAMS" +echo $* + +$HELM_BIN ls --all + diff --git a/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml new file mode 100644 index 000000000..044a3476d --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml @@ -0,0 +1,32 @@ +--- +name: "hello-v1" +version: "0.1.0" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + usage: hello [params]... + shortHelp: "echo hello message" + longHelp: |- + description + ignoreFlags: true +runtimeConfig: + platformCommand: + - os: linux + arch: + command: "sh" + args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"] + - os: windows + arch: + command: "pwsh" + args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"] + platformHooks: + install: + - os: linux + arch: "" + command: "sh" + args: ["-c", 'echo "installing..."'] + - os: windows + arch: "" + command: "pwsh" + args: ["-c", 'echo "installing..."'] diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 420631264..d11073e5f 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -164,7 +164,6 @@ func (o *outputValue) Set(s string) error { return nil } -// TODO there is probably a better way to pass cobra settings than as a param func bindPostRenderFlag(cmd *cobra.Command, varRef *postrender.PostRenderer) { p := &postRendererOptions{varRef, "", []string{}} cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the path to an executable to be used for post rendering. 
If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path") diff --git a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml index 63f2f12db..8b874da1d 100644 --- a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml @@ -1,4 +1,10 @@ name: fullenv -usage: "show env vars" -description: "show all env vars" -command: "$HELM_PLUGIN_DIR/fullenv.sh" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "show env vars" + longHelp: "show all env vars" + ignoreFlags: false +runtimeConfig: + command: "$HELM_PLUGIN_DIR/fullenv.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml index 21e28a7c2..57312cbfa 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml @@ -1,4 +1,10 @@ name: args -usage: "echo args" -description: "This echos args" -command: "$HELM_PLUGIN_DIR/args.sh" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "echo args" + longHelp: "This echos args" + ignoreFlags: false +runtimeConfig: + command: "$HELM_PLUGIN_DIR/args.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml index 7b9362a08..544efa85e 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml @@ -1,4 +1,10 @@ name: echo -usage: "echo stuff" -description: "This echos stuff" -command: "echo hello" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "echo stuff" + longHelp: "This echos stuff" + ignoreFlags: false +runtimeConfig: + command: "echo hello" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml index 52cb7a848..d7a4c229c 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml @@ -1,4 +1,10 @@ name: env -usage: "env stuff" -description: "show the env" -command: "echo $HELM_PLUGIN_NAME" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "env stuff" + longHelp: "show the env" + ignoreFlags: false +runtimeConfig: + command: "echo $HELM_PLUGIN_NAME" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml index 5691d1712..06a350f83 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml @@ -1,4 +1,10 @@ name: exitwith -usage: "exitwith code" -description: "This exits with the specified exit code" -command: "$HELM_PLUGIN_DIR/exitwith.sh" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "exitwith code" + longHelp: "This exits with the specified exit code" + ignoreFlags: false +runtimeConfig: + command: "$HELM_PLUGIN_DIR/exitwith.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml index 63f2f12db..8b874da1d 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml @@ -1,4 +1,10 @@ name: fullenv -usage: "show env vars" 
-description: "show all env vars" -command: "$HELM_PLUGIN_DIR/fullenv.sh" +type: cli/v1 +apiVersion: v1 +runtime: subprocess +config: + shortHelp: "show env vars" + longHelp: "show all env vars" + ignoreFlags: false +runtimeConfig: + command: "$HELM_PLUGIN_DIR/fullenv.sh" diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index e7354819b..85c847752 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -106,7 +106,11 @@ func (t *TestPlugin) Dir() string { func (t *TestPlugin) Metadata() plugin.Metadata { return plugin.Metadata{ - Name: "fake-plugin", + Name: "fake-plugin", + Type: "cli/v1", + APIVersion: "v1", + Runtime: "subprocess", + // TODO: either change Config to plugin.ConfigCLI, or change APIVersion to getter/v1? Config: &plugin.ConfigGetter{}, RuntimeConfig: &plugin.RuntimeConfigSubprocess{ PlatformCommands: []plugin.PlatformCommand{ diff --git a/pkg/getter/testdata/plugins/testgetter/plugin.yaml b/pkg/getter/testdata/plugins/testgetter/plugin.yaml index 625b8b462..ca11b95ea 100644 --- a/pkg/getter/testdata/plugins/testgetter/plugin.yaml +++ b/pkg/getter/testdata/plugins/testgetter/plugin.yaml @@ -1,6 +1,13 @@ name: "testgetter" version: "0.1.0" -downloaders: - - command: "echo" - protocols: - - "test" +type: getter/v1 +apiVersion: v1 +runtime: subprocess +config: + protocols: + - "test" +runtimeConfig: + protocolCommands: + - command: "echo" + protocols: + - "test" diff --git a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml index 4657bc9c1..1c944a7c7 100644 --- a/pkg/getter/testdata/plugins/testgetter2/plugin.yaml +++ b/pkg/getter/testdata/plugins/testgetter2/plugin.yaml @@ -1,6 +1,13 @@ name: "testgetter2" version: "0.1.0" -downloaders: - - command: "echo" - protocols: - - "test2" +type: getter/v1 +apiVersion: v1 +runtime: subprocess +config: + protocols: + - "test2" +runtimeConfig: + protocolCommands: + - command: "echo" + protocols: + - "test2" From 533eddc57d4727f1422d6e8a3e5d8fa6fbf8697e Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Fri, 22 Aug 2025 15:41:47 -0400 Subject: [PATCH 51/88] Add content cache to helm env Signed-off-by: Matt Farina --- pkg/cli/environment.go | 1 + pkg/cmd/testdata/output/env-comp.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/cli/environment.go b/pkg/cli/environment.go index 19563cba3..106d24336 100644 --- a/pkg/cli/environment.go +++ b/pkg/cli/environment.go @@ -249,6 +249,7 @@ func (s *EnvSettings) EnvVars() map[string]string { "HELM_PLUGINS": s.PluginsDirectory, "HELM_REGISTRY_CONFIG": s.RegistryConfig, "HELM_REPOSITORY_CACHE": s.RepositoryCache, + "HELM_CONTENT_CACHE": s.ContentCache, "HELM_REPOSITORY_CONFIG": s.RepositoryConfig, "HELM_NAMESPACE": s.Namespace(), "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory), diff --git a/pkg/cmd/testdata/output/env-comp.txt b/pkg/cmd/testdata/output/env-comp.txt index 8f9c53fc7..9d38ee464 100644 --- a/pkg/cmd/testdata/output/env-comp.txt +++ b/pkg/cmd/testdata/output/env-comp.txt @@ -2,6 +2,7 @@ HELM_BIN HELM_BURST_LIMIT HELM_CACHE_HOME HELM_CONFIG_HOME +HELM_CONTENT_CACHE HELM_DATA_HOME HELM_DEBUG HELM_KUBEAPISERVER From 7d22bb25faea807a4d2162e1a5c7f61ea3877f8b Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Thu, 21 Aug 2025 03:18:32 -0400 Subject: [PATCH 52/88] Plugin OCI installer Signed-off-by: Scott Rigby --- internal/plugin/installer/oci_installer.go | 229 +++++ .../plugin/installer/oci_installer_test.go | 814 ++++++++++++++++++ pkg/cmd/plugin_install.go | 40 +- 
pkg/cmd/plugin_uninstall.go | 31 + pkg/cmd/plugin_uninstall_test.go | 146 ++++ pkg/getter/getter.go | 8 + pkg/getter/ocigetter.go | 29 + pkg/getter/plugingetter_test.go | 3 +- pkg/registry/client.go | 144 ++-- pkg/registry/generic.go | 162 ++++ pkg/registry/plugin.go | 176 ++++ 11 files changed, 1705 insertions(+), 77 deletions(-) create mode 100644 internal/plugin/installer/oci_installer.go create mode 100644 internal/plugin/installer/oci_installer_test.go create mode 100644 pkg/cmd/plugin_uninstall_test.go create mode 100644 pkg/registry/generic.go create mode 100644 pkg/registry/plugin.go diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go new file mode 100644 index 000000000..acb28ccf9 --- /dev/null +++ b/internal/plugin/installer/oci_installer.go @@ -0,0 +1,229 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installer + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + "strings" + + "helm.sh/helm/v4/internal/plugin/cache" + "helm.sh/helm/v4/internal/third_party/dep/fs" + "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/getter" + "helm.sh/helm/v4/pkg/helmpath" + "helm.sh/helm/v4/pkg/registry" +) + +// OCIInstaller installs plugins from OCI registries +type OCIInstaller struct { + CacheDir string + PluginName string + base + settings *cli.EnvSettings + getter getter.Getter +} + +// NewOCIInstaller creates a new OCIInstaller with optional getter options +func NewOCIInstaller(source string, options ...getter.Option) (*OCIInstaller, error) { + ref := strings.TrimPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) + + // Extract plugin name from OCI reference + // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name" + parts := strings.Split(ref, "/") + if len(parts) < 2 { + return nil, fmt.Errorf("invalid OCI reference: %s", source) + } + lastPart := parts[len(parts)-1] + pluginName := lastPart + if idx := strings.LastIndex(lastPart, ":"); idx > 0 { + pluginName = lastPart[:idx] + } + if idx := strings.LastIndex(lastPart, "@"); idx > 0 { + pluginName = lastPart[:idx] + } + + key, err := cache.Key(source) + if err != nil { + return nil, err + } + + settings := cli.New() + + // Always add plugin artifact type and any provided options + pluginOptions := append([]getter.Option{getter.WithArtifactType("plugin")}, options...) + getterProvider, err := getter.NewOCIGetter(pluginOptions...) + if err != nil { + return nil, err + } + + i := &OCIInstaller{ + CacheDir: helmpath.CachePath("plugins", key), + PluginName: pluginName, + base: newBase(source), + settings: settings, + getter: getterProvider, + } + return i, nil +} + +// Install downloads and installs a plugin from OCI registry +// Implements Installer. 
+func (i *OCIInstaller) Install() error { + slog.Debug("pulling OCI plugin", "source", i.Source) + + // Use getter to download the plugin + pluginData, err := i.getter.Get(i.Source) + if err != nil { + return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + } + + // Create cache directory + if err := os.MkdirAll(i.CacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + // Check if this is a gzip compressed file + pluginBytes := pluginData.Bytes() + if len(pluginBytes) < 2 || pluginBytes[0] != 0x1f || pluginBytes[1] != 0x8b { + return fmt.Errorf("plugin data is not a gzip compressed archive") + } + + // Extract as gzipped tar + if err := extractTarGz(bytes.NewReader(pluginBytes), i.CacheDir); err != nil { + return fmt.Errorf("failed to extract plugin: %w", err) + } + + // Verify plugin.yaml exists - check root and subdirectories + pluginDir := i.CacheDir + if !isPlugin(pluginDir) { + // Check if plugin.yaml is in a subdirectory + entries, err := os.ReadDir(i.CacheDir) + if err != nil { + return err + } + + foundPluginDir := "" + for _, entry := range entries { + if entry.IsDir() { + subDir := filepath.Join(i.CacheDir, entry.Name()) + if isPlugin(subDir) { + foundPluginDir = subDir + break + } + } + } + + if foundPluginDir == "" { + return ErrMissingMetadata + } + + // Use the subdirectory as the plugin directory + pluginDir = foundPluginDir + } + + // Copy from cache to final destination + src, err := filepath.Abs(pluginDir) + if err != nil { + return err + } + + slog.Debug("copying", "source", src, "path", i.Path()) + return fs.CopyDir(src, i.Path()) +} + +// Update updates a plugin by reinstalling it +func (i *OCIInstaller) Update() error { + // For OCI, update means removing the old version and installing the new one + if err := os.RemoveAll(i.Path()); err != nil { + return err + } + return i.Install() +} + +// Path is where the plugin will be installed +func (i OCIInstaller) Path() string { + if i.Source == "" { + return "" + } + return filepath.Join(i.settings.PluginsDirectory, i.PluginName) +} + +// extractTarGz extracts a gzipped tar archive to a directory +func extractTarGz(r io.Reader, targetDir string) error { + gzr, err := gzip.NewReader(r) + if err != nil { + return err + } + defer gzr.Close() + + return extractTar(gzr, targetDir) +} + +// extractTar extracts a tar archive to a directory +func extractTar(r io.Reader, targetDir string) error { + tarReader := tar.NewReader(r) + + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + path, err := cleanJoin(targetDir, header.Name) + if err != nil { + return err + } + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + case tar.TypeReg: + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + + outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(outFile, tarReader); err != nil { + outFile.Close() + return err + } + outFile.Close() + case tar.TypeXGlobalHeader, tar.TypeXHeader: + // Skip these + continue + default: + return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name) + } + } + + return nil +} diff --git a/internal/plugin/installer/oci_installer_test.go b/internal/plugin/installer/oci_installer_test.go new file mode 100644 index 000000000..1ed10ff8e --- /dev/null +++ 
b/internal/plugin/installer/oci_installer_test.go @@ -0,0 +1,814 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installer // import "helm.sh/helm/v4/internal/plugin/installer" + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/getter" + "helm.sh/helm/v4/pkg/helmpath" +) + +var _ Installer = new(OCIInstaller) + +// createTestPluginTarGz creates a test plugin tar.gz with plugin.yaml +func createTestPluginTarGz(t *testing.T, pluginName string) []byte { + t.Helper() + + var buf bytes.Buffer + gzWriter := gzip.NewWriter(&buf) + tarWriter := tar.NewWriter(gzWriter) + + // Add plugin.yaml + pluginYAML := fmt.Sprintf(`name: %s +version: "1.0.0" +description: "Test plugin for OCI installer" +command: "$HELM_PLUGIN_DIR/bin/%s" +`, pluginName, pluginName) + header := &tar.Header{ + Name: "plugin.yaml", + Mode: 0644, + Size: int64(len(pluginYAML)), + Typeflag: tar.TypeReg, + } + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatal(err) + } + if _, err := tarWriter.Write([]byte(pluginYAML)); err != nil { + t.Fatal(err) + } + + // Add bin directory + dirHeader := &tar.Header{ + Name: "bin/", + Mode: 0755, + Typeflag: tar.TypeDir, + } + if err := tarWriter.WriteHeader(dirHeader); err != nil { + t.Fatal(err) + } + + // Add executable + execContent := fmt.Sprintf("#!/bin/sh\necho '%s test plugin'", pluginName) + execHeader := &tar.Header{ + Name: fmt.Sprintf("bin/%s", pluginName), + Mode: 0755, + Size: int64(len(execContent)), + Typeflag: tar.TypeReg, + } + if err := tarWriter.WriteHeader(execHeader); err != nil { + t.Fatal(err) + } + if _, err := tarWriter.Write([]byte(execContent)); err != nil { + t.Fatal(err) + } + + tarWriter.Close() + gzWriter.Close() + + return buf.Bytes() +} + +// mockOCIRegistryWithArtifactType creates a mock OCI registry server using the new artifact type approach +func mockOCIRegistryWithArtifactType(t *testing.T, pluginName string) (*httptest.Server, string) { + t.Helper() + + pluginData := createTestPluginTarGz(t, pluginName) + layerDigest := fmt.Sprintf("sha256:%x", sha256Sum(pluginData)) + + // Create empty config data (as per OCI v1.1+ spec) + configData := []byte("{}") + configDigest := fmt.Sprintf("sha256:%x", sha256Sum(configData)) + + // Create manifest with artifact type + manifest := ocispec.Manifest{ + MediaType: ocispec.MediaTypeImageManifest, + ArtifactType: "application/vnd.helm.plugin.v1+json", // Using artifact type + Config: ocispec.Descriptor{ + MediaType: "application/vnd.oci.empty.v1+json", // Empty config + Digest: digest.Digest(configDigest), + Size: int64(len(configData)), + }, + Layers: []ocispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: digest.Digest(layerDigest), + Size: 
int64(len(pluginData)), + Annotations: map[string]string{ + ocispec.AnnotationTitle: pluginName + ".tgz", // Layer named properly + }, + }, + }, + } + + manifestData, err := json.Marshal(manifest) + if err != nil { + t.Fatal(err) + } + manifestDigest := fmt.Sprintf("sha256:%x", sha256Sum(manifestData)) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/v2/") && !strings.Contains(r.URL.Path, "/manifests/") && !strings.Contains(r.URL.Path, "/blobs/"): + // API version check + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte("{}")) + + case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/manifests/") && strings.Contains(r.URL.Path, pluginName): + // Return manifest + w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest) + w.Header().Set("Docker-Content-Digest", manifestDigest) + w.WriteHeader(http.StatusOK) + w.Write(manifestData) + + case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+layerDigest): + // Return layer data + w.Header().Set("Content-Type", "application/vnd.oci.image.layer.v1.tar") + w.WriteHeader(http.StatusOK) + w.Write(pluginData) + + case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+configDigest): + // Return config data + w.Header().Set("Content-Type", "application/vnd.oci.empty.v1+json") + w.WriteHeader(http.StatusOK) + w.Write(configData) + + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + // Parse server URL to get host:port format for OCI reference + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatal(err) + } + registryHost := serverURL.Host + + return server, registryHost +} + +// sha256Sum calculates SHA256 sum of data +func sha256Sum(data []byte) []byte { + h := sha256.New() + h.Write(data) + return h.Sum(nil) +} + +func TestNewOCIInstaller(t *testing.T) { + tests := []struct { + name string + source string + expectName string + expectError bool + }{ + { + name: "valid OCI reference with tag", + source: "oci://ghcr.io/user/plugin-name:v1.0.0", + expectName: "plugin-name", + expectError: false, + }, + { + name: "valid OCI reference with digest", + source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef", + expectName: "plugin-name", + expectError: false, + }, + { + name: "valid OCI reference without tag", + source: "oci://ghcr.io/user/plugin-name", + expectName: "plugin-name", + expectError: false, + }, + { + name: "valid OCI reference with multiple path segments", + source: "oci://registry.example.com/org/team/plugin-name:latest", + expectName: "plugin-name", + expectError: false, + }, + { + name: "invalid OCI reference - no path", + source: "oci://registry.example.com", + expectName: "", + expectError: true, + }, + { + name: "valid OCI reference - single path segment", + source: "oci://registry.example.com/plugin", + expectName: "plugin", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + installer, err := NewOCIInstaller(tt.source) + + if tt.expectError { + if err == nil { + t.Errorf("expected error but got none") + } + return + } + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + // Check all fields thoroughly + if installer.PluginName != tt.expectName { + t.Errorf("expected plugin name %s, got %s", tt.expectName, installer.PluginName) + } + + if 
installer.Source != tt.source { + t.Errorf("expected source %s, got %s", tt.source, installer.Source) + } + + if installer.CacheDir == "" { + t.Error("expected non-empty cache directory") + } + + if !strings.Contains(installer.CacheDir, "plugins") { + t.Errorf("expected cache directory to contain 'plugins', got %s", installer.CacheDir) + } + + if installer.settings == nil { + t.Error("expected settings to be initialized") + } + + // Check that Path() method works + expectedPath := helmpath.DataPath("plugins", tt.expectName) + if installer.Path() != expectedPath { + t.Errorf("expected path %s, got %s", expectedPath, installer.Path()) + } + }) + } +} + +func TestOCIInstaller_Path(t *testing.T) { + tests := []struct { + name string + source string + pluginName string + expectPath string + }{ + { + name: "valid plugin name", + source: "oci://ghcr.io/user/plugin-name:v1.0.0", + pluginName: "plugin-name", + expectPath: helmpath.DataPath("plugins", "plugin-name"), + }, + { + name: "empty source", + source: "", + pluginName: "", + expectPath: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + installer := &OCIInstaller{ + PluginName: tt.pluginName, + base: newBase(tt.source), + settings: cli.New(), + } + + path := installer.Path() + if path != tt.expectPath { + t.Errorf("expected path %s, got %s", tt.expectPath, path) + } + }) + } +} + +func TestOCIInstaller_Install(t *testing.T) { + // Set up isolated test environment FIRST + testPluginsDir := t.TempDir() + t.Setenv("HELM_PLUGINS", testPluginsDir) + + pluginName := "test-plugin-basic" + server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) + defer server.Close() + + // Test OCI reference + source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName) + + // Test with plain HTTP (since test server uses HTTP) + installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true)) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + // The OCI installer uses helmpath.DataPath, which now points to our test directory + actualPath := installer.Path() + t.Logf("Installer will use path: %s", actualPath) + + // Verify the path is actually in our test directory + if !strings.HasPrefix(actualPath, testPluginsDir) { + t.Fatalf("Expected path %s to be under test directory %s", actualPath, testPluginsDir) + } + + // Install the plugin + if err := Install(installer); err != nil { + t.Fatalf("Expected installation to succeed, got error: %v", err) + } + + // Verify plugin was installed to the correct location + if !isPlugin(actualPath) { + t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath) + } + + // Debug: list what was actually created + if entries, err := os.ReadDir(actualPath); err != nil { + t.Fatalf("Could not read plugin directory %s: %v", actualPath, err) + } else { + t.Logf("Plugin directory %s contains:", actualPath) + for _, entry := range entries { + t.Logf(" - %s", entry.Name()) + } + } + + // Verify the plugin.yaml file exists and is valid + pluginFile := filepath.Join(actualPath, "plugin.yaml") + if _, err := os.Stat(pluginFile); err != nil { + t.Errorf("Expected plugin.yaml to exist, got error: %v", err) + } +} + +func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) { + testCases := []struct { + name string + pluginName string + options []getter.Option + wantErr bool + }{ + { + name: "plain HTTP", + pluginName: "example-cli-plain-http", + options: []getter.Option{getter.WithPlainHTTP(true)}, + wantErr: false, + }, + { + name: "insecure 
skip TLS verify", + pluginName: "example-cli-insecure", + options: []getter.Option{getter.WithPlainHTTP(true), getter.WithInsecureSkipVerifyTLS(true)}, + wantErr: false, + }, + { + name: "with timeout", + pluginName: "example-cli-timeout", + options: []getter.Option{getter.WithPlainHTTP(true), getter.WithTimeout(30 * time.Second)}, + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Set up isolated test environment for each subtest + testPluginsDir := t.TempDir() + t.Setenv("HELM_PLUGINS", testPluginsDir) + + server, registryHost := mockOCIRegistryWithArtifactType(t, tc.pluginName) + defer server.Close() + + source := fmt.Sprintf("oci://%s/%s:latest", registryHost, tc.pluginName) + + installer, err := NewOCIInstaller(source, tc.options...) + if err != nil { + if !tc.wantErr { + t.Fatalf("Expected no error creating installer, got %v", err) + } + return + } + + // The installer now uses our isolated test directory + actualPath := installer.Path() + + // Install the plugin + err = Install(installer) + if tc.wantErr { + if err == nil { + t.Errorf("Expected installation to fail, but it succeeded") + } + } else { + if err != nil { + t.Errorf("Expected installation to succeed, got error: %v", err) + } else { + // Verify plugin was installed to the actual path + if !isPlugin(actualPath) { + t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath) + } + } + } + }) + } +} + +func TestOCIInstaller_Install_AlreadyExists(t *testing.T) { + // Set up isolated test environment + testPluginsDir := t.TempDir() + t.Setenv("HELM_PLUGINS", testPluginsDir) + + pluginName := "test-plugin-exists" + server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) + defer server.Close() + + source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName) + installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true)) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + // First install should succeed + if err := Install(installer); err != nil { + t.Fatalf("Expected first installation to succeed, got error: %v", err) + } + + // Verify plugin was installed + if !isPlugin(installer.Path()) { + t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path()) + } + + // Second install should fail with "plugin already exists" + err = Install(installer) + if err == nil { + t.Error("Expected error when installing plugin that already exists") + } else if !strings.Contains(err.Error(), "plugin already exists") { + t.Errorf("Expected 'plugin already exists' error, got: %v", err) + } +} + +func TestOCIInstaller_Update(t *testing.T) { + // Set up isolated test environment + testPluginsDir := t.TempDir() + t.Setenv("HELM_PLUGINS", testPluginsDir) + + pluginName := "test-plugin-update" + server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) + defer server.Close() + + source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName) + installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true)) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + // Test update when plugin does not exist - should fail + err = Update(installer) + if err == nil { + t.Error("Expected error when updating plugin that does not exist") + } else if !strings.Contains(err.Error(), "plugin does not exist") { + t.Errorf("Expected 'plugin does not exist' error, got: %v", err) + } + + // Install plugin first + if err := Install(installer); err != nil { + t.Fatalf("Expected installation to 
succeed, got error: %v", err) + } + + // Verify plugin was installed + if !isPlugin(installer.Path()) { + t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path()) + } + + // Test update when plugin exists - should succeed + // For OCI, Update() removes old version and reinstalls + if err := Update(installer); err != nil { + t.Errorf("Expected update to succeed, got error: %v", err) + } + + // Verify plugin is still installed after update + if !isPlugin(installer.Path()) { + t.Errorf("Expected plugin directory %s to contain plugin.yaml after update", installer.Path()) + } +} + +func TestOCIInstaller_Install_ComponentExtraction(t *testing.T) { + // Test that we can extract a plugin archive properly + // This tests the extraction logic that Install() uses + tempDir := t.TempDir() + pluginName := "test-plugin-extract" + + pluginData := createTestPluginTarGz(t, pluginName) + + // Test extraction + err := extractTarGz(bytes.NewReader(pluginData), tempDir) + if err != nil { + t.Fatalf("Failed to extract plugin: %v", err) + } + + // Verify plugin.yaml exists + pluginYAMLPath := filepath.Join(tempDir, "plugin.yaml") + if _, err := os.Stat(pluginYAMLPath); os.IsNotExist(err) { + t.Errorf("plugin.yaml not found after extraction") + } + + // Verify bin directory exists + binPath := filepath.Join(tempDir, "bin") + if _, err := os.Stat(binPath); os.IsNotExist(err) { + t.Errorf("bin directory not found after extraction") + } + + // Verify executable exists and has correct permissions + execPath := filepath.Join(tempDir, "bin", pluginName) + if info, err := os.Stat(execPath); err != nil { + t.Errorf("executable not found: %v", err) + } else if info.Mode()&0111 == 0 { + t.Errorf("file is not executable") + } + + // Verify this would be recognized as a plugin + if !isPlugin(tempDir) { + t.Errorf("extracted directory is not a valid plugin") + } +} + +func TestExtractTarGz(t *testing.T) { + tempDir := t.TempDir() + + // Create a test tar.gz file + var buf bytes.Buffer + gzWriter := gzip.NewWriter(&buf) + tarWriter := tar.NewWriter(gzWriter) + + // Add a test file to the archive + testContent := "test content" + header := &tar.Header{ + Name: "test-file.txt", + Mode: 0644, + Size: int64(len(testContent)), + Typeflag: tar.TypeReg, + } + + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatal(err) + } + + if _, err := tarWriter.Write([]byte(testContent)); err != nil { + t.Fatal(err) + } + + // Add a test directory + dirHeader := &tar.Header{ + Name: "test-dir/", + Mode: 0755, + Typeflag: tar.TypeDir, + } + + if err := tarWriter.WriteHeader(dirHeader); err != nil { + t.Fatal(err) + } + + tarWriter.Close() + gzWriter.Close() + + // Test extraction + err := extractTarGz(bytes.NewReader(buf.Bytes()), tempDir) + if err != nil { + t.Errorf("extractTarGz failed: %v", err) + } + + // Verify extracted file + extractedFile := filepath.Join(tempDir, "test-file.txt") + content, err := os.ReadFile(extractedFile) + if err != nil { + t.Errorf("failed to read extracted file: %v", err) + } + + if string(content) != testContent { + t.Errorf("expected content %s, got %s", testContent, string(content)) + } + + // Verify extracted directory + extractedDir := filepath.Join(tempDir, "test-dir") + if _, err := os.Stat(extractedDir); os.IsNotExist(err) { + t.Errorf("extracted directory does not exist: %s", extractedDir) + } +} + +func TestExtractTarGz_InvalidGzip(t *testing.T) { + tempDir := t.TempDir() + + // Test with invalid gzip data + invalidGzipData := []byte("not gzip data") + err := 
extractTarGz(bytes.NewReader(invalidGzipData), tempDir) + if err == nil { + t.Error("expected error for invalid gzip data") + } +} + +func TestExtractTar_UnknownFileType(t *testing.T) { + tempDir := t.TempDir() + + // Create a test tar file + var buf bytes.Buffer + tarWriter := tar.NewWriter(&buf) + + // Add a test file + testContent := "test content" + header := &tar.Header{ + Name: "test-file.txt", + Mode: 0644, + Size: int64(len(testContent)), + Typeflag: tar.TypeReg, + } + + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatal(err) + } + + if _, err := tarWriter.Write([]byte(testContent)); err != nil { + t.Fatal(err) + } + + // Test unknown file type + unknownHeader := &tar.Header{ + Name: "unknown-type", + Mode: 0644, + Typeflag: tar.TypeSymlink, // Use a type that's not handled + } + + if err := tarWriter.WriteHeader(unknownHeader); err != nil { + t.Fatal(err) + } + + tarWriter.Close() + + // Test extraction - should fail due to unknown type + err := extractTar(bytes.NewReader(buf.Bytes()), tempDir) + if err == nil { + t.Error("expected error for unknown tar file type") + } + + if !strings.Contains(err.Error(), "unknown type") { + t.Errorf("expected 'unknown type' error, got: %v", err) + } +} + +func TestExtractTar_SuccessfulExtraction(t *testing.T) { + tempDir := t.TempDir() + + // Since we can't easily create extended headers with Go's tar package, + // we'll test the logic that skips them by creating a simple tar with regular files + // and then testing that the extraction works correctly. + + // Create a test tar file + var buf bytes.Buffer + tarWriter := tar.NewWriter(&buf) + + // Add a regular file + testContent := "test content" + header := &tar.Header{ + Name: "test-file.txt", + Mode: 0644, + Size: int64(len(testContent)), + Typeflag: tar.TypeReg, + } + + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatal(err) + } + + if _, err := tarWriter.Write([]byte(testContent)); err != nil { + t.Fatal(err) + } + + tarWriter.Close() + + // Test extraction + err := extractTar(bytes.NewReader(buf.Bytes()), tempDir) + if err != nil { + t.Errorf("extractTar failed: %v", err) + } + + // Verify the regular file was extracted + extractedFile := filepath.Join(tempDir, "test-file.txt") + content, err := os.ReadFile(extractedFile) + if err != nil { + t.Errorf("failed to read extracted file: %v", err) + } + + if string(content) != testContent { + t.Errorf("expected content %s, got %s", testContent, string(content)) + } +} + +func TestOCIInstaller_Install_PlainHTTPOption(t *testing.T) { + // Test that PlainHTTP option is properly passed to getter + source := "oci://example.com/test-plugin:v1.0.0" + + // Test with PlainHTTP=false (default) + installer1, err := NewOCIInstaller(source) + if err != nil { + t.Fatalf("failed to create installer: %v", err) + } + if installer1.getter == nil { + t.Error("getter should be initialized") + } + + // Test with PlainHTTP=true + installer2, err := NewOCIInstaller(source, getter.WithPlainHTTP(true)) + if err != nil { + t.Fatalf("failed to create installer with PlainHTTP=true: %v", err) + } + if installer2.getter == nil { + t.Error("getter should be initialized with PlainHTTP=true") + } + + // Both installers should have the same basic properties + if installer1.PluginName != installer2.PluginName { + t.Error("plugin names should match") + } + if installer1.Source != installer2.Source { + t.Error("sources should match") + } + + // Test with multiple options + installer3, err := NewOCIInstaller(source, + getter.WithPlainHTTP(true), + 
getter.WithBasicAuth("user", "pass"), + ) + if err != nil { + t.Fatalf("failed to create installer with multiple options: %v", err) + } + if installer3.getter == nil { + t.Error("getter should be initialized with multiple options") + } +} + +func TestOCIInstaller_Install_ValidationErrors(t *testing.T) { + tests := []struct { + name string + layerData []byte + expectError bool + errorMsg string + }{ + { + name: "non-gzip layer", + layerData: []byte("not gzip data"), + expectError: true, + errorMsg: "is not a gzip compressed archive", + }, + { + name: "empty layer", + layerData: []byte{}, + expectError: true, + errorMsg: "is not a gzip compressed archive", + }, + { + name: "single byte layer", + layerData: []byte{0x1f}, + expectError: true, + errorMsg: "is not a gzip compressed archive", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test the gzip validation logic that's used in the Install method + if len(tt.layerData) < 2 || tt.layerData[0] != 0x1f || tt.layerData[1] != 0x8b { + // This matches the validation in the Install method + if !tt.expectError { + t.Error("expected valid gzip data") + } + if !strings.Contains(tt.errorMsg, "is not a gzip compressed archive") { + t.Errorf("expected error message to contain 'is not a gzip compressed archive'") + } + } + }) + } +} diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index 7dae39505..960404a76 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -19,17 +19,28 @@ import ( "fmt" "io" "log/slog" + "strings" "github.com/spf13/cobra" "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/internal/plugin/installer" "helm.sh/helm/v4/pkg/cmd/require" + "helm.sh/helm/v4/pkg/getter" + "helm.sh/helm/v4/pkg/registry" ) type pluginInstallOptions struct { source string version string + // OCI-specific options + certFile string + keyFile string + caFile string + insecureSkipTLSverify bool + plainHTTP bool + password string + username string } const pluginInstallDesc = ` @@ -60,6 +71,15 @@ func newPluginInstallCmd(out io.Writer) *cobra.Command { }, } cmd.Flags().StringVar(&o.version, "version", "", "specify a version constraint. 
If this is not specified, the latest version is installed") + + // Add OCI-specific flags + cmd.Flags().StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file") + cmd.Flags().StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file") + cmd.Flags().StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") + cmd.Flags().BoolVar(&o.insecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the plugin download") + cmd.Flags().BoolVar(&o.plainHTTP, "plain-http", false, "use insecure HTTP connections for the plugin download") + cmd.Flags().StringVar(&o.username, "username", "", "registry username") + cmd.Flags().StringVar(&o.password, "password", "", "registry password") return cmd } @@ -68,10 +88,28 @@ func (o *pluginInstallOptions) complete(args []string) error { return nil } +func (o *pluginInstallOptions) newInstallerForSource() (installer.Installer, error) { + // Check if source is an OCI registry reference + if strings.HasPrefix(o.source, fmt.Sprintf("%s://", registry.OCIScheme)) { + // Build getter options for OCI + options := []getter.Option{ + getter.WithTLSClientConfig(o.certFile, o.keyFile, o.caFile), + getter.WithInsecureSkipVerifyTLS(o.insecureSkipTLSverify), + getter.WithPlainHTTP(o.plainHTTP), + getter.WithBasicAuth(o.username, o.password), + } + + return installer.NewOCIInstaller(o.source, options...) + } + + // For non-OCI sources, use the original logic + return installer.NewForSource(o.source, o.version) +} + func (o *pluginInstallOptions) run(out io.Writer) error { installer.Debug = settings.Debug - i, err := installer.NewForSource(o.source, o.version) + i, err := o.newInstallerForSource() if err != nil { return err } diff --git a/pkg/cmd/plugin_uninstall.go b/pkg/cmd/plugin_uninstall.go index a925c66dd..85eb46219 100644 --- a/pkg/cmd/plugin_uninstall.go +++ b/pkg/cmd/plugin_uninstall.go @@ -21,6 +21,7 @@ import ( "io" "log/slog" "os" + "path/filepath" "github.com/spf13/cobra" @@ -87,6 +88,36 @@ func uninstallPlugin(p plugin.Plugin) error { if err := os.RemoveAll(p.Dir()); err != nil { return err } + + // Clean up versioned tarball and provenance files from HELM_PLUGINS directory + // These files are saved with pattern: PLUGIN_NAME-VERSION.tgz and PLUGIN_NAME-VERSION.tgz.prov + pluginName := p.Metadata().Name + pluginVersion := p.Metadata().Version + pluginsDir := settings.PluginsDirectory + + // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov + if pluginVersion != "" { + versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion) + + // Remove tarball file + tarballPath := filepath.Join(pluginsDir, versionedBasename) + if _, err := os.Stat(tarballPath); err == nil { + slog.Debug("removing versioned tarball", "path", tarballPath) + if err := os.Remove(tarballPath); err != nil { + slog.Debug("failed to remove tarball file", "path", tarballPath, "error", err) + } + } + + // Remove provenance file + provPath := filepath.Join(pluginsDir, versionedBasename+".prov") + if _, err := os.Stat(provPath); err == nil { + slog.Debug("removing versioned provenance", "path", provPath) + if err := os.Remove(provPath); err != nil { + slog.Debug("failed to remove provenance file", "path", provPath, "error", err) + } + } + } + return runHook(p, plugin.Delete) } diff --git a/pkg/cmd/plugin_uninstall_test.go b/pkg/cmd/plugin_uninstall_test.go new file mode 100644 index 000000000..93d4dc8a8 --- 
/dev/null +++ b/pkg/cmd/plugin_uninstall_test.go @@ -0,0 +1,146 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/test/ensure" + "helm.sh/helm/v4/pkg/cli" +) + +func TestPluginUninstallCleansUpVersionedFiles(t *testing.T) { + ensure.HelmHome(t) + + // Create a fake plugin directory structure in a temp directory + pluginsDir := t.TempDir() + t.Setenv("HELM_PLUGINS", pluginsDir) + + // Create a new settings instance that will pick up the environment variable + testSettings := cli.New() + pluginName := "test-plugin" + + // Create plugin directory + pluginDir := filepath.Join(pluginsDir, pluginName) + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create plugin.yaml + pluginYAML := `name: test-plugin +version: 1.2.3 +description: Test plugin +command: $HELM_PLUGIN_DIR/test-plugin +` + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create versioned tarball and provenance files + tarballFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz") + provFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz.prov") + otherVersionTarball := filepath.Join(pluginsDir, "test-plugin-2.0.0.tgz") + + if err := os.WriteFile(tarballFile, []byte("fake tarball"), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(provFile, []byte("fake provenance"), 0644); err != nil { + t.Fatal(err) + } + // Create another version that should NOT be removed + if err := os.WriteFile(otherVersionTarball, []byte("other version"), 0644); err != nil { + t.Fatal(err) + } + + // Load the plugin + p, err := plugin.LoadDir(pluginDir) + if err != nil { + t.Fatal(err) + } + + // Create a test uninstall function that uses our test settings + testUninstallPlugin := func(plugin plugin.Plugin) error { + if err := os.RemoveAll(plugin.Dir()); err != nil { + return err + } + + // Clean up versioned tarball and provenance files from test HELM_PLUGINS directory + pluginName := plugin.Metadata().Name + pluginVersion := plugin.Metadata().Version + testPluginsDir := testSettings.PluginsDirectory + + // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov + if pluginVersion != "" { + versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion) + + // Remove tarball file + tarballPath := filepath.Join(testPluginsDir, versionedBasename) + if _, err := os.Stat(tarballPath); err == nil { + if err := os.Remove(tarballPath); err != nil { + t.Logf("failed to remove tarball file: %v", err) + } + } + + // Remove provenance file + provPath := filepath.Join(testPluginsDir, versionedBasename+".prov") + if _, err := os.Stat(provPath); err == nil { + if err := os.Remove(provPath); err != nil { + t.Logf("failed to remove provenance file: %v", err) + } + } + } + + // Skip runHook in test + return nil + } + + // Verify files exist before uninstall + if 
_, err := os.Stat(tarballFile); os.IsNotExist(err) { + t.Fatal("tarball file should exist before uninstall") + } + if _, err := os.Stat(provFile); os.IsNotExist(err) { + t.Fatal("provenance file should exist before uninstall") + } + if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) { + t.Fatal("other version tarball should exist before uninstall") + } + + // Uninstall the plugin + if err := testUninstallPlugin(p); err != nil { + t.Fatal(err) + } + + // Verify plugin directory is removed + if _, err := os.Stat(pluginDir); !os.IsNotExist(err) { + t.Error("plugin directory should be removed") + } + + // Verify only exact version files are removed + if _, err := os.Stat(tarballFile); !os.IsNotExist(err) { + t.Error("versioned tarball file should be removed") + } + if _, err := os.Stat(provFile); !os.IsNotExist(err) { + t.Error("versioned provenance file should be removed") + } + // Verify other version files are NOT removed + if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) { + t.Error("other version tarball should NOT be removed") + } +} diff --git a/pkg/getter/getter.go b/pkg/getter/getter.go index 8585ac449..a2d0f0ee2 100644 --- a/pkg/getter/getter.go +++ b/pkg/getter/getter.go @@ -48,6 +48,7 @@ type getterOptions struct { registryClient *registry.Client timeout time.Duration transport *http.Transport + artifactType string } // Option allows specifying various settings configurable by the user for overriding the defaults @@ -144,6 +145,13 @@ func WithTransport(transport *http.Transport) Option { } } +// WithArtifactType sets the type of OCI artifact ("chart" or "plugin") +func WithArtifactType(artifactType string) Option { + return func(opts *getterOptions) { + opts.artifactType = artifactType + } +} + // Getter is an interface to support GET to the specified URL. 
type Getter interface { // Get file content by url string diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go index 45e7263fe..121e000c8 100644 --- a/pkg/getter/ocigetter.go +++ b/pkg/getter/ocigetter.go @@ -63,6 +63,10 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") { ref = fmt.Sprintf("%s:%s", ref, version) } + // Check if this is a plugin request + if g.opts.artifactType == "plugin" { + return g.getPlugin(client, ref) + } // Default to chart behavior for backward compatibility var pullOpts []registry.PullOption @@ -168,3 +172,28 @@ func (g *OCIGetter) newRegistryClient() (*registry.Client, error) { return client, nil } + +// getPlugin handles plugin-specific OCI pulls +func (g *OCIGetter) getPlugin(client *registry.Client, ref string) (*bytes.Buffer, error) { + // Extract plugin name from the reference + // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name" + parts := strings.Split(ref, "/") + if len(parts) < 2 { + return nil, fmt.Errorf("invalid OCI reference: %s", ref) + } + lastPart := parts[len(parts)-1] + pluginName := lastPart + if idx := strings.LastIndex(lastPart, ":"); idx > 0 { + pluginName = lastPart[:idx] + } + if idx := strings.LastIndex(lastPart, "@"); idx > 0 { + pluginName = lastPart[:idx] + } + + result, err := client.PullPlugin(ref, pluginName) + if err != nil { + return nil, err + } + + return bytes.NewBuffer(result.PluginData), nil +} diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 85c847752..1c0f5593f 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -110,8 +110,7 @@ func (t *TestPlugin) Metadata() plugin.Metadata { Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", - // TODO: either change Config to plugin.ConfigCLI, or change APIVersion to getter/v1? 
- Config: &plugin.ConfigGetter{}, + Config: &plugin.ConfigCLI{}, RuntimeConfig: &plugin.RuntimeConfigSubprocess{ PlatformCommands: []plugin.PlatformCommand{ { diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 169900750..7ba26ac5c 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -29,13 +29,11 @@ import ( "os" "sort" "strings" - "sync" "github.com/Masterminds/semver/v3" "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2" - "oras.land/oras-go/v2/content" "oras.land/oras-go/v2/content/memory" "oras.land/oras-go/v2/registry" "oras.land/oras-go/v2/registry/remote" @@ -147,6 +145,11 @@ func NewClient(options ...ClientOption) (*Client, error) { return client, nil } +// Generic returns a GenericClient for low-level OCI operations +func (c *Client) Generic() *GenericClient { + return NewGenericClient(c) +} + // ClientOptDebug returns a function that sets the debug setting on client options set func ClientOptDebug(debug bool) ClientOption { return func(client *Client) { @@ -418,84 +421,31 @@ type ( } ) -// Pull downloads a chart from a registry -func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { - parsedRef, err := newReference(ref) - if err != nil { - return nil, err - } +// processChartPull handles chart-specific processing of a generic pull result +func (c *Client) processChartPull(genericResult *GenericPullResult, operation *pullOperation) (*PullResult, error) { + var err error - operation := &pullOperation{ - withChart: true, // By default, always download the chart layer - } - for _, option := range options { - option(operation) - } - if !operation.withChart && !operation.withProv { - return nil, errors.New( - "must specify at least one layer to pull (chart/prov)") - } - memoryStore := memory.New() - allowedMediaTypes := []string{ - ocispec.MediaTypeImageManifest, - ConfigMediaType, - } + // Chart-specific validation minNumDescriptors := 1 // 1 for the config if operation.withChart { minNumDescriptors++ - allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType) } - if operation.withProv { - if !operation.ignoreMissingProv { - minNumDescriptors++ - } - allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType) - } - - var descriptors, layers []ocispec.Descriptor - - repository, err := remote.NewRepository(parsedRef.String()) - if err != nil { - return nil, err - } - repository.PlainHTTP = c.plainHTTP - repository.Client = c.authorizer - - ctx := context.Background() - - sort.Strings(allowedMediaTypes) - - var mu sync.Mutex - manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{ - CopyGraphOptions: oras.CopyGraphOptions{ - PreCopy: func(_ context.Context, desc ocispec.Descriptor) error { - mediaType := desc.MediaType - if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType { - return oras.SkipNode - } - - mu.Lock() - layers = append(layers, desc) - mu.Unlock() - return nil - }, - }, - }) - if err != nil { - return nil, err + if operation.withProv && !operation.ignoreMissingProv { + minNumDescriptors++ } - descriptors = append(descriptors, layers...) 
- - numDescriptors := len(descriptors) + numDescriptors := len(genericResult.Descriptors) if numDescriptors < minNumDescriptors { return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d", minNumDescriptors, numDescriptors) } + + // Find chart-specific descriptors var configDescriptor *ocispec.Descriptor var chartDescriptor *ocispec.Descriptor var provDescriptor *ocispec.Descriptor - for _, descriptor := range descriptors { + + for _, descriptor := range genericResult.Descriptors { d := descriptor switch d.MediaType { case ConfigMediaType: @@ -509,6 +459,8 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType) } } + + // Chart-specific validation if configDescriptor == nil { return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType) } @@ -516,6 +468,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", ChartLayerMediaType) } + var provMissing bool if operation.withProv && provDescriptor == nil { if operation.ignoreMissingProv { @@ -525,10 +478,12 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { ProvLayerMediaType) } } + + // Build chart-specific result result := &PullResult{ Manifest: &DescriptorPullSummary{ - Digest: manifest.Digest.String(), - Size: manifest.Size, + Digest: genericResult.Manifest.Digest.String(), + Size: genericResult.Manifest.Size, }, Config: &DescriptorPullSummary{ Digest: configDescriptor.Digest.String(), @@ -536,15 +491,18 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { }, Chart: &DescriptorPullSummaryWithMeta{}, Prov: &DescriptorPullSummary{}, - Ref: parsedRef.String(), + Ref: genericResult.Ref, } - result.Manifest.Data, err = content.FetchAll(ctx, memoryStore, manifest) + // Fetch data using generic client + genericClient := c.Generic() + + result.Manifest.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest) if err != nil { - return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", manifest.Digest, err) + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", genericResult.Manifest.Digest, err) } - result.Config.Data, err = content.FetchAll(ctx, memoryStore, *configDescriptor) + result.Config.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *configDescriptor) if err != nil { return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", configDescriptor.Digest, err) } @@ -554,7 +512,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { } if operation.withChart { - result.Chart.Data, err = content.FetchAll(ctx, memoryStore, *chartDescriptor) + result.Chart.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *chartDescriptor) if err != nil { return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", chartDescriptor.Digest, err) } @@ -563,7 +521,7 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { } if operation.withProv && !provMissing { - result.Prov.Data, err = content.FetchAll(ctx, memoryStore, *provDescriptor) + result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provDescriptor) if err != nil { return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", 
provDescriptor.Digest, err) } @@ -582,6 +540,44 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { return result, nil } +// Pull downloads a chart from a registry +func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { + operation := &pullOperation{ + withChart: true, // By default, always download the chart layer + } + for _, option := range options { + option(operation) + } + if !operation.withChart && !operation.withProv { + return nil, errors.New( + "must specify at least one layer to pull (chart/prov)") + } + + // Build allowed media types for chart pull + allowedMediaTypes := []string{ + ocispec.MediaTypeImageManifest, + ConfigMediaType, + } + if operation.withChart { + allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType) + } + if operation.withProv { + allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType) + } + + // Use generic client for the pull operation + genericClient := c.Generic() + genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{ + AllowedMediaTypes: allowedMediaTypes, + }) + if err != nil { + return nil, err + } + + // Process the result with chart-specific logic + return c.processChartPull(genericResult, operation) +} + // PullOptWithChart returns a function that sets the withChart setting on pull func PullOptWithChart(withChart bool) PullOption { return func(operation *pullOperation) { diff --git a/pkg/registry/generic.go b/pkg/registry/generic.go new file mode 100644 index 000000000..b82132338 --- /dev/null +++ b/pkg/registry/generic.go @@ -0,0 +1,162 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry + +import ( + "context" + "io" + "net/http" + "sort" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/content/memory" + "oras.land/oras-go/v2/registry/remote" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials" +) + +// GenericClient provides low-level OCI operations without artifact-specific assumptions +type GenericClient struct { + debug bool + enableCache bool + credentialsFile string + username string + password string + out io.Writer + authorizer *auth.Client + registryAuthorizer RemoteClient + credentialsStore credentials.Store + httpClient *http.Client + plainHTTP bool +} + +// GenericPullOptions configures a generic pull operation +type GenericPullOptions struct { + // MediaTypes to include in the pull (empty means all) + AllowedMediaTypes []string + // Skip descriptors with these media types + SkipMediaTypes []string + // Custom PreCopy function for filtering + PreCopy func(context.Context, ocispec.Descriptor) error +} + +// GenericPullResult contains the result of a generic pull operation +type GenericPullResult struct { + Manifest ocispec.Descriptor + Descriptors []ocispec.Descriptor + MemoryStore *memory.Store + Ref string +} + +// NewGenericClient creates a new generic OCI client from an existing Client +func NewGenericClient(client *Client) *GenericClient { + return &GenericClient{ + debug: client.debug, + enableCache: client.enableCache, + credentialsFile: client.credentialsFile, + username: client.username, + password: client.password, + out: client.out, + authorizer: client.authorizer, + registryAuthorizer: client.registryAuthorizer, + credentialsStore: client.credentialsStore, + httpClient: client.httpClient, + plainHTTP: client.plainHTTP, + } +} + +// PullGeneric performs a generic OCI pull without artifact-specific assumptions +func (c *GenericClient) PullGeneric(ref string, options GenericPullOptions) (*GenericPullResult, error) { + parsedRef, err := newReference(ref) + if err != nil { + return nil, err + } + + memoryStore := memory.New() + var descriptors []ocispec.Descriptor + + // Set up repository with authentication and configuration + repository, err := remote.NewRepository(parsedRef.String()) + if err != nil { + return nil, err + } + repository.PlainHTTP = c.plainHTTP + repository.Client = c.authorizer + + ctx := context.Background() + + // Prepare allowed media types for filtering + var allowedMediaTypes []string + if len(options.AllowedMediaTypes) > 0 { + allowedMediaTypes = make([]string, len(options.AllowedMediaTypes)) + copy(allowedMediaTypes, options.AllowedMediaTypes) + sort.Strings(allowedMediaTypes) + } + + var mu sync.Mutex + manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{ + CopyGraphOptions: oras.CopyGraphOptions{ + PreCopy: func(ctx context.Context, desc ocispec.Descriptor) error { + // Apply custom PreCopy function if provided + if options.PreCopy != nil { + if err := options.PreCopy(ctx, desc); err != nil { + return err + } + } + + mediaType := desc.MediaType + + // Skip media types if specified + for _, skipType := range options.SkipMediaTypes { + if mediaType == skipType { + return oras.SkipNode + } + } + + // Filter by allowed media types if specified + if len(allowedMediaTypes) > 0 { + if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType { + return 
oras.SkipNode + } + } + + mu.Lock() + descriptors = append(descriptors, desc) + mu.Unlock() + return nil + }, + }, + }) + if err != nil { + return nil, err + } + + return &GenericPullResult{ + Manifest: manifest, + Descriptors: descriptors, + MemoryStore: memoryStore, + Ref: parsedRef.String(), + }, nil +} + +// GetDescriptorData retrieves the data for a specific descriptor +func (c *GenericClient) GetDescriptorData(store *memory.Store, desc ocispec.Descriptor) ([]byte, error) { + return content.FetchAll(context.Background(), store, desc) +} diff --git a/pkg/registry/plugin.go b/pkg/registry/plugin.go new file mode 100644 index 000000000..a92aaf452 --- /dev/null +++ b/pkg/registry/plugin.go @@ -0,0 +1,176 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "encoding/json" + "fmt" + "strings" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Plugin-specific constants +const ( + // PluginArtifactType is the artifact type for Helm plugins + PluginArtifactType = "application/vnd.helm.plugin.v1+json" +) + +// PluginPullOptions configures a plugin pull operation +type PluginPullOptions struct { + // PluginName specifies the expected plugin name for layer validation + PluginName string +} + +// PluginPullResult contains the result of a plugin pull operation +type PluginPullResult struct { + Manifest ocispec.Descriptor + PluginData []byte + ProvenanceData []byte // Optional provenance data + Ref string + PluginName string +} + +// PullPlugin downloads a plugin from an OCI registry using artifact type +func (c *Client) PullPlugin(ref string, pluginName string, options ...PluginPullOption) (*PluginPullResult, error) { + operation := &pluginPullOperation{ + pluginName: pluginName, + } + for _, option := range options { + option(operation) + } + + // Use generic client for the pull operation with artifact type filtering + genericClient := c.Generic() + genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{ + // Allow manifests and all layer types - we'll validate artifact type after download + AllowedMediaTypes: []string{ + ocispec.MediaTypeImageManifest, + "application/vnd.oci.image.layer.v1.tar", + "application/vnd.oci.image.layer.v1.tar+gzip", + }, + }) + if err != nil { + return nil, err + } + + // Process the result with plugin-specific logic + return c.processPluginPull(genericResult, operation.pluginName) +} + +// processPluginPull handles plugin-specific processing of a generic pull result using artifact type +func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName string) (*PluginPullResult, error) { + // First validate that this is actually a plugin artifact + manifestData, err := c.Generic().GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest) + if err != nil { + return nil, fmt.Errorf("unable to retrieve manifest: %w", err) + } + + // Parse the manifest to check artifact type + var manifest ocispec.Manifest + if err := json.Unmarshal(manifestData, &manifest); err != 
nil { + return nil, fmt.Errorf("unable to parse manifest: %w", err) + } + + // Validate artifact type (for OCI v1.1+ manifests) + if manifest.ArtifactType != "" && manifest.ArtifactType != PluginArtifactType { + return nil, fmt.Errorf("expected artifact type %s, got %s", PluginArtifactType, manifest.ArtifactType) + } + + // For backwards compatibility, also check config media type if no artifact type + if manifest.ArtifactType == "" && manifest.Config.MediaType != PluginArtifactType { + return nil, fmt.Errorf("expected config media type %s for legacy compatibility, got %s", PluginArtifactType, manifest.Config.MediaType) + } + + // Find the required plugin tarball and optional provenance + expectedTarball := pluginName + ".tgz" + expectedProvenance := pluginName + ".tgz.prov" + + var pluginDescriptor *ocispec.Descriptor + var provenanceDescriptor *ocispec.Descriptor + + // Look for layers with the expected titles/annotations + for _, layer := range manifest.Layers { + d := layer + // Check for title annotation (preferred method) + if title, exists := d.Annotations[ocispec.AnnotationTitle]; exists { + switch title { + case expectedTarball: + pluginDescriptor = &d + case expectedProvenance: + provenanceDescriptor = &d + } + } + } + + // Plugin tarball is required + if pluginDescriptor == nil { + return nil, fmt.Errorf("required layer %s not found in manifest", expectedTarball) + } + + // Build plugin-specific result + result := &PluginPullResult{ + Manifest: genericResult.Manifest, + Ref: genericResult.Ref, + PluginName: pluginName, + } + + // Fetch plugin data using generic client + genericClient := c.Generic() + result.PluginData, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *pluginDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve plugin data with digest %s: %w", pluginDescriptor.Digest, err) + } + + // Fetch provenance data if available + if provenanceDescriptor != nil { + result.ProvenanceData, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provenanceDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve provenance data with digest %s: %w", provenanceDescriptor.Digest, err) + } + } + + fmt.Fprintf(c.out, "Pulled plugin: %s\n", result.Ref) + fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) + if result.ProvenanceData != nil { + fmt.Fprintf(c.out, "Provenance: %s\n", expectedProvenance) + } + + if strings.Contains(result.Ref, "_") { + fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) + fmt.Fprint(c.out, registryUnderscoreMessage+"\n") + } + + return result, nil +} + +// Plugin pull operation types and options +type ( + pluginPullOperation struct { + pluginName string + } + + // PluginPullOption allows customizing plugin pull operations + PluginPullOption func(*pluginPullOperation) +) + +// PluginPullOptWithPluginName sets the plugin name for validation +func PluginPullOptWithPluginName(name string) PluginPullOption { + return func(operation *pluginPullOperation) { + operation.pluginName = name + } +} From fd41fdd9c9e741edaf93155f0ff300c206ee4957 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Mon, 25 Aug 2025 11:19:02 -0400 Subject: [PATCH 53/88] New registry plugin func GetPluginName. 
Re-use regsitry.reference Signed-off-by: Scott Rigby --- internal/plugin/installer/oci_installer.go | 20 +---- pkg/registry/plugin.go | 25 ++++++ pkg/registry/plugin_test.go | 93 ++++++++++++++++++++++ 3 files changed, 122 insertions(+), 16 deletions(-) create mode 100644 pkg/registry/plugin_test.go diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go index acb28ccf9..89dd44056 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -24,7 +24,6 @@ import ( "log/slog" "os" "path/filepath" - "strings" "helm.sh/helm/v4/internal/plugin/cache" "helm.sh/helm/v4/internal/third_party/dep/fs" @@ -45,21 +44,10 @@ type OCIInstaller struct { // NewOCIInstaller creates a new OCIInstaller with optional getter options func NewOCIInstaller(source string, options ...getter.Option) (*OCIInstaller, error) { - ref := strings.TrimPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) - - // Extract plugin name from OCI reference - // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name" - parts := strings.Split(ref, "/") - if len(parts) < 2 { - return nil, fmt.Errorf("invalid OCI reference: %s", source) - } - lastPart := parts[len(parts)-1] - pluginName := lastPart - if idx := strings.LastIndex(lastPart, ":"); idx > 0 { - pluginName = lastPart[:idx] - } - if idx := strings.LastIndex(lastPart, "@"); idx > 0 { - pluginName = lastPart[:idx] + // Extract plugin name from OCI reference using robust registry parsing + pluginName, err := registry.GetPluginName(source) + if err != nil { + return nil, err } key, err := cache.Key(source) diff --git a/pkg/registry/plugin.go b/pkg/registry/plugin.go index a92aaf452..5d22a99ee 100644 --- a/pkg/registry/plugin.go +++ b/pkg/registry/plugin.go @@ -174,3 +174,28 @@ func PluginPullOptWithPluginName(name string) PluginPullOption { operation.pluginName = name } } + +// GetPluginName extracts the plugin name from an OCI reference using proper reference parsing +func GetPluginName(source string) (string, error) { + ref, err := newReference(source) + if err != nil { + return "", fmt.Errorf("invalid OCI reference: %w", err) + } + + // Extract plugin name from the repository path + // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> Repository: "user/plugin-name" + repository := ref.Repository + if repository == "" { + return "", fmt.Errorf("invalid OCI reference: missing repository") + } + + // Get the last part of the repository path as the plugin name + parts := strings.Split(repository, "/") + pluginName := parts[len(parts)-1] + + if pluginName == "" { + return "", fmt.Errorf("invalid OCI reference: cannot determine plugin name from repository %s", repository) + } + + return pluginName, nil +} diff --git a/pkg/registry/plugin_test.go b/pkg/registry/plugin_test.go new file mode 100644 index 000000000..f8525829c --- /dev/null +++ b/pkg/registry/plugin_test.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry + +import ( + "testing" +) + +func TestGetPluginName(t *testing.T) { + tests := []struct { + name string + source string + expected string + expectErr bool + }{ + { + name: "valid OCI reference with tag", + source: "oci://ghcr.io/user/plugin-name:v1.0.0", + expected: "plugin-name", + }, + { + name: "valid OCI reference with digest", + source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef", + expected: "plugin-name", + }, + { + name: "valid OCI reference without tag", + source: "oci://ghcr.io/user/plugin-name", + expected: "plugin-name", + }, + { + name: "valid OCI reference with multiple path segments", + source: "oci://registry.example.com/org/team/plugin-name:latest", + expected: "plugin-name", + }, + { + name: "valid OCI reference with plus signs in tag", + source: "oci://registry.example.com/user/plugin-name:v1.0.0+build.1", + expected: "plugin-name", + }, + { + name: "valid OCI reference - single path segment", + source: "oci://registry.example.com/plugin", + expected: "plugin", + }, + { + name: "invalid OCI reference - no repository", + source: "oci://registry.example.com", + expectErr: true, + }, + { + name: "invalid OCI reference - malformed", + source: "not-an-oci-reference", + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pluginName, err := GetPluginName(tt.source) + + if tt.expectErr { + if err == nil { + t.Errorf("expected error but got none") + } + return + } + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + if pluginName != tt.expected { + t.Errorf("expected plugin name %q, got %q", tt.expected, pluginName) + } + }) + } +} From d19130f69ea4036bff9615f120dbb509fe31897b Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Mon, 25 Aug 2025 22:23:20 -0400 Subject: [PATCH 54/88] Fix file handle management in tar extractors Use defer outFile.Close() to avoid multiple close calls and ensure proper resource cleanup Co-authored-by: Andrew Block Signed-off-by: Scott Rigby --- internal/plugin/installer/http_installer.go | 3 +-- internal/plugin/installer/oci_installer.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go index b168f8646..e598bce02 100644 --- a/internal/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -256,11 +256,10 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error { if err != nil { return err } + defer outFile.Close() if _, err := io.Copy(outFile, tarReader); err != nil { - outFile.Close() return err } - outFile.Close() // We don't want to process these extension header files. 
case tar.TypeXGlobalHeader, tar.TypeXHeader: continue diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go index 89dd44056..a96a94ee1 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -200,11 +200,10 @@ func extractTar(r io.Reader, targetDir string) error { if err != nil { return err } + defer outFile.Close() if _, err := io.Copy(outFile, tarReader); err != nil { - outFile.Close() return err } - outFile.Close() case tar.TypeXGlobalHeader, tar.TypeXHeader: // Skip these continue From 5c663db853af87e0951f9af8a71a9412051af370 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Thu, 21 Aug 2025 04:40:16 -0400 Subject: [PATCH 55/88] Plugin tarball installer support for HTTP (fix) and local (feat) Signed-off-by: Scott Rigby --- internal/plugin/installer/http_installer.go | 22 +- .../plugin/installer/http_installer_test.go | 252 ++++++++++++++++++ internal/plugin/installer/installer.go | 9 + internal/plugin/installer/installer_test.go | 11 +- internal/plugin/installer/local_installer.go | 67 +++++ .../plugin/installer/local_installer_test.go | 160 +++++++++++ internal/plugin/installer/plugin_structure.go | 80 ++++++ .../plugin/installer/plugin_structure_test.go | 165 ++++++++++++ 8 files changed, 760 insertions(+), 6 deletions(-) create mode 100644 internal/plugin/installer/plugin_structure.go create mode 100644 internal/plugin/installer/plugin_structure_test.go diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go index e598bce02..b68fc059a 100644 --- a/internal/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -69,6 +69,9 @@ func mediaTypeToExtension(mt string) (string, bool) { switch strings.ToLower(mt) { case "application/gzip", "application/x-gzip", "application/x-tgz", "application/x-gtar": return ".tgz", true + case "application/octet-stream": + // Generic binary type - we'll need to check the URL suffix + return "", false default: return "", false } @@ -138,11 +141,18 @@ func (i *HTTPInstaller) Install() error { return fmt.Errorf("extracting files from archive: %w", err) } - if !isPlugin(i.CacheDir) { - return ErrMissingMetadata + // Detect where the plugin.yaml actually is + pluginRoot, err := detectPluginRoot(i.CacheDir) + if err != nil { + return err + } + + // Validate plugin structure if needed + if err := validatePluginName(pluginRoot, i.PluginName); err != nil { + return err } - src, err := filepath.Abs(i.CacheDir) + src, err := filepath.Abs(pluginRoot) if err != nil { return err } @@ -248,10 +258,14 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error { switch header.Typeflag { case tar.TypeDir: - if err := os.Mkdir(path, 0755); err != nil { + if err := os.MkdirAll(path, 0755); err != nil { return err } case tar.TypeReg: + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) if err != nil { return err diff --git a/internal/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go index 92521474e..ac74b8cf6 100644 --- a/internal/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -348,3 +348,255 @@ func TestMediaTypeToExtension(t *testing.T) { } } } + +func TestExtractWithNestedDirectories(t *testing.T) { + source := 
"https://repo.localdomain/plugins/nested-plugin-0.0.1.tar.gz" + tempDir := t.TempDir() + + // Set the umask to default open permissions so we can actually test + oldmask := syscall.Umask(0000) + defer func() { + syscall.Umask(oldmask) + }() + + // Write a tarball with nested directory structure + var tarbuf bytes.Buffer + tw := tar.NewWriter(&tarbuf) + var files = []struct { + Name string + Body string + Mode int64 + TypeFlag byte + }{ + {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg}, + {"bin/", "", 0755, tar.TypeDir}, + {"bin/plugin", "#!/bin/bash\necho plugin", 0755, tar.TypeReg}, + {"docs/", "", 0755, tar.TypeDir}, + {"docs/README.md", "readme content", 0644, tar.TypeReg}, + {"docs/examples/", "", 0755, tar.TypeDir}, + {"docs/examples/example1.yaml", "example content", 0644, tar.TypeReg}, + } + + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Typeflag: file.TypeFlag, + Mode: file.Mode, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if file.TypeFlag == tar.TypeReg { + if _, err := tw.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + } + + if err := tw.Close(); err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + gz := gzip.NewWriter(&buf) + if _, err := gz.Write(tarbuf.Bytes()); err != nil { + t.Fatal(err) + } + gz.Close() + + extractor, err := NewExtractor(source) + if err != nil { + t.Fatal(err) + } + + // First extraction + if err = extractor.Extract(&buf, tempDir); err != nil { + t.Fatalf("First extraction failed: %v", err) + } + + // Verify nested structure was created + nestedFile := filepath.Join(tempDir, "docs", "examples", "example1.yaml") + if _, err := os.Stat(nestedFile); err != nil { + t.Fatalf("Expected nested file %s to exist but got error: %v", nestedFile, err) + } + + // Reset buffer for second extraction + buf.Reset() + gz = gzip.NewWriter(&buf) + if _, err := gz.Write(tarbuf.Bytes()); err != nil { + t.Fatal(err) + } + gz.Close() + + // Second extraction to same directory (should not fail) + if err = extractor.Extract(&buf, tempDir); err != nil { + t.Fatalf("Second extraction to existing directory failed: %v", err) + } +} + +func TestExtractWithExistingDirectory(t *testing.T) { + source := "https://repo.localdomain/plugins/test-plugin-0.0.1.tar.gz" + tempDir := t.TempDir() + + // Pre-create the cache directory structure + cacheDir := filepath.Join(tempDir, "cache") + if err := os.MkdirAll(filepath.Join(cacheDir, "existing", "dir"), 0755); err != nil { + t.Fatal(err) + } + + // Create a file in the existing directory + existingFile := filepath.Join(cacheDir, "existing", "file.txt") + if err := os.WriteFile(existingFile, []byte("existing content"), 0644); err != nil { + t.Fatal(err) + } + + // Write a tarball + var tarbuf bytes.Buffer + tw := tar.NewWriter(&tarbuf) + files := []struct { + Name string + Body string + Mode int64 + TypeFlag byte + }{ + {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg}, + {"existing/", "", 0755, tar.TypeDir}, + {"existing/dir/", "", 0755, tar.TypeDir}, + {"existing/dir/newfile.txt", "new content", 0644, tar.TypeReg}, + } + + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Typeflag: file.TypeFlag, + Mode: file.Mode, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if file.TypeFlag == tar.TypeReg { + if _, err := tw.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + } + + if err := tw.Close(); err != nil { + t.Fatal(err) + } + + var buf 
bytes.Buffer + gz := gzip.NewWriter(&buf) + if _, err := gz.Write(tarbuf.Bytes()); err != nil { + t.Fatal(err) + } + gz.Close() + + extractor, err := NewExtractor(source) + if err != nil { + t.Fatal(err) + } + + // Extract to directory with existing content + if err = extractor.Extract(&buf, cacheDir); err != nil { + t.Fatalf("Extraction to directory with existing content failed: %v", err) + } + + // Verify new file was created + newFile := filepath.Join(cacheDir, "existing", "dir", "newfile.txt") + if _, err := os.Stat(newFile); err != nil { + t.Fatalf("Expected new file %s to exist but got error: %v", newFile, err) + } + + // Verify existing file is still there + if _, err := os.Stat(existingFile); err != nil { + t.Fatalf("Expected existing file %s to still exist but got error: %v", existingFile, err) + } +} + +func TestExtractPluginInSubdirectory(t *testing.T) { + source := "https://repo.localdomain/plugins/subdir-plugin-1.0.0.tar.gz" + tempDir := t.TempDir() + + // Create a tarball where plugin files are in a subdirectory + var tarbuf bytes.Buffer + tw := tar.NewWriter(&tarbuf) + files := []struct { + Name string + Body string + Mode int64 + TypeFlag byte + }{ + {"my-plugin/", "", 0755, tar.TypeDir}, + {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test plugin\ncommand: $HELM_PLUGIN_DIR/bin/my-plugin", 0644, tar.TypeReg}, + {"my-plugin/bin/", "", 0755, tar.TypeDir}, + {"my-plugin/bin/my-plugin", "#!/bin/bash\necho test", 0755, tar.TypeReg}, + } + + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Typeflag: file.TypeFlag, + Mode: file.Mode, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if file.TypeFlag == tar.TypeReg { + if _, err := tw.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + } + + if err := tw.Close(); err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + gz := gzip.NewWriter(&buf) + if _, err := gz.Write(tarbuf.Bytes()); err != nil { + t.Fatal(err) + } + gz.Close() + + // Test the installer + installer := &HTTPInstaller{ + CacheDir: tempDir, + PluginName: "subdir-plugin", + base: newBase(source), + extractor: &TarGzExtractor{}, + } + + // Create a mock getter + installer.getter = &TestHTTPGetter{ + MockResponse: &buf, + } + + // Ensure the destination directory doesn't exist + // (In a real scenario, this is handled by installer.Install() wrapper) + destPath := installer.Path() + if err := os.RemoveAll(destPath); err != nil { + t.Fatalf("Failed to clean destination path: %v", err) + } + + // Install should handle the subdirectory correctly + if err := installer.Install(); err != nil { + t.Fatalf("Failed to install plugin with subdirectory: %v", err) + } + + // The plugin should be installed from the subdirectory + // Check that detectPluginRoot found the correct location + pluginRoot, err := detectPluginRoot(tempDir) + if err != nil { + t.Fatalf("Failed to detect plugin root: %v", err) + } + + expectedRoot := filepath.Join(tempDir, "my-plugin") + if pluginRoot != expectedRoot { + t.Errorf("Expected plugin root to be %s but got %s", expectedRoot, pluginRoot) + } +} diff --git a/internal/plugin/installer/installer.go b/internal/plugin/installer/installer.go index e14f16018..7900f6745 100644 --- a/internal/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -92,6 +92,15 @@ func isLocalReference(source string) bool { // HEAD operation to see if the remote resource is a file that we understand. 
func isRemoteHTTPArchive(source string) bool { if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + // First, check if the URL ends with a known archive suffix + // This is more reliable than content-type detection + for suffix := range Extractors { + if strings.HasSuffix(source, suffix) { + return true + } + } + + // If no suffix match, try HEAD request to check content type res, err := http.Head(source) if err != nil { // If we get an error at the network layer, we can't install it. So diff --git a/internal/plugin/installer/installer_test.go b/internal/plugin/installer/installer_test.go index a11464924..dcd76fe9c 100644 --- a/internal/plugin/installer/installer_test.go +++ b/internal/plugin/installer/installer_test.go @@ -26,8 +26,15 @@ func TestIsRemoteHTTPArchive(t *testing.T) { t.Errorf("Expected non-URL to return false") } - if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") { - t.Errorf("Bad URL should not have succeeded.") + // URLs with valid archive extensions are considered valid archives + // even if the server is unreachable (optimization to avoid unnecessary HTTP requests) + if !isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") { + t.Errorf("URL with .tgz extension should be considered a valid archive") + } + + // Test with invalid extension and unreachable server + if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.notanarchive") { + t.Errorf("Bad URL without valid extension should not succeed") } if !isRemoteHTTPArchive(source) { diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go index 211904108..59e8aebfb 100644 --- a/internal/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -16,11 +16,15 @@ limitations under the License. package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( + "bytes" "errors" "fmt" "log/slog" "os" "path/filepath" + "strings" + + "helm.sh/helm/v4/internal/third_party/dep/fs" ) // ErrPluginNotAFolder indicates that the plugin path is not a folder. @@ -29,6 +33,8 @@ var ErrPluginNotAFolder = errors.New("expected plugin to be a folder") // LocalInstaller installs plugins from the filesystem. type LocalInstaller struct { base + isArchive bool + extractor Extractor } // NewLocalInstaller creates a new LocalInstaller. @@ -40,13 +46,42 @@ func NewLocalInstaller(source string) (*LocalInstaller, error) { i := &LocalInstaller{ base: newBase(src), } + + // Check if source is an archive + if isLocalArchive(src) { + i.isArchive = true + extractor, err := NewExtractor(src) + if err != nil { + return nil, fmt.Errorf("unsupported archive format: %w", err) + } + i.extractor = extractor + } + return i, nil } +// isLocalArchive checks if the file is a supported archive format +func isLocalArchive(path string) bool { + for suffix := range Extractors { + if strings.HasSuffix(path, suffix) { + return true + } + } + return false +} + // Install creates a symlink to the plugin directory. // // Implements Installer. 
func (i *LocalInstaller) Install() error { + if i.isArchive { + return i.installFromArchive() + } + return i.installFromDirectory() +} + +// installFromDirectory creates a symlink to the plugin directory +func (i *LocalInstaller) installFromDirectory() error { stat, err := os.Stat(i.Source) if err != nil { return err @@ -62,6 +97,38 @@ func (i *LocalInstaller) Install() error { return os.Symlink(i.Source, i.Path()) } +// installFromArchive extracts and installs a plugin from a tarball +func (i *LocalInstaller) installFromArchive() error { + // Read the archive file + data, err := os.ReadFile(i.Source) + if err != nil { + return fmt.Errorf("failed to read archive: %w", err) + } + + // Create a temporary directory for extraction + tempDir, err := os.MkdirTemp("", "helm-plugin-extract-") + if err != nil { + return fmt.Errorf("failed to create temp directory: %w", err) + } + defer os.RemoveAll(tempDir) + + // Extract the archive + buffer := bytes.NewBuffer(data) + if err := i.extractor.Extract(buffer, tempDir); err != nil { + return fmt.Errorf("failed to extract archive: %w", err) + } + + // Detect where the plugin.yaml actually is + pluginRoot, err := detectPluginRoot(tempDir) + if err != nil { + return err + } + + // Copy to the final destination + slog.Debug("copying", "source", pluginRoot, "path", i.Path()) + return fs.CopyDir(pluginRoot, i.Path()) +} + // Update updates a local repository func (i *LocalInstaller) Update() error { slog.Debug("local repository is auto-updated") diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index fdb669314..05118e183 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -16,6 +16,9 @@ limitations under the License. 
package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( + "archive/tar" + "bytes" + "compress/gzip" "os" "path/filepath" "testing" @@ -65,3 +68,160 @@ func TestLocalInstallerNotAFolder(t *testing.T) { t.Fatalf("expected error to equal: %q", err) } } + +func TestLocalInstallerTarball(t *testing.T) { + ensure.HelmHome(t) + + // Create a test tarball + tempDir := t.TempDir() + tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tar.gz") + + // Create tarball content + var buf bytes.Buffer + gw := gzip.NewWriter(&buf) + tw := tar.NewWriter(gw) + + files := []struct { + Name string + Body string + Mode int64 + }{ + {"plugin.yaml", "name: test-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644}, + {"bin/test-plugin", "#!/bin/bash\necho test", 0755}, + } + + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Mode: file.Mode, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + + if err := tw.Close(); err != nil { + t.Fatal(err) + } + if err := gw.Close(); err != nil { + t.Fatal(err) + } + + // Write tarball to file + if err := os.WriteFile(tarballPath, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + + // Test installation + i, err := NewForSource(tarballPath, "") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify it's detected as LocalInstaller + localInstaller, ok := i.(*LocalInstaller) + if !ok { + t.Fatal("expected LocalInstaller") + } + + if !localInstaller.isArchive { + t.Fatal("expected isArchive to be true") + } + + if err := Install(i); err != nil { + t.Fatal(err) + } + + expectedPath := helmpath.DataPath("plugins", "test-plugin") + if i.Path() != expectedPath { + t.Fatalf("expected path %q, got %q", expectedPath, i.Path()) + } + + // Verify plugin was installed + if _, err := os.Stat(i.Path()); err != nil { + t.Fatalf("plugin not found at %s: %v", i.Path(), err) + } +} + +func TestLocalInstallerTarballWithSubdirectory(t *testing.T) { + ensure.HelmHome(t) + + // Create a test tarball with subdirectory + tempDir := t.TempDir() + tarballPath := filepath.Join(tempDir, "subdir-plugin-1.0.0.tar.gz") + + // Create tarball content + var buf bytes.Buffer + gw := gzip.NewWriter(&buf) + tw := tar.NewWriter(gw) + + files := []struct { + Name string + Body string + Mode int64 + IsDir bool + }{ + {"my-plugin/", "", 0755, true}, + {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644, false}, + {"my-plugin/bin/", "", 0755, true}, + {"my-plugin/bin/my-plugin", "#!/bin/bash\necho test", 0755, false}, + } + + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Mode: file.Mode, + } + if file.IsDir { + hdr.Typeflag = tar.TypeDir + } else { + hdr.Size = int64(len(file.Body)) + } + + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if !file.IsDir { + if _, err := tw.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + } + + if err := tw.Close(); err != nil { + t.Fatal(err) + } + if err := gw.Close(); err != nil { + t.Fatal(err) + } + + // Write tarball to file + if err := os.WriteFile(tarballPath, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + + // Test installation + i, err := NewForSource(tarballPath, "") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := Install(i); err != nil { + t.Fatal(err) + } + + expectedPath := 
helmpath.DataPath("plugins", "subdir-plugin") + if i.Path() != expectedPath { + t.Fatalf("expected path %q, got %q", expectedPath, i.Path()) + } + + // Verify plugin was installed from subdirectory + pluginYaml := filepath.Join(i.Path(), "plugin.yaml") + if _, err := os.Stat(pluginYaml); err != nil { + t.Fatalf("plugin.yaml not found at %s: %v", pluginYaml, err) + } +} diff --git a/internal/plugin/installer/plugin_structure.go b/internal/plugin/installer/plugin_structure.go new file mode 100644 index 000000000..10647141e --- /dev/null +++ b/internal/plugin/installer/plugin_structure.go @@ -0,0 +1,80 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installer + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "helm.sh/helm/v4/internal/plugin" +) + +// detectPluginRoot searches for plugin.yaml in the extracted directory +// and returns the path to the directory containing it. +// This handles cases where the tarball contains the plugin in a subdirectory. +func detectPluginRoot(extractDir string) (string, error) { + // First check if plugin.yaml is at the root + if _, err := os.Stat(filepath.Join(extractDir, plugin.PluginFileName)); err == nil { + return extractDir, nil + } + + // Otherwise, look for plugin.yaml in subdirectories (only one level deep) + entries, err := os.ReadDir(extractDir) + if err != nil { + return "", err + } + + for _, entry := range entries { + if entry.IsDir() { + subdir := filepath.Join(extractDir, entry.Name()) + if _, err := os.Stat(filepath.Join(subdir, plugin.PluginFileName)); err == nil { + return subdir, nil + } + } + } + + return "", fmt.Errorf("plugin.yaml not found in %s or its immediate subdirectories", extractDir) +} + +// validatePluginName checks if the plugin directory name matches the plugin name +// from plugin.yaml when the plugin is in a subdirectory. +func validatePluginName(pluginRoot string, expectedName string) error { + // Only validate if plugin is in a subdirectory + dirName := filepath.Base(pluginRoot) + if dirName == expectedName { + return nil + } + + // Load plugin.yaml to get the actual name + p, err := plugin.LoadDir(pluginRoot) + if err != nil { + return fmt.Errorf("failed to load plugin from %s: %w", pluginRoot, err) + } + + m := p.Metadata() + actualName := m.Name + + // For now, just log a warning if names don't match + // In the future, we might want to enforce this more strictly + if actualName != dirName && actualName != strings.TrimSuffix(expectedName, filepath.Ext(expectedName)) { + // This is just informational - not an error + return nil + } + + return nil +} diff --git a/internal/plugin/installer/plugin_structure_test.go b/internal/plugin/installer/plugin_structure_test.go new file mode 100644 index 000000000..c8766ce59 --- /dev/null +++ b/internal/plugin/installer/plugin_structure_test.go @@ -0,0 +1,165 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installer + +import ( + "os" + "path/filepath" + "testing" +) + +func TestDetectPluginRoot(t *testing.T) { + tests := []struct { + name string + setup func(dir string) error + expectRoot string + expectError bool + }{ + { + name: "plugin.yaml at root", + setup: func(dir string) error { + return os.WriteFile(filepath.Join(dir, "plugin.yaml"), []byte("name: test"), 0644) + }, + expectRoot: ".", + expectError: false, + }, + { + name: "plugin.yaml in subdirectory", + setup: func(dir string) error { + subdir := filepath.Join(dir, "my-plugin") + if err := os.MkdirAll(subdir, 0755); err != nil { + return err + } + return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644) + }, + expectRoot: "my-plugin", + expectError: false, + }, + { + name: "no plugin.yaml", + setup: func(dir string) error { + return os.WriteFile(filepath.Join(dir, "README.md"), []byte("test"), 0644) + }, + expectRoot: "", + expectError: true, + }, + { + name: "plugin.yaml in nested subdirectory (should not find)", + setup: func(dir string) error { + subdir := filepath.Join(dir, "outer", "inner") + if err := os.MkdirAll(subdir, 0755); err != nil { + return err + } + return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644) + }, + expectRoot: "", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + if err := tt.setup(dir); err != nil { + t.Fatalf("Setup failed: %v", err) + } + + root, err := detectPluginRoot(dir) + if tt.expectError { + if err == nil { + t.Error("Expected error but got none") + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + expectedPath := dir + if tt.expectRoot != "." 
{ + expectedPath = filepath.Join(dir, tt.expectRoot) + } + if root != expectedPath { + t.Errorf("Expected root %s but got %s", expectedPath, root) + } + } + }) + } +} + +func TestValidatePluginName(t *testing.T) { + tests := []struct { + name string + setup func(dir string) error + pluginRoot string + expectedName string + expectError bool + }{ + { + name: "matching directory and plugin name", + setup: func(dir string) error { + subdir := filepath.Join(dir, "my-plugin") + if err := os.MkdirAll(subdir, 0755); err != nil { + return err + } + yaml := `name: my-plugin +version: 1.0.0 +usage: test +description: test` + return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644) + }, + pluginRoot: "my-plugin", + expectedName: "my-plugin", + expectError: false, + }, + { + name: "different directory and plugin name", + setup: func(dir string) error { + subdir := filepath.Join(dir, "wrong-name") + if err := os.MkdirAll(subdir, 0755); err != nil { + return err + } + yaml := `name: my-plugin +version: 1.0.0 +usage: test +description: test` + return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644) + }, + pluginRoot: "wrong-name", + expectedName: "wrong-name", + expectError: false, // Currently we don't error on mismatch + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + if err := tt.setup(dir); err != nil { + t.Fatalf("Setup failed: %v", err) + } + + pluginRoot := filepath.Join(dir, tt.pluginRoot) + err := validatePluginName(pluginRoot, tt.expectedName) + if tt.expectError { + if err == nil { + t.Error("Expected error but got none") + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + } + }) + } +} From 16924a51db7d44698d999c8b9c3bf2fdc74bc60f Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Mon, 25 Aug 2025 10:16:03 -0400 Subject: [PATCH 56/88] Fix: Removed unsafe umask manipulation in tests Problem: Tests used syscall.Umask(0000) which could leave your shell creating files with 777 permissions if interrupted. Solution: Instead of changing umask, tests now detect the current umask and calculate expected permissions after it's applied. Result: Same test coverage, but safe from system-wide side effects. 
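The idiom, as a rough sketch (Unix-only, since syscall.Umask is not available on
Windows), relies on Umask returning the previous mask, so the value can be read
and immediately restored with no lasting side effect:

    currentUmask := syscall.Umask(0) // setting returns the old mask
    syscall.Umask(currentUmask)      // restore it right away
    expectedPerm := os.FileMode(0600 &^ currentUmask)
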
Co-authored-by: Jesse Simpson Signed-off-by: Scott Rigby --- .../plugin/installer/http_installer_test.go | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/internal/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go index ac74b8cf6..453021b76 100644 --- a/internal/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -210,11 +210,9 @@ func TestExtract(t *testing.T) { tempDir := t.TempDir() - // Set the umask to default open permissions so we can actually test - oldmask := syscall.Umask(0000) - defer func() { - syscall.Umask(oldmask) - }() + // Get current umask to predict expected permissions + currentUmask := syscall.Umask(0) + syscall.Umask(currentUmask) // Write a tarball to a buffer for us to extract var tarbuf bytes.Buffer @@ -274,14 +272,19 @@ func TestExtract(t *testing.T) { t.Fatalf("Did not expect error but got error: %v", err) } + // Calculate expected permissions after umask is applied + expectedPluginYAMLPerm := os.FileMode(0600 &^ currentUmask) + expectedReadmePerm := os.FileMode(0777 &^ currentUmask) + pluginYAMLFullPath := filepath.Join(tempDir, "plugin.yaml") if info, err := os.Stat(pluginYAMLFullPath); err != nil { if errors.Is(err, fs.ErrNotExist) { t.Fatalf("Expected %s to exist but doesn't", pluginYAMLFullPath) } t.Fatal(err) - } else if info.Mode().Perm() != 0600 { - t.Fatalf("Expected %s to have 0600 mode it but has %o", pluginYAMLFullPath, info.Mode().Perm()) + } else if info.Mode().Perm() != expectedPluginYAMLPerm { + t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)", + pluginYAMLFullPath, expectedPluginYAMLPerm, info.Mode().Perm(), currentUmask) } readmeFullPath := filepath.Join(tempDir, "README.md") @@ -290,8 +293,9 @@ func TestExtract(t *testing.T) { t.Fatalf("Expected %s to exist but doesn't", readmeFullPath) } t.Fatal(err) - } else if info.Mode().Perm() != 0777 { - t.Fatalf("Expected %s to have 0777 mode it but has %o", readmeFullPath, info.Mode().Perm()) + } else if info.Mode().Perm() != expectedReadmePerm { + t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)", + readmeFullPath, expectedReadmePerm, info.Mode().Perm(), currentUmask) } } @@ -353,12 +357,6 @@ func TestExtractWithNestedDirectories(t *testing.T) { source := "https://repo.localdomain/plugins/nested-plugin-0.0.1.tar.gz" tempDir := t.TempDir() - // Set the umask to default open permissions so we can actually test - oldmask := syscall.Umask(0000) - defer func() { - syscall.Umask(oldmask) - }() - // Write a tarball with nested directory structure var tarbuf bytes.Buffer tw := tar.NewWriter(&tarbuf) From 3d30112468fb4171a015210cfabbb0e41a6c6587 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Mon, 25 Aug 2025 22:57:42 -0400 Subject: [PATCH 57/88] Fix LocalInstaller Path() to strip version from tarball filenames Override Path() method to use existing stripPluginName function for archives Signed-off-by: Scott Rigby --- internal/plugin/installer/local_installer.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go index 59e8aebfb..87b9eaf97 100644 --- a/internal/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -129,6 +129,18 @@ func (i *LocalInstaller) installFromArchive() error { return fs.CopyDir(pluginRoot, i.Path()) } +// Path returns the path where the plugin will be installed. 
+// For archive sources, strips the version from the filename. +func (i *LocalInstaller) Path() string { + if i.Source == "" { + return "" + } + if i.isArchive { + return filepath.Join(i.PluginsDirectory, stripPluginName(filepath.Base(i.Source))) + } + return filepath.Join(i.PluginsDirectory, filepath.Base(i.Source)) +} + // Update updates a local repository func (i *LocalInstaller) Update() error { slog.Debug("local repository is auto-updated") From 389646ffd1e0b150ea96d70c84a5da7f206865c5 Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Tue, 26 Aug 2025 08:13:13 -0600 Subject: [PATCH 58/88] fix: send logging to stderr Signed-off-by: Terry Howe --- internal/logging/logging.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/logging/logging.go b/internal/logging/logging.go index 946a211ef..2e8208d08 100644 --- a/internal/logging/logging.go +++ b/internal/logging/logging.go @@ -64,7 +64,7 @@ func (h *DebugCheckHandler) WithGroup(name string) slog.Handler { // NewLogger creates a new logger with dynamic debug checking func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger { // Create base handler that removes timestamps - baseHandler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + baseHandler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ // Always use LevelDebug here to allow all messages through // Our custom handler will do the filtering Level: slog.LevelDebug, From 417e6a2cbb2daf3bc66655a75fdcab048e8383d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 14:45:37 +0000 Subject: [PATCH 59/88] chore(deps): bump github.com/stretchr/testify from 1.10.0 to 1.11.0 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.10.0 to 1.11.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-version: 1.11.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6557d7663..c28405240 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.7 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.0 go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.41.0 golang.org/x/term v0.34.0 diff --git a/go.sum b/go.sum index b76d921d3..f4f54ecdc 100644 --- a/go.sum +++ b/go.sum @@ -309,8 +309,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= From c8e51b40c23e388646ba6987f32e68af19c1850e Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 25 Aug 2025 13:25:38 -0700 Subject: [PATCH 60/88] Plugin extism/v1 runtime Signed-off-by: George Jenkins --- go.mod | 7 +- go.sum | 10 + internal/plugin/config.go | 1 - internal/plugin/loader.go | 23 +- internal/plugin/metadata.go | 6 + internal/plugin/plugin_type_registry.go | 100 ++++++ internal/plugin/plugin_type_registry_test.go | 38 +++ internal/plugin/runtime.go | 28 +- internal/plugin/runtime_extismv1.go | 297 ++++++++++++++++++ internal/plugin/runtime_extismv1_test.go | 124 ++++++++ internal/plugin/runtime_test.go | 63 ++++ internal/plugin/schema/test.go | 28 ++ .../testdata/src/extismv1-test/.gitignore | 1 + .../testdata/src/extismv1-test/Makefile | 12 + .../plugin/testdata/src/extismv1-test/go.mod | 5 + .../plugin/testdata/src/extismv1-test/go.sum | 2 + .../plugin/testdata/src/extismv1-test/main.go | 61 ++++ .../testdata/src/extismv1-test/plugin.yaml | 6 + 18 files changed, 806 insertions(+), 6 deletions(-) create mode 100644 internal/plugin/plugin_type_registry.go create mode 100644 internal/plugin/plugin_type_registry_test.go create mode 100644 internal/plugin/runtime_extismv1.go create mode 100644 internal/plugin/runtime_extismv1_test.go create mode 100644 internal/plugin/runtime_test.go create mode 100644 internal/plugin/schema/test.go create mode 100644 internal/plugin/testdata/src/extismv1-test/.gitignore create mode 100644 internal/plugin/testdata/src/extismv1-test/Makefile create mode 100644 internal/plugin/testdata/src/extismv1-test/go.mod create mode 100644 internal/plugin/testdata/src/extismv1-test/go.sum create mode 100644 internal/plugin/testdata/src/extismv1-test/main.go create mode 100644 internal/plugin/testdata/src/extismv1-test/plugin.yaml diff --git a/go.mod b/go.mod index c28405240..8cff102c9 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 
github.com/distribution/distribution/v3 v3.0.0 github.com/evanphx/json-patch/v5 v5.9.11 + github.com/extism/go-sdk v1.7.1 github.com/fatih/color v1.18.0 github.com/fluxcd/cli-utils v0.36.0-flux.14 github.com/foxcpp/go-mockdns v1.1.0 @@ -25,13 +26,14 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/mitchellh/copystructure v1.2.0 github.com/moby/term v0.5.2 - github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 github.com/rubenv/sql-migrate v1.8.0 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.7 github.com/stretchr/testify v1.11.0 + github.com/tetratelabs/wazero v1.9.0 go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.41.0 golang.org/x/term v0.34.0 @@ -71,6 +73,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect + github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -95,6 +98,7 @@ require ( github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/huandu/xstrings v1.5.0 // indirect + github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -130,6 +134,7 @@ require ( github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.0 // indirect + github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect diff --git a/go.sum b/go.sum index f4f54ecdc..9b41a7c39 100644 --- a/go.sum +++ b/go.sum @@ -77,12 +77,16 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a h1:UwSIFv5g5lIvbGgtf3tVwC7Ky9rmMFBp0RMs+6f6YqE= +github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C8DzXehI4zAbrdlbtOByKX6pfivJTBiV9Jjqv56Yd9Q= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/extism/go-sdk v1.7.1 h1:lWJos6uY+tRFdlIHR+SJjwFDApY7OypS/2nMhiVQ9Sw= +github.com/extism/go-sdk v1.7.1/go.mod 
h1:IT+Xdg5AZM9hVtpFUA+uZCJMge/hbvshl8bwzLtFyKA= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -164,6 +168,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvH github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca h1:T54Ema1DU8ngI+aef9ZhAhNGQhcRTrWxVeG07F+c/Rw= +github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -311,6 +317,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q= +github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= diff --git a/internal/plugin/config.go b/internal/plugin/config.go index 812dba7f6..83a2e0b25 100644 --- a/internal/plugin/config.go +++ b/internal/plugin/config.go @@ -23,7 +23,6 @@ import ( // Config interface defines the methods that all plugin type configurations must implement type Config interface { - GetType() string Validate() error } diff --git a/internal/plugin/loader.go b/internal/plugin/loader.go index eb05cb722..a58a84126 100644 --- a/internal/plugin/loader.go +++ b/internal/plugin/loader.go @@ -22,7 +22,11 @@ import ( "os" "path/filepath" + extism "github.com/extism/go-sdk" + "github.com/tetratelabs/wazero" "go.yaml.in/yaml/v3" + + "helm.sh/helm/v4/pkg/helmpath" ) func peekAPIVersion(r io.Reader) (string, error) { @@ -101,12 +105,22 @@ type prototypePluginManager struct { runtimes map[string]Runtime } -func newPrototypePluginManager() *prototypePluginManager { +func newPrototypePluginManager() (*prototypePluginManager, error) { + + cc, err := wazero.NewCompilationCacheWithDir(helmpath.CachePath("wazero-build")) + if err != nil { + return nil, fmt.Errorf("failed to create wazero compilation cache: %w", err) + } + return &prototypePluginManager{ runtimes: map[string]Runtime{ "subprocess": &RuntimeSubprocess{}, + "extism/v1": &RuntimeExtismV1{ + HostFunctions: map[string]extism.HostFunction{}, + CompilationCache: 
cc, + }, }, - } + }, nil } func (pm *prototypePluginManager) RegisterRuntime(runtimeName string, runtime Runtime) { @@ -135,7 +149,10 @@ func LoadDir(dirname string) (Plugin, error) { return nil, fmt.Errorf("failed to load plugin %q: %w", dirname, err) } - pm := newPrototypePluginManager() + pm, err := newPrototypePluginManager() + if err != nil { + return nil, fmt.Errorf("failed to create plugin manager: %w", err) + } return pm.CreatePlugin(dirname, m) } diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index 48741474e..bb7e9409f 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -18,6 +18,8 @@ package plugin import ( "errors" "fmt" + + "helm.sh/helm/v4/internal/plugin/schema" ) // Metadata of a plugin, converted from the "on-disk" legacy or v1 plugin.yaml @@ -183,6 +185,8 @@ func convertMetadataConfig(pluginType string, configRaw map[string]any) (Config, var config Config switch pluginType { + case "test/v1": + config, err = remarshalConfig[*schema.ConfigTestV1](configRaw) case "cli/v1": config, err = remarshalConfig[*ConfigCLI](configRaw) case "getter/v1": @@ -205,6 +209,8 @@ func convertMetdataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string switch runtimeType { case "subprocess": runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigSubprocess](runtimeConfigRaw) + case "extism/v1": + runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigExtismV1](runtimeConfigRaw) default: return nil, fmt.Errorf("unsupported plugin runtime type: %q", runtimeType) } diff --git a/internal/plugin/plugin_type_registry.go b/internal/plugin/plugin_type_registry.go new file mode 100644 index 000000000..63450b823 --- /dev/null +++ b/internal/plugin/plugin_type_registry.go @@ -0,0 +1,100 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This file contains a "registry" of supported plugin types. 
+ +It enables "dyanmic" operations on the go type associated with a given plugin type (see: `helm.sh/helm/v4/internal/plugin/schema` package) + +Examples: + +``` + + // Create a new instance of the output message type for a given plugin type: + + pluginType := "cli/v1" // for example + ptm, ok := pluginTypesIndex[pluginType] + if !ok { + return fmt.Errorf("unknown plugin type %q", pluginType) + } + + outputMessageType := reflect.Zero(ptm.outputType).Interface() + +``` + +``` +// Create a new instance of the config type for a given plugin type + + pluginType := "cli/v1" // for example + ptm, ok := pluginTypesIndex[pluginType] + if !ok { + return nil + } + + config := reflect.New(ptm.configType).Interface().(Config) // `config` is variable of type `Config`, with + + // validate + err := config.Validate() + if err != nil { // handle error } + + // assert to concrete type if needed + cliConfig := config.(*schema.ConfigCLIV1) + +``` +*/ + +package plugin + +import ( + "reflect" + + "helm.sh/helm/v4/internal/plugin/schema" +) + +type pluginTypeMeta struct { + pluginType string + inputType reflect.Type + outputType reflect.Type + configType reflect.Type +} + +var pluginTypes = []pluginTypeMeta{ + { + pluginType: "test/v1", + inputType: reflect.TypeOf(schema.InputMessageTestV1{}), + outputType: reflect.TypeOf(schema.OutputMessageTestV1{}), + configType: reflect.TypeOf(schema.ConfigTestV1{}), + }, + { + pluginType: "cli/v1", + inputType: reflect.TypeOf(schema.InputMessageCLIV1{}), + outputType: reflect.TypeOf(schema.OutputMessageCLIV1{}), + configType: reflect.TypeOf(ConfigCLI{}), + }, + { + pluginType: "getter/v1", + inputType: reflect.TypeOf(schema.InputMessageGetterV1{}), + outputType: reflect.TypeOf(schema.OutputMessageGetterV1{}), + configType: reflect.TypeOf(ConfigGetter{}), + }, +} + +var pluginTypesIndex = func() map[string]*pluginTypeMeta { + result := make(map[string]*pluginTypeMeta, len(pluginTypes)) + for _, m := range pluginTypes { + result[m.pluginType] = &m + } + return result +}() diff --git a/internal/plugin/plugin_type_registry_test.go b/internal/plugin/plugin_type_registry_test.go new file mode 100644 index 000000000..ee8a44bb6 --- /dev/null +++ b/internal/plugin/plugin_type_registry_test.go @@ -0,0 +1,38 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/internal/plugin/schema" +) + +func TestMakeOutputMessage(t *testing.T) { + ptm := pluginTypesIndex["getter/v1"] + outputType := reflect.Zero(ptm.outputType).Interface() + assert.IsType(t, schema.OutputMessageGetterV1{}, outputType) + +} + +func TestMakeConfig(t *testing.T) { + ptm := pluginTypesIndex["getter/v1"] + config := reflect.New(ptm.configType).Interface().(Config) + assert.IsType(t, &ConfigGetter{}, config) +} diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go index 8add92dea..a9c01a380 100644 --- a/internal/plugin/runtime.go +++ b/internal/plugin/runtime.go @@ -15,7 +15,11 @@ limitations under the License. package plugin -import "go.yaml.in/yaml/v3" +import ( + "strings" + + "go.yaml.in/yaml/v3" +) // Runtime represents a plugin runtime (subprocess, extism, etc) ie. how a plugin should be executed // Runtime is responsible for instantiating plugins that implement the runtime @@ -47,3 +51,25 @@ func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (Runtim return config, nil } + +// parseEnv takes a list of "KEY=value" environment variable strings +// and transforms the result into a map[KEY]=value +// +// - empty input strings are ignored +// - input strings with no value are stored as empty strings +// - duplicate keys overwrite earlier values +func parseEnv(env []string) map[string]string { + result := make(map[string]string, len(env)) + for _, envVar := range env { + parts := strings.SplitN(envVar, "=", 2) + if len(parts) > 0 && parts[0] != "" { + key := parts[0] + var value string + if len(parts) > 1 { + value = parts[1] + } + result[key] = value + } + } + return result +} diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go new file mode 100644 index 000000000..d3ecff182 --- /dev/null +++ b/internal/plugin/runtime_extismv1.go @@ -0,0 +1,297 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "os" + "path/filepath" + "reflect" + + extism "github.com/extism/go-sdk" + "github.com/tetratelabs/wazero" +) + +const ExtistmV1WasmBinaryFilename = "plugin.wasm" + +type RuntimeConfigExtismV1Memory struct { + // The max amount of pages the plugin can allocate + // One page is 64Kib. e.g. 16 pages would require 1MiB. + // Default is 4 pages (256KiB) + MaxPages uint32 `yaml:"maxPages,omitempty"` + + // The max size of an Extism HTTP response in bytes + // Default is 4096 bytes (4KiB) + MaxHTTPResponseBytes int64 `yaml:"maxHttpResponseBytes,omitempty"` + + // The max size of all Extism vars in bytes + // Default is 4096 bytes (4KiB) + MaxVarBytes int64 `yaml:"maxVarBytes,omitempty"` +} + +type RuntimeConfigExtismV1FileSystem struct { + // If specified, a temporary directory will be created and mapped to /tmp in the plugin's filesystem. + // Data written to the directory will be visible on the host filesystem. 
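+	// Inside the plugin the directory appears as /tmp: buildManifest maps the host temp dir to /tmp via the Extism manifest's AllowedPaths.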
+ // The directory will be removed when the plugin invocation completes. + CreateTempDir bool `yaml:"createTempDir,omitempty"` + + // // An optional set of mappings between the host's filesystem and the paths a plugin can access. + // TODO: shuld Helm expose this? + //AllowedPaths map[string]string `yaml:"allowedPaths,omitempty"` +} + +// RuntimeConfigExtismV1 defines the user-configurable options the plugin's Extism runtime +// The format loosely follows the Extism Manifest format: https://extism.org/docs/concepts/manifest/ +type RuntimeConfigExtismV1 struct { + // Describes the limits on the memory the plugin may be allocated. + Memory RuntimeConfigExtismV1Memory `yaml:"memory"` + + // The "config" key is a free-form map that can be passed to the plugin. + // The plugin must interpret arbitrary data this map may contain + Config map[string]string `yaml:"config,omitempty"` + + // An optional set of hosts this plugin can communicate with. + // This only has an effect if the plugin makes HTTP requests. + // If not specified, then no hosts are allowed. + AllowedHosts []string `yaml:"allowedHosts,omitempty"` + + FileSystem RuntimeConfigExtismV1FileSystem `yaml:"fileSystem,omitempty"` + + // The timeout in milliseconds for the plugin to execute + Timeout uint64 `yaml:"timeout,omitempty"` + + // HostFunction names exposed in Helm the plugin may access + // see: https://extism.org/docs/concepts/host-functions/ + HostFunctions []string `yaml:"hostFunctions,omitempty"` + + // The name of entry function name to call in the plugin + // Defaults to "helm_plugin_main". + EntryFuncName string `yaml:"entryFuncName,omitempty"` +} + +var _ RuntimeConfig = (*RuntimeConfigExtismV1)(nil) + +func (r *RuntimeConfigExtismV1) Validate() error { + // TODO + return nil +} + +type RuntimeExtismV1 struct { + HostFunctions map[string]extism.HostFunction + CompilationCache wazero.CompilationCache +} + +var _ Runtime = (*RuntimeExtismV1)(nil) + +func (r *RuntimeExtismV1) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) { + + rc, ok := metadata.RuntimeConfig.(*RuntimeConfigExtismV1) + if !ok { + return nil, fmt.Errorf("invalid extism/v1 plugin runtime config type: %T", metadata.RuntimeConfig) + } + + fmt.Printf("Creating extism/v1 plugin %q with config: %+v\n", metadata.Name, rc) + + wasmFile := filepath.Join(pluginDir, ExtistmV1WasmBinaryFilename) + if _, err := os.Stat(wasmFile); err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("wasm binary missing for extism/v1 plugin: %q", wasmFile) + } + return nil, fmt.Errorf("failed to stat extism/v1 plugin wasm binary %q: %w", wasmFile, err) + } + + return &ExtismV1PluginRuntime{ + metadata: *metadata, + dir: pluginDir, + rc: rc, + r: r, + }, nil +} + +type ExtismV1PluginRuntime struct { + metadata Metadata + dir string + rc *RuntimeConfigExtismV1 + r *RuntimeExtismV1 +} + +var _ Plugin = (*ExtismV1PluginRuntime)(nil) + +func (p *ExtismV1PluginRuntime) Metadata() Metadata { + return p.metadata +} + +func (p *ExtismV1PluginRuntime) Dir() string { + return p.dir +} + +func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Output, error) { + + var tmpDir string + if p.rc.FileSystem.CreateTempDir { + tmpDir, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*") + slog.Debug("created plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name)) + if err != nil { + return nil, fmt.Errorf("failed to create temp dir for extism compilation cache: %w", err) + } + defer func() { + if err := os.RemoveAll(tmpDir); err 
!= nil { + slog.Warn("failed to remove plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name), slog.String("error", err.Error())) + } + }() + } + + manifest, err := buildManifest(p.dir, tmpDir, p.rc) + if err != nil { + return nil, err + } + + config, err := buildPluginConfig(input, p.r) + if err != nil { + return nil, err + } + + hostFunctions, err := buildHostFunctions(p.r.HostFunctions, p.rc) + if err != nil { + return nil, err + } + + pe, err := extism.NewPlugin(ctx, manifest, config, hostFunctions) + if err != nil { + return nil, fmt.Errorf("failed to create existing plugin: %w", err) + } + + pe.SetLogger(func(logLevel extism.LogLevel, s string) { + slog.Debug(s, slog.String("level", logLevel.String()), slog.String("plugin", p.metadata.Name)) + }) + + inputData, err := json.Marshal(input.Message) + if err != nil { + return nil, fmt.Errorf("failed to json marshel plugin input message: %T: %w", input.Message, err) + } + + slog.Debug("plugin input", slog.String("plugin", p.metadata.Name), slog.String("inputData", string(inputData))) + + entryFuncName := p.rc.EntryFuncName + if entryFuncName == "" { + entryFuncName = "helm_plugin_main" + } + + exitCode, outputData, err := pe.Call(entryFuncName, inputData) + if err != nil { + return nil, fmt.Errorf("plugin error: %w", err) + } + + if exitCode != 0 { + return nil, &InvokeExecError{ + Code: int(exitCode), + } + } + + slog.Debug("plugin output", slog.String("plugin", p.metadata.Name), slog.Int("exitCode", int(exitCode)), slog.String("outputData", string(outputData))) + + outputMessage := reflect.New(pluginTypesIndex[p.metadata.Type].outputType) + if err := json.Unmarshal(outputData, outputMessage.Interface()); err != nil { + return nil, fmt.Errorf("failed to json marshel plugin output message: %T: %w", outputMessage, err) + } + + output := &Output{ + Message: outputMessage.Elem().Interface(), + } + + return output, nil +} + +func buildManifest(pluginDir string, tmpDir string, rc *RuntimeConfigExtismV1) (extism.Manifest, error) { + wasmFile := filepath.Join(pluginDir, ExtistmV1WasmBinaryFilename) + + allowedHosts := rc.AllowedHosts + if allowedHosts == nil { + allowedHosts = []string{} + } + + allowedPaths := map[string]string{} + if tmpDir != "" { + allowedPaths[tmpDir] = "/tmp" + } + + return extism.Manifest{ + Wasm: []extism.Wasm{ + extism.WasmFile{ + Path: wasmFile, + Name: wasmFile, + }, + }, + Memory: &extism.ManifestMemory{ + MaxPages: rc.Memory.MaxPages, + MaxHttpResponseBytes: rc.Memory.MaxHTTPResponseBytes, + MaxVarBytes: rc.Memory.MaxVarBytes, + }, + Config: rc.Config, + AllowedHosts: allowedHosts, + AllowedPaths: allowedPaths, + Timeout: rc.Timeout, + }, nil +} + +func buildPluginConfig(input *Input, r *RuntimeExtismV1) (extism.PluginConfig, error) { + + mc := wazero.NewModuleConfig(). + WithSysWalltime() + if input.Stdin != nil { + mc = mc.WithStdin(input.Stdin) + } + if input.Stdout != nil { + mc = mc.WithStdout(input.Stdout) + } + if input.Stderr != nil { + mc = mc.WithStderr(input.Stderr) + } + if len(input.Env) > 0 { + env := parseEnv(input.Env) + for k, v := range env { + mc = mc.WithEnv(k, v) + } + } + + config := extism.PluginConfig{ + ModuleConfig: mc, + RuntimeConfig: wazero.NewRuntimeConfigCompiler(). + WithCloseOnContextDone(true). 
+ WithCompilationCache(r.CompilationCache), + EnableWasi: true, + EnableHttpResponseHeaders: true, + } + + return config, nil +} + +func buildHostFunctions(hostFunctions map[string]extism.HostFunction, rc *RuntimeConfigExtismV1) ([]extism.HostFunction, error) { + result := make([]extism.HostFunction, len(rc.HostFunctions)) + for _, fnName := range rc.HostFunctions { + fn, ok := hostFunctions[fnName] + if !ok { + return nil, fmt.Errorf("plugin requested host function %q not found", fnName) + } + + result = append(result, fn) + } + + return result, nil +} diff --git a/internal/plugin/runtime_extismv1_test.go b/internal/plugin/runtime_extismv1_test.go new file mode 100644 index 000000000..8d9c55195 --- /dev/null +++ b/internal/plugin/runtime_extismv1_test.go @@ -0,0 +1,124 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + extism "github.com/extism/go-sdk" + + "helm.sh/helm/v4/internal/plugin/schema" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type pluginRaw struct { + Metadata Metadata + Dir string +} + +func buildLoadExtismPlugin(t *testing.T, dir string) pluginRaw { + t.Helper() + + pluginFile := filepath.Join(dir, PluginFileName) + + metadataData, err := os.ReadFile(pluginFile) + require.NoError(t, err) + + m, err := loadMetadata(metadataData) + require.NoError(t, err) + require.Equal(t, "extism/v1", m.Runtime, "expected plugin runtime to be extism/v1") + + cmd := exec.Command("make", "-C", dir) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + require.NoError(t, cmd.Run(), "failed to build plugin in %q", dir) + + return pluginRaw{ + Metadata: *m, + Dir: dir, + } +} + +func TestRuntimeConfigExtismV1Validate(t *testing.T) { + rc := RuntimeConfigExtismV1{} + err := rc.Validate() + assert.NoError(t, err, "expected no error for empty RuntimeConfigExtismV1") +} + +func TestRuntimeExtismV1InvokePlugin(t *testing.T) { + r := RuntimeExtismV1{} + + pr := buildLoadExtismPlugin(t, "testdata/src/extismv1-test") + require.Equal(t, "test/v1", pr.Metadata.Type) + + p, err := r.CreatePlugin(pr.Dir, &pr.Metadata) + + assert.NoError(t, err, "expected no error creating plugin") + assert.NotNil(t, p, "expected plugin to be created") + + output, err := p.Invoke(t.Context(), &Input{ + Message: schema.InputMessageTestV1{ + Name: "Phippy", + }, + }) + require.Nil(t, err) + + msg := output.Message.(schema.OutputMessageTestV1) + assert.Equal(t, "Hello, Phippy! 
(6)", msg.Greeting) +} + +func TestBuildManifest(t *testing.T) { + rc := &RuntimeConfigExtismV1{ + Memory: RuntimeConfigExtismV1Memory{ + MaxPages: 8, + MaxHTTPResponseBytes: 81920, + MaxVarBytes: 8192, + }, + FileSystem: RuntimeConfigExtismV1FileSystem{ + CreateTempDir: true, + }, + Config: map[string]string{"CONFIG_KEY": "config_value"}, + AllowedHosts: []string{"example.com", "api.example.com"}, + Timeout: 5000, + } + + expected := extism.Manifest{ + Wasm: []extism.Wasm{ + extism.WasmFile{ + Path: "/path/to/plugin/plugin.wasm", + Name: "/path/to/plugin/plugin.wasm", + }, + }, + Memory: &extism.ManifestMemory{ + MaxPages: 8, + MaxHttpResponseBytes: 81920, + MaxVarBytes: 8192, + }, + Config: map[string]string{"CONFIG_KEY": "config_value"}, + AllowedHosts: []string{"example.com", "api.example.com"}, + AllowedPaths: map[string]string{"/tmp/foo": "/tmp"}, + Timeout: 5000, + } + + manifest, err := buildManifest("/path/to/plugin", "/tmp/foo", rc) + require.NoError(t, err) + assert.Equal(t, expected, manifest) +} diff --git a/internal/plugin/runtime_test.go b/internal/plugin/runtime_test.go new file mode 100644 index 000000000..8b72648b2 --- /dev/null +++ b/internal/plugin/runtime_test.go @@ -0,0 +1,63 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseEnv(t *testing.T) { + type testCase struct { + env []string + expected map[string]string + } + + testCases := map[string]testCase{ + "empty": { + env: []string{}, + expected: map[string]string{}, + }, + "single": { + env: []string{"KEY=value"}, + expected: map[string]string{"KEY": "value"}, + }, + "multiple": { + env: []string{"KEY1=value1", "KEY2=value2"}, + expected: map[string]string{"KEY1": "value1", "KEY2": "value2"}, + }, + "no_value": { + env: []string{"KEY1=value1", "KEY2="}, + expected: map[string]string{"KEY1": "value1", "KEY2": ""}, + }, + "duplicate_keys": { + env: []string{"KEY=value1", "KEY=value2"}, + expected: map[string]string{"KEY": "value2"}, // last value should overwrite + }, + "empty_strings": { + env: []string{"", "KEY=value", ""}, + expected: map[string]string{"KEY": "value"}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := parseEnv(tc.env) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/internal/plugin/schema/test.go b/internal/plugin/schema/test.go new file mode 100644 index 000000000..97efa0fde --- /dev/null +++ b/internal/plugin/schema/test.go @@ -0,0 +1,28 @@ +/* + Copyright The Helm Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package schema + +type InputMessageTestV1 struct { + Name string +} + +type OutputMessageTestV1 struct { + Greeting string +} + +type ConfigTestV1 struct{} + +func (c *ConfigTestV1) Validate() error { + return nil +} diff --git a/internal/plugin/testdata/src/extismv1-test/.gitignore b/internal/plugin/testdata/src/extismv1-test/.gitignore new file mode 100644 index 000000000..ef7d91fbb --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/.gitignore @@ -0,0 +1 @@ +plugin.wasm diff --git a/internal/plugin/testdata/src/extismv1-test/Makefile b/internal/plugin/testdata/src/extismv1-test/Makefile new file mode 100644 index 000000000..24da1f371 --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/Makefile @@ -0,0 +1,12 @@ + +.DEFAULT: build +.PHONY: build test vet + +.PHONY: plugin.wasm +plugin.wasm: + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm . + +build: plugin.wasm + +vet: + GOOS=wasip1 GOARCH=wasm go vet ./... diff --git a/internal/plugin/testdata/src/extismv1-test/go.mod b/internal/plugin/testdata/src/extismv1-test/go.mod new file mode 100644 index 000000000..baed75fab --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/go.mod @@ -0,0 +1,5 @@ +module helm.sh/helm/v4/internal/plugin/src/extismv1-test + +go 1.25.0 + +require github.com/extism/go-pdk v1.1.3 diff --git a/internal/plugin/testdata/src/extismv1-test/go.sum b/internal/plugin/testdata/src/extismv1-test/go.sum new file mode 100644 index 000000000..c15d38292 --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/go.sum @@ -0,0 +1,2 @@ +github.com/extism/go-pdk v1.1.3 h1:hfViMPWrqjN6u67cIYRALZTZLk/enSPpNKa+rZ9X2SQ= +github.com/extism/go-pdk v1.1.3/go.mod h1:Gz+LIU/YCKnKXhgge8yo5Yu1F/lbv7KtKFkiCSzW/P4= diff --git a/internal/plugin/testdata/src/extismv1-test/main.go b/internal/plugin/testdata/src/extismv1-test/main.go new file mode 100644 index 000000000..40311329d --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/main.go @@ -0,0 +1,61 @@ +package main + +import ( + _ "embed" + "fmt" + + pdk "github.com/extism/go-pdk" +) + +type InputMessageTestV1 struct { + Name string +} + +type OutputMessageTestV1 struct { + Greeting string +} + +type ConfigTestV1 struct{} + +func runGetterPluginImpl(input InputMessageTestV1) (*OutputMessageTestV1, error) { + name := input.Name + return &OutputMessageTestV1{ + Greeting: fmt.Sprintf("Hello, %s! 
(%d)", name, len(name)), + }, nil +} + +func RunGetterPlugin() error { + var input InputMessageTestV1 + if err := pdk.InputJSON(&input); err != nil { + return fmt.Errorf("failed to parse input json: %w", err) + } + + pdk.Log(pdk.LogDebug, fmt.Sprintf("Received input: %+v", input)) + output, err := runGetterPluginImpl(input) + if err != nil { + pdk.Log(pdk.LogError, fmt.Sprintf("failed: %s", err.Error())) + return err + } + + pdk.Log(pdk.LogDebug, fmt.Sprintf("Sending output: %+v", output)) + if err := pdk.OutputJSON(output); err != nil { + return fmt.Errorf("failed to write output json: %w", err) + } + + return nil +} + +//go:wasmexport helm_plugin_main +func HelmPlugin() uint32 { + pdk.Log(pdk.LogDebug, "running example-extism-getter plugin") + + if err := RunGetterPlugin(); err != nil { + pdk.Log(pdk.LogError, err.Error()) + pdk.SetError(err) + return 1 + } + + return 0 +} + +func main() {} diff --git a/internal/plugin/testdata/src/extismv1-test/plugin.yaml b/internal/plugin/testdata/src/extismv1-test/plugin.yaml new file mode 100644 index 000000000..2d3694fe6 --- /dev/null +++ b/internal/plugin/testdata/src/extismv1-test/plugin.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +type: test/v1 +name: extismv1-test +version: 0.1.0 +runtime: extism/v1 \ No newline at end of file From b6545e903a1184423a09b4728d5e8a6215c651e1 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 27 Aug 2025 08:18:36 -0700 Subject: [PATCH 61/88] code review + bug fixes Signed-off-by: George Jenkins --- internal/plugin/runtime_extismv1.go | 33 ++++++++----------- .../plugin/testdata/src/extismv1-test/main.go | 9 ++++- .../testdata/src/extismv1-test/plugin.yaml | 5 ++- pkg/getter/plugingetter.go | 2 +- 4 files changed, 27 insertions(+), 22 deletions(-) diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go index d3ecff182..a39fc2d48 100644 --- a/internal/plugin/runtime_extismv1.go +++ b/internal/plugin/runtime_extismv1.go @@ -28,8 +28,9 @@ import ( "github.com/tetratelabs/wazero" ) -const ExtistmV1WasmBinaryFilename = "plugin.wasm" +const ExtismV1WasmBinaryFilename = "plugin.wasm" +// RuntimeConfigExtismV1Memory exposes the Wasm/Extism memory options for the plugin type RuntimeConfigExtismV1Memory struct { // The max amount of pages the plugin can allocate // One page is 64Kib. e.g. 16 pages would require 1MiB. @@ -45,15 +46,13 @@ type RuntimeConfigExtismV1Memory struct { MaxVarBytes int64 `yaml:"maxVarBytes,omitempty"` } +// RuntimeConfigExtismV1FileSystem exposes filesystem options for the configuration +// TODO: should Helm expose AllowedPaths? type RuntimeConfigExtismV1FileSystem struct { // If specified, a temporary directory will be created and mapped to /tmp in the plugin's filesystem. // Data written to the directory will be visible on the host filesystem. // The directory will be removed when the plugin invocation completes. CreateTempDir bool `yaml:"createTempDir,omitempty"` - - // // An optional set of mappings between the host's filesystem and the paths a plugin can access. - // TODO: shuld Helm expose this? 
- //AllowedPaths map[string]string `yaml:"allowedPaths,omitempty"` } // RuntimeConfigExtismV1 defines the user-configurable options the plugin's Extism runtime @@ -106,9 +105,7 @@ func (r *RuntimeExtismV1) CreatePlugin(pluginDir string, metadata *Metadata) (Pl return nil, fmt.Errorf("invalid extism/v1 plugin runtime config type: %T", metadata.RuntimeConfig) } - fmt.Printf("Creating extism/v1 plugin %q with config: %+v\n", metadata.Name, rc) - - wasmFile := filepath.Join(pluginDir, ExtistmV1WasmBinaryFilename) + wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename) if _, err := os.Stat(wasmFile); err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("wasm binary missing for extism/v1 plugin: %q", wasmFile) @@ -145,7 +142,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp var tmpDir string if p.rc.FileSystem.CreateTempDir { - tmpDir, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*") + tmpDirInner, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*") slog.Debug("created plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name)) if err != nil { return nil, fmt.Errorf("failed to create temp dir for extism compilation cache: %w", err) @@ -155,6 +152,8 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp slog.Warn("failed to remove plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name), slog.String("error", err.Error())) } }() + + tmpDir = tmpDirInner } manifest, err := buildManifest(p.dir, tmpDir, p.rc) @@ -162,10 +161,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp return nil, err } - config, err := buildPluginConfig(input, p.r) - if err != nil { - return nil, err - } + config := buildPluginConfig(input, p.r) hostFunctions, err := buildHostFunctions(p.r.HostFunctions, p.rc) if err != nil { @@ -183,7 +179,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp inputData, err := json.Marshal(input.Message) if err != nil { - return nil, fmt.Errorf("failed to json marshel plugin input message: %T: %w", input.Message, err) + return nil, fmt.Errorf("failed to json marshal plugin input message: %T: %w", input.Message, err) } slog.Debug("plugin input", slog.String("plugin", p.metadata.Name), slog.String("inputData", string(inputData))) @@ -208,7 +204,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp outputMessage := reflect.New(pluginTypesIndex[p.metadata.Type].outputType) if err := json.Unmarshal(outputData, outputMessage.Interface()); err != nil { - return nil, fmt.Errorf("failed to json marshel plugin output message: %T: %w", outputMessage, err) + return nil, fmt.Errorf("failed to json marshal plugin output message: %T: %w", outputMessage, err) } output := &Output{ @@ -219,7 +215,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp } func buildManifest(pluginDir string, tmpDir string, rc *RuntimeConfigExtismV1) (extism.Manifest, error) { - wasmFile := filepath.Join(pluginDir, ExtistmV1WasmBinaryFilename) + wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename) allowedHosts := rc.AllowedHosts if allowedHosts == nil { @@ -250,8 +246,7 @@ func buildManifest(pluginDir string, tmpDir string, rc *RuntimeConfigExtismV1) ( }, nil } -func buildPluginConfig(input *Input, r *RuntimeExtismV1) (extism.PluginConfig, error) { - +func buildPluginConfig(input *Input, r *RuntimeExtismV1) extism.PluginConfig { mc := 
wazero.NewModuleConfig(). WithSysWalltime() if input.Stdin != nil { @@ -279,7 +274,7 @@ func buildPluginConfig(input *Input, r *RuntimeExtismV1) (extism.PluginConfig, e EnableHttpResponseHeaders: true, } - return config, nil + return config } func buildHostFunctions(hostFunctions map[string]extism.HostFunction, rc *RuntimeConfigExtismV1) ([]extism.HostFunction, error) { diff --git a/internal/plugin/testdata/src/extismv1-test/main.go b/internal/plugin/testdata/src/extismv1-test/main.go index 40311329d..31c739a5b 100644 --- a/internal/plugin/testdata/src/extismv1-test/main.go +++ b/internal/plugin/testdata/src/extismv1-test/main.go @@ -3,6 +3,7 @@ package main import ( _ "embed" "fmt" + "os" pdk "github.com/extism/go-pdk" ) @@ -19,8 +20,14 @@ type ConfigTestV1 struct{} func runGetterPluginImpl(input InputMessageTestV1) (*OutputMessageTestV1, error) { name := input.Name + + greeting := fmt.Sprintf("Hello, %s! (%d)", name, len(name)) + err := os.WriteFile("/tmp/greeting.txt", []byte(greeting), 0o600) + if err != nil { + return nil, fmt.Errorf("failed to write temp file: %w", err) + } return &OutputMessageTestV1{ - Greeting: fmt.Sprintf("Hello, %s! (%d)", name, len(name)), + Greeting: greeting, }, nil } diff --git a/internal/plugin/testdata/src/extismv1-test/plugin.yaml b/internal/plugin/testdata/src/extismv1-test/plugin.yaml index 2d3694fe6..fea1e3f66 100644 --- a/internal/plugin/testdata/src/extismv1-test/plugin.yaml +++ b/internal/plugin/testdata/src/extismv1-test/plugin.yaml @@ -3,4 +3,7 @@ apiVersion: v1 type: test/v1 name: extismv1-test version: 0.1.0 -runtime: extism/v1 \ No newline at end of file +runtime: extism/v1 +runtimeConfig: + fileSystem: + createTempDir: true \ No newline at end of file diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index 2b7669f23..b2dfb3e42 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -116,7 +116,7 @@ func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error return nil, fmt.Errorf("plugin %q failed to invoke: %w", g.plg, err) } - outputMessage, ok := output.Message.(*schema.OutputMessageGetterV1) + outputMessage, ok := output.Message.(schema.OutputMessageGetterV1) if !ok { return nil, fmt.Errorf("invalid output message type from plugin %q", g.plg.Metadata().Name) } From 6273f9b38e22632b3b36b3cd142ad461f97c5e96 Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Wed, 27 Aug 2025 08:18:26 -0600 Subject: [PATCH 62/88] fix: flaky registry data race on mockdns close Signed-off-by: Terry Howe --- pkg/registry/client_http_test.go | 5 +---- pkg/registry/client_insecure_tls_test.go | 5 +---- pkg/registry/client_tls_test.go | 5 +---- pkg/registry/utils_test.go | 20 ++++++++++---------- 4 files changed, 13 insertions(+), 22 deletions(-) diff --git a/pkg/registry/client_http_test.go b/pkg/registry/client_http_test.go index 043fd4205..dddd29ee9 100644 --- a/pkg/registry/client_http_test.go +++ b/pkg/registry/client_http_test.go @@ -32,10 +32,7 @@ type HTTPRegistryClientTestSuite struct { func (suite *HTTPRegistryClientTestSuite) SetupSuite() { // init test client - dockerRegistry := setup(&suite.TestSuite, false, false) - - // Start Docker registry - go dockerRegistry.ListenAndServe() + setup(&suite.TestSuite, false, false) } func (suite *HTTPRegistryClientTestSuite) TearDownSuite() { diff --git a/pkg/registry/client_insecure_tls_test.go b/pkg/registry/client_insecure_tls_test.go index accbf1670..03354475a 100644 --- a/pkg/registry/client_insecure_tls_test.go +++ 
b/pkg/registry/client_insecure_tls_test.go @@ -29,10 +29,7 @@ type InsecureTLSRegistryClientTestSuite struct { func (suite *InsecureTLSRegistryClientTestSuite) SetupSuite() { // init test client - dockerRegistry := setup(&suite.TestSuite, true, true) - - // Start Docker registry - go dockerRegistry.ListenAndServe() + setup(&suite.TestSuite, true, true) } func (suite *InsecureTLSRegistryClientTestSuite) TearDownSuite() { diff --git a/pkg/registry/client_tls_test.go b/pkg/registry/client_tls_test.go index 0897858b5..2bf1750a9 100644 --- a/pkg/registry/client_tls_test.go +++ b/pkg/registry/client_tls_test.go @@ -31,10 +31,7 @@ type TLSRegistryClientTestSuite struct { func (suite *TLSRegistryClientTestSuite) SetupSuite() { // init test client - dockerRegistry := setup(&suite.TestSuite, true, false) - - // Start Docker registry - go dockerRegistry.ListenAndServe() + setup(&suite.TestSuite, true, false) } func (suite *TLSRegistryClientTestSuite) TearDownSuite() { diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go index b46317fc6..781f3dd75 100644 --- a/pkg/registry/utils_test.go +++ b/pkg/registry/utils_test.go @@ -29,7 +29,6 @@ import ( "os" "path/filepath" "strings" - "sync" "time" "github.com/distribution/distribution/v3/configuration" @@ -65,12 +64,13 @@ type TestSuite struct { CompromisedRegistryHost string WorkspaceDir string RegistryClient *Client + dockerRegistry *registry.Registry // A mock DNS server needed for TLS connection testing. srv *mockdns.Server } -func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry { +func setup(suite *TestSuite, tlsEnabled, insecure bool) { suite.WorkspaceDir = testWorkspaceDir os.RemoveAll(suite.WorkspaceDir) os.Mkdir(suite.WorkspaceDir, 0700) @@ -166,20 +166,20 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry { config.HTTP.TLS.ClientCAs = []string{tlsCA} } } - dockerRegistry, err := registry.NewRegistry(context.Background(), config) + suite.dockerRegistry, err = registry.NewRegistry(context.Background(), config) suite.Nil(err, "no error creating test registry") suite.CompromisedRegistryHost = initCompromisedRegistryTestServer() - return dockerRegistry + go func() { + _ = suite.dockerRegistry.ListenAndServe() + _ = suite.srv.Close() + mockdns.UnpatchNet(net.DefaultResolver) + }() } func teardown(suite *TestSuite) { - var lock sync.Mutex - lock.Lock() - defer lock.Unlock() - if suite.srv != nil { - mockdns.UnpatchNet(net.DefaultResolver) - suite.srv.Close() + if suite.dockerRegistry != nil { + _ = suite.dockerRegistry.Shutdown(context.Background()) } } From ce97a2449e24bc7985e1ad614b0a8b1713d10894 Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Wed, 27 Aug 2025 10:41:46 -0600 Subject: [PATCH 63/88] fix: move mockdns to packge level Signed-off-by: Terry Howe --- pkg/registry/main_test.go | 51 ++++++++++++++++++++++++++++++++++++++ pkg/registry/utils_test.go | 13 ---------- 2 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 pkg/registry/main_test.go diff --git a/pkg/registry/main_test.go b/pkg/registry/main_test.go new file mode 100644 index 000000000..4f6e11e4f --- /dev/null +++ b/pkg/registry/main_test.go @@ -0,0 +1,51 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "net" + "os" + "testing" + + "github.com/foxcpp/go-mockdns" +) + +func TestMain(m *testing.M) { + // A mock DNS server needed for TLS connection testing. + var srv *mockdns.Server + var err error + + srv, err = mockdns.NewServer(map[string]mockdns.Zone{ + "helm-test-registry.": { + A: []string{"127.0.0.1"}, + }, + }, false) + if err != nil { + panic(err) + } + + saveDialFunction := net.DefaultResolver.Dial + srv.PatchNet(net.DefaultResolver) + + // Run all tests in the package + code := m.Run() + + net.DefaultResolver.Dial = saveDialFunction + _ = srv.Close() + + os.Exit(code) +} diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go index 781f3dd75..1da90566f 100644 --- a/pkg/registry/utils_test.go +++ b/pkg/registry/utils_test.go @@ -35,7 +35,6 @@ import ( "github.com/distribution/distribution/v3/registry" _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" - "github.com/foxcpp/go-mockdns" "github.com/stretchr/testify/suite" "golang.org/x/crypto/bcrypt" @@ -65,9 +64,6 @@ type TestSuite struct { WorkspaceDir string RegistryClient *Client dockerRegistry *registry.Registry - - // A mock DNS server needed for TLS connection testing. - srv *mockdns.Server } func setup(suite *TestSuite, tlsEnabled, insecure bool) { @@ -135,13 +131,6 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) { // host is localhost/127.0.0.1. 
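	// helm-test-registry resolves to 127.0.0.1 via the package-level mock DNS server installed in TestMain (main_test.go).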
port := ln.Addr().(*net.TCPAddr).Port suite.DockerRegistryHost = fmt.Sprintf("helm-test-registry:%d", port) - suite.srv, err = mockdns.NewServer(map[string]mockdns.Zone{ - "helm-test-registry.": { - A: []string{"127.0.0.1"}, - }, - }, false) - suite.Nil(err, "no error creating mock DNS server") - suite.srv.PatchNet(net.DefaultResolver) config.HTTP.Addr = ln.Addr().String() config.HTTP.DrainTimeout = time.Duration(10) * time.Second @@ -172,8 +161,6 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) { suite.CompromisedRegistryHost = initCompromisedRegistryTestServer() go func() { _ = suite.dockerRegistry.ListenAndServe() - _ = suite.srv.Close() - mockdns.UnpatchNet(net.DefaultResolver) }() } From e5b612626e21a7b90caa4df546a7628da06b7c51 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 27 Aug 2025 10:13:27 -0700 Subject: [PATCH 64/88] fixup slog tmpDirInner Signed-off-by: George Jenkins --- internal/plugin/runtime_extismv1.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go index a39fc2d48..c0122d08f 100644 --- a/internal/plugin/runtime_extismv1.go +++ b/internal/plugin/runtime_extismv1.go @@ -143,7 +143,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp var tmpDir string if p.rc.FileSystem.CreateTempDir { tmpDirInner, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*") - slog.Debug("created plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name)) + slog.Debug("created plugin temp dir", slog.String("dir", tmpDirInner), slog.String("plugin", p.metadata.Name)) if err != nil { return nil, fmt.Errorf("failed to create temp dir for extism compilation cache: %w", err) } From 2658a00863a9dd13cb023b68707d1a82cbd1e9ed Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Wed, 27 Aug 2025 10:21:16 -0700 Subject: [PATCH 65/88] fix output message value Signed-off-by: George Jenkins --- internal/plugin/runtime_subprocess.go | 2 +- internal/plugin/runtime_subprocess_getter.go | 2 +- pkg/getter/plugingetter_test.go | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index 286c1abeb..163f0621f 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -212,7 +212,7 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { } return &Output{ - Message: &schema.OutputMessageCLIV1{}, + Message: schema.OutputMessageCLIV1{}, }, nil } diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go index 6f9bfea91..af2d0c572 100644 --- a/internal/plugin/runtime_subprocess_getter.go +++ b/internal/plugin/runtime_subprocess_getter.go @@ -85,7 +85,7 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { } return &Output{ - Message: &schema.OutputMessageGetterV1{ + Message: schema.OutputMessageGetterV1{ Data: buf.Bytes(), }, }, nil diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 1c0f5593f..8e0619635 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -95,16 +95,16 @@ func TestConvertOptions(t *testing.T) { assert.Equal(t, expected, opts) } -type TestPlugin struct { +type testPlugin struct { t *testing.T dir string } -func (t *TestPlugin) Dir() string { +func (t *testPlugin) Dir() string { return t.dir } -func (t *TestPlugin) Metadata() plugin.Metadata { 
+func (t *testPlugin) Metadata() plugin.Metadata { return plugin.Metadata{ Name: "fake-plugin", Type: "cli/v1", @@ -121,22 +121,22 @@ func (t *TestPlugin) Metadata() plugin.Metadata { } } -func (t *TestPlugin) Invoke(_ context.Context, _ *plugin.Input) (*plugin.Output, error) { +func (t *testPlugin) Invoke(_ context.Context, _ *plugin.Input) (*plugin.Output, error) { // Simulate a plugin invocation output := &plugin.Output{ - Message: &schema.OutputMessageGetterV1{ + Message: schema.OutputMessageGetterV1{ Data: []byte("fake-plugin output"), }, } return output, nil } -var _ plugin.Plugin = (*TestPlugin)(nil) +var _ plugin.Plugin = (*testPlugin)(nil) func TestGetterPlugin(t *testing.T) { gp := getterPlugin{ options: []Option{}, - plg: &TestPlugin{t: t, dir: "fake/dir"}, + plg: &testPlugin{t: t, dir: "fake/dir"}, } buf, err := gp.Get("test://example.com", WithTimeout(5*time.Second)) From d985122a2686dc88d92558583c1de950d5890887 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Aug 2025 16:40:28 +0000 Subject: [PATCH 66/88] chore(deps): bump github.com/stretchr/testify from 1.11.0 to 1.11.1 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.11.0 to 1.11.1. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.11.0...v1.11.1) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-version: 1.11.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8cff102c9..7099e9d46 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.7 - github.com/stretchr/testify v1.11.0 + github.com/stretchr/testify v1.11.1 github.com/tetratelabs/wazero v1.9.0 go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.41.0 diff --git a/go.sum b/go.sum index 9b41a7c39..1a1601366 100644 --- a/go.sum +++ b/go.sum @@ -315,8 +315,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q= github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk= github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= From 15bbb4406c72ed5a9981a7eb67fbbfa334b9948a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Aug 2025 21:13:46 +0000 Subject: [PATCH 67/88] chore(deps): bump the k8s-io group with 7 updates Bumps the k8s-io group with 7 updates: | Package | From | To | | --- | --- | 
--- |
| [k8s.io/api](https://github.com/kubernetes/api) | `0.33.4` | `0.34.0` |
| [k8s.io/apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver) | `0.33.4` | `0.34.0` |
| [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.33.4` | `0.34.0` |
| [k8s.io/apiserver](https://github.com/kubernetes/apiserver) | `0.33.4` | `0.34.0` |
| [k8s.io/cli-runtime](https://github.com/kubernetes/cli-runtime) | `0.33.4` | `0.34.0` |
| [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.33.4` | `0.34.0` |
| [k8s.io/kubectl](https://github.com/kubernetes/kubectl) | `0.33.4` | `0.34.0` |

Updates `k8s.io/api` from 0.33.4 to 0.34.0
- [Commits](https://github.com/kubernetes/api/compare/v0.33.4...v0.34.0)

Updates `k8s.io/apiextensions-apiserver` from 0.33.4 to 0.34.0
- [Release notes](https://github.com/kubernetes/apiextensions-apiserver/releases)
- [Commits](https://github.com/kubernetes/apiextensions-apiserver/compare/v0.33.4...v0.34.0)

Updates `k8s.io/apimachinery` from 0.33.4 to 0.34.0
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.33.4...v0.34.0)

Updates `k8s.io/apiserver` from 0.33.4 to 0.34.0
- [Commits](https://github.com/kubernetes/apiserver/compare/v0.33.4...v0.34.0)

Updates `k8s.io/cli-runtime` from 0.33.4 to 0.34.0
- [Commits](https://github.com/kubernetes/cli-runtime/compare/v0.33.4...v0.34.0)

Updates `k8s.io/client-go` from 0.33.4 to 0.34.0
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.33.4...v0.34.0)

Updates `k8s.io/kubectl` from 0.33.4 to 0.34.0
- [Commits](https://github.com/kubernetes/kubectl/compare/v0.33.4...v0.34.0)

---
updated-dependencies:
- dependency-name: k8s.io/api
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/apiextensions-apiserver
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/apimachinery
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/apiserver
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/cli-runtime
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/client-go
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
- dependency-name: k8s.io/kubectl
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] --- go.mod | 45 ++++++++++++++-------------- go.sum | 92 ++++++++++++++++++++++++++++------------------------------ 2 files changed, 66 insertions(+), 71 deletions(-) diff --git a/go.mod b/go.mod index 7099e9d46..3c9992dce 100644 --- a/go.mod +++ b/go.mod @@ -39,14 +39,14 @@ require ( golang.org/x/term v0.34.0 golang.org/x/text v0.28.0 gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.33.4 - k8s.io/apiextensions-apiserver v0.33.4 - k8s.io/apimachinery v0.33.4 - k8s.io/apiserver v0.33.4 - k8s.io/cli-runtime v0.33.4 - k8s.io/client-go v0.33.4 + k8s.io/api v0.34.0 + k8s.io/apiextensions-apiserver v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/apiserver v0.34.0 + k8s.io/cli-runtime v0.34.0 + k8s.io/client-go v0.34.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.33.4 + k8s.io/kubectl v0.34.0 oras.land/oras-go/v2 v2.6.0 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/kustomize/kyaml v0.20.1 @@ -61,7 +61,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect - github.com/carapace-sh/carapace-shlex v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect @@ -77,7 +76,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.3 // indirect @@ -94,7 +93,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/huandu/xstrings v1.5.0 // indirect @@ -115,7 +114,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect @@ -146,8 +145,8 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect 
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect @@ -155,11 +154,11 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/mod v0.26.0 // indirect golang.org/x/net v0.42.0 // indirect @@ -168,18 +167,18 @@ require ( golang.org/x/sys v0.35.0 // indirect golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.35.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.68.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.1 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.33.4 // indirect - k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 // indirect + k8s.io/component-base v0.34.0 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kustomize/api v0.20.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/go.sum b/go.sum index 1a1601366..d9e7c3d3d 100644 --- a/go.sum +++ b/go.sum @@ -42,8 +42,6 @@ github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdb github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/carapace-sh/carapace-shlex v1.0.1 h1:ww0JCgWpOVuqWG7k3724pJ18Lq8gh5pHQs9j3ojUs1c= -github.com/carapace-sh/carapace-shlex v1.0.1/go.mod h1:lJ4ZsdxytE0wHJ8Ta9S7Qq0XpjgjU0mdfCqiI2FHx7M= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -97,8 +95,8 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fxamacker/cbor/v2 
v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -142,7 +140,6 @@ github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76 github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -160,8 +157,8 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= @@ -233,8 +230,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -346,10 +344,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= @@ -364,16 +362,16 @@ go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWer go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/proto/otlp 
v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -488,12 +486,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -510,26 +508,26 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= -k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= -k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= -k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= -k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= -k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= -k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= -k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk= 
-k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU= -k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= -k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= -k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= -k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 h1:gAXU86Fmbr/ktY17lkHwSjw5aoThQvhnstGGIYKlKYc= -k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw= -k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98= -k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= @@ -538,15 +536,13 @@ sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytI sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.20.0 h1:xPLqcobHI0bThyRUteO+nCV8G4d1Rlo5HafO57VRcas= -sigs.k8s.io/kustomize/api v0.20.0/go.mod h1:F6CfaV27oevRCMJgehLqyX81dlUnRX/Fc13Uo7+OSo4= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= sigs.k8s.io/kustomize/kyaml 
v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From b12cd28503ffdc8fc28d14cd635690e38def629f Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Wed, 27 Aug 2025 04:02:28 -0600 Subject: [PATCH 68/88] fix: installer action goroutine count Signed-off-by: Terry Howe --- pkg/action/install.go | 11 ++++++++++- pkg/action/install_test.go | 29 +++++++++++++---------------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/pkg/action/install.go b/pkg/action/install.go index 276009b5c..b5b45bd42 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -30,6 +30,7 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" "text/template" "time" @@ -126,7 +127,8 @@ type Install struct { TakeOwnership bool PostRenderer postrender.PostRenderer // Lock to control raceconditions when the process receives a SIGTERM - Lock sync.Mutex + Lock sync.Mutex + goroutineCount atomic.Int32 } // ChartPathOptions captures common options used for controlling chart paths @@ -446,8 +448,10 @@ func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, t resultChan := make(chan Msg, 1) go func() { + i.goroutineCount.Add(1) rel, err := i.performInstall(rel, toBeAdopted, resources) resultChan <- Msg{rel, err} + i.goroutineCount.Add(-1) }() select { case <-ctx.Done(): @@ -458,6 +462,11 @@ func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, t } } +// getGoroutineCount return the number of running routines +func (i *Install) getGoroutineCount() int32 { + return i.goroutineCount.Load() +} + // isDryRun returns true if Upgrade is set to run as a DryRun func (i *Install) isDryRun() bool { if i.DryRun || i.DryRunOption == "client" || i.DryRunOption == "server" || i.DryRunOption == "true" { diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index f567b3df4..fa9cfb222 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -28,7 +28,6 @@ import ( "os" "path/filepath" "regexp" - "runtime" "strings" "testing" "time" @@ -330,8 +329,8 @@ func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) { } rel, err := instAction.cfg.Releases.Get(res.Name, res.Version) - is.Equal("with-notes", rel.Name) is.NoError(err) + is.Equal("with-notes", rel.Name) is.Equal("parent", rel.Info.Notes) is.Equal(rel.Info.Description, "Install complete") } @@ -349,8 +348,8 @@ func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) { } rel, err := instAction.cfg.Releases.Get(res.Name, res.Version) - is.Equal("with-notes", rel.Name) is.NoError(err) + is.Equal("with-notes", rel.Name) // test run can return as either 'parent\nchild' or 
'child\nparent' if !strings.Contains(rel.Info.Notes, "parent") && !strings.Contains(rel.Info.Notes, "child") { t.Fatalf("Expected 'parent\nchild' or 'child\nparent', got '%s'", rel.Info.Notes) @@ -454,9 +453,7 @@ func TestInstallReleaseIncorrectTemplate_DryRun(t *testing.T) { if err == nil { t.Fatalf("Install should fail containing error: %s", expectedErr) } - if err != nil { - is.Contains(err.Error(), expectedErr) - } + is.Contains(err.Error(), expectedErr) } func TestInstallRelease_NoHooks(t *testing.T) { @@ -541,14 +538,14 @@ func TestInstallRelease_Wait(t *testing.T) { instAction.WaitStrategy = kube.StatusWatcherStrategy vals := map[string]interface{}{} - goroutines := runtime.NumGoroutine() + goroutines := instAction.getGoroutineCount() res, err := instAction.Run(buildChart(), vals) is.Error(err) is.Contains(res.Info.Description, "I timed out") is.Equal(res.Info.Status, release.StatusFailed) - is.Equal(goroutines, runtime.NumGoroutine()) + is.Equal(goroutines, instAction.getGoroutineCount()) } func TestInstallRelease_Wait_Interrupted(t *testing.T) { is := assert.New(t) @@ -563,15 +560,15 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) - goroutines := runtime.NumGoroutine() + goroutines := instAction.getGoroutineCount() _, err := instAction.RunWithContext(ctx, buildChart(), vals) is.Error(err) is.Contains(err.Error(), "context canceled") - is.Equal(goroutines+1, runtime.NumGoroutine()) // installation goroutine still is in background - time.Sleep(10 * time.Second) // wait for goroutine to finish - is.Equal(goroutines, runtime.NumGoroutine()) + is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background + time.Sleep(10 * time.Second) // wait for goroutine to finish + is.Equal(goroutines, instAction.getGoroutineCount()) } func TestInstallRelease_WaitForJobs(t *testing.T) { is := assert.New(t) @@ -647,7 +644,7 @@ func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) time.AfterFunc(time.Second, cancel) - goroutines := runtime.NumGoroutine() + goroutines := instAction.getGoroutineCount() res, err := instAction.RunWithContext(ctx, buildChart(), vals) is.Error(err) @@ -659,9 +656,9 @@ func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) { _, err = instAction.cfg.Releases.Get(res.Name, res.Version) is.Error(err) is.Equal(err, driver.ErrReleaseNotFound) - is.Equal(goroutines+1, runtime.NumGoroutine()) // installation goroutine still is in background - time.Sleep(10 * time.Second) // wait for goroutine to finish - is.Equal(goroutines, runtime.NumGoroutine()) + is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background + time.Sleep(10 * time.Second) // wait for goroutine to finish + is.Equal(goroutines, instAction.getGoroutineCount()) } func TestNameTemplate(t *testing.T) { From 9eafbc53dfb4b6b9ecaaaaa3c73bcffa58af397a Mon Sep 17 00:00:00 2001 From: Terry Howe Date: Mon, 18 Aug 2025 10:54:03 -0600 Subject: [PATCH 69/88] fix: make file whitespace Signed-off-by: Terry Howe --- Makefile | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index 0a20259bd..8dfa3344c 100644 --- a/Makefile +++ b/Makefile @@ -13,9 +13,9 @@ GOX = $(GOBIN)/gox GOIMPORTS = $(GOBIN)/goimports ARCH = $(shell go env GOARCH) -ACCEPTANCE_DIR:=../acceptance-testing +ACCEPTANCE_DIR := 
../acceptance-testing # To specify the subset of acceptance tests to run. '.' means all tests -ACCEPTANCE_RUN_TESTS=. +ACCEPTANCE_RUN_TESTS = . # go option PKG := ./... @@ -227,22 +227,19 @@ clean: .PHONY: release-notes release-notes: - @if [ ! -d "./_dist" ]; then \ - echo "please run 'make fetch-dist' first" && \ - exit 1; \ - fi - @if [ -z "${PREVIOUS_RELEASE}" ]; then \ - echo "please set PREVIOUS_RELEASE environment variable" \ - && exit 1; \ - fi - - @./scripts/release-notes.sh ${PREVIOUS_RELEASE} ${VERSION} - - + @if [ ! -d "./_dist" ]; then \ + echo "please run 'make fetch-dist' first" && \ + exit 1; \ + fi + @if [ -z "${PREVIOUS_RELEASE}" ]; then \ + echo "please set PREVIOUS_RELEASE environment variable" && \ + exit 1; \ + fi + @./scripts/release-notes.sh ${PREVIOUS_RELEASE} ${VERSION} .PHONY: info info: - @echo "Version: ${VERSION}" - @echo "Git Tag: ${GIT_TAG}" - @echo "Git Commit: ${GIT_COMMIT}" - @echo "Git Tree State: ${GIT_DIRTY}" + @echo "Version: ${VERSION}" + @echo "Git Tag: ${GIT_TAG}" + @echo "Git Commit: ${GIT_COMMIT}" + @echo "Git Tree State: ${GIT_DIRTY}" From 9ea35da0d0309b59b15c1a00cf619f3869512b61 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Sat, 30 Aug 2025 13:25:28 -0400 Subject: [PATCH 70/88] [HIP-0026] Plugin packaging, signing, and verification (#31176) * Plugin packaging, signing and verification Signed-off-by: Scott Rigby * wrap keyring read error with more explicit message Co-authored-by: Jesse Simpson Signed-off-by: Scott Rigby * skip unnecessary check Co-authored-by: Evans Mungai Signed-off-by: Scott Rigby * Change behavior for installing plugin with missing .prov file (now warns and continues instead of failing) Signed-off-by: Scott Rigby * Add comprehensive plugin verification tests - Test missing .prov files (warns but continues) - Test invalid .prov file formats (fails verification) - Test hash mismatches in .prov files (fails verification) - Test .prov file access errors (fails appropriately) - Test directory plugins don't support verification - Test installation without verification enabled (succeeds) - Test with valid .prov files (fails on empty keyring as expected) --------- Signed-off-by: Scott Rigby Co-authored-by: Jesse Simpson Co-authored-by: Evans Mungai --- internal/plugin/installer/extractor.go | 195 ++++++++ internal/plugin/installer/http_installer.go | 215 +++------ .../plugin/installer/http_installer_test.go | 3 +- internal/plugin/installer/installer.go | 94 +++- internal/plugin/installer/local_installer.go | 84 +++- .../plugin/installer/local_installer_test.go | 83 +--- internal/plugin/installer/oci_installer.go | 97 +++- .../plugin/installer/oci_installer_test.go | 24 +- .../plugin/installer/vcs_installer_test.go | 5 +- .../plugin/installer/verification_test.go | 421 ++++++++++++++++++ internal/plugin/sign.go | 166 +++++++ internal/plugin/sign_test.go | 92 ++++ internal/plugin/signing_info.go | 178 ++++++++ internal/plugin/verify.go | 72 +++ internal/plugin/verify_test.go | 201 +++++++++ pkg/action/package.go | 16 +- pkg/cmd/plugin.go | 2 + pkg/cmd/plugin_install.go | 55 ++- pkg/cmd/plugin_list.go | 12 +- pkg/cmd/plugin_package.go | 209 +++++++++ pkg/cmd/plugin_package_test.go | 170 +++++++ pkg/cmd/plugin_verify.go | 88 ++++ pkg/cmd/plugin_verify_test.go | 264 +++++++++++ pkg/getter/ocigetter.go | 16 +- pkg/provenance/doc.go | 10 +- pkg/provenance/sign.go | 96 ++-- pkg/provenance/sign_test.go | 42 +- pkg/registry/plugin.go | 45 +- 28 files changed, 2599 insertions(+), 356 deletions(-) create mode 100644 
internal/plugin/installer/extractor.go create mode 100644 internal/plugin/installer/verification_test.go create mode 100644 internal/plugin/sign.go create mode 100644 internal/plugin/sign_test.go create mode 100644 internal/plugin/signing_info.go create mode 100644 internal/plugin/verify.go create mode 100644 internal/plugin/verify_test.go create mode 100644 pkg/cmd/plugin_package.go create mode 100644 pkg/cmd/plugin_package_test.go create mode 100644 pkg/cmd/plugin_verify.go create mode 100644 pkg/cmd/plugin_verify_test.go diff --git a/internal/plugin/installer/extractor.go b/internal/plugin/installer/extractor.go new file mode 100644 index 000000000..9417a0535 --- /dev/null +++ b/internal/plugin/installer/extractor.go @@ -0,0 +1,195 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installer // import "helm.sh/helm/v4/internal/plugin/installer" + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "slices" + "strings" + + securejoin "github.com/cyphar/filepath-securejoin" +) + +// TarGzExtractor extracts gzip compressed tar archives +type TarGzExtractor struct{} + +// Extractor provides an interface for extracting archives +type Extractor interface { + Extract(buffer *bytes.Buffer, targetDir string) error +} + +// Extractors contains a map of suffixes and matching implementations of extractor to return +var Extractors = map[string]Extractor{ + ".tar.gz": &TarGzExtractor{}, + ".tgz": &TarGzExtractor{}, +} + +// Convert a media type to an extractor extension. +// +// This should be refactored in Helm 4, combined with the extension-based mechanism. +func mediaTypeToExtension(mt string) (string, bool) { + switch strings.ToLower(mt) { + case "application/gzip", "application/x-gzip", "application/x-tgz", "application/x-gtar": + return ".tgz", true + case "application/octet-stream": + // Generic binary type - we'll need to check the URL suffix + return "", false + default: + return "", false + } +} + +// NewExtractor creates a new extractor matching the source file name +func NewExtractor(source string) (Extractor, error) { + for suffix, extractor := range Extractors { + if strings.HasSuffix(source, suffix) { + return extractor, nil + } + } + return nil, fmt.Errorf("no extractor implemented yet for %s", source) +} + +// cleanJoin resolves dest as a subpath of root. +// +// This function runs several security checks on the path, generating an error if +// the supplied `dest` looks suspicious or would result in dubious behavior on the +// filesystem. +// +// cleanJoin assumes that any attempt by `dest` to break out of the CWD is an attempt +// to be malicious. (If you don't care about this, use the securejoin-filepath library.) +// It will emit an error if it detects paths that _look_ malicious, operating on the +// assumption that we don't actually want to do anything with files that already +// appear to be nefarious. 
+// +// - The character `:` is considered illegal because it is a separator on UNIX and a +// drive designator on Windows. +// - The path component `..` is considered suspicions, and therefore illegal +// - The character \ (backslash) is treated as a path separator and is converted to /. +// - Beginning a path with a path separator is illegal +// - Rudimentary symlink protects are offered by SecureJoin. +func cleanJoin(root, dest string) (string, error) { + + // On Windows, this is a drive separator. On UNIX-like, this is the path list separator. + // In neither case do we want to trust a TAR that contains these. + if strings.Contains(dest, ":") { + return "", errors.New("path contains ':', which is illegal") + } + + // The Go tar library does not convert separators for us. + // We assume here, as we do elsewhere, that `\\` means a Windows path. + dest = strings.ReplaceAll(dest, "\\", "/") + + // We want to alert the user that something bad was attempted. Cleaning it + // is not a good practice. + if slices.Contains(strings.Split(dest, "/"), "..") { + return "", errors.New("path contains '..', which is illegal") + } + + // If a path is absolute, the creator of the TAR is doing something shady. + if path.IsAbs(dest) { + return "", errors.New("path is absolute, which is illegal") + } + + // SecureJoin will do some cleaning, as well as some rudimentary checking of symlinks. + // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up + // being wrong or returning an error. This was introduced in v0.4.0. + root = filepath.Clean(root) + newpath, err := securejoin.SecureJoin(root, dest) + if err != nil { + return "", err + } + + return filepath.ToSlash(newpath), nil +} + +// Extract extracts compressed archives +// +// Implements Extractor. +func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error { + uncompressedStream, err := gzip.NewReader(buffer) + if err != nil { + return err + } + + if err := os.MkdirAll(targetDir, 0755); err != nil { + return err + } + + tarReader := tar.NewReader(uncompressedStream) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + path, err := cleanJoin(targetDir, header.Name) + if err != nil { + return err + } + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + case tar.TypeReg: + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(outFile, tarReader); err != nil { + outFile.Close() + return err + } + outFile.Close() + // We don't want to process these extension header files. 
+ case tar.TypeXGlobalHeader, tar.TypeXHeader: + continue + default: + return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name) + } + } + return nil +} + +// stripPluginName is a helper that relies on some sort of convention for plugin name (plugin-name-) +func stripPluginName(name string) string { + var strippedName string + for suffix := range Extractors { + if strings.HasSuffix(name, suffix) { + strippedName = strings.TrimSuffix(name, suffix) + break + } + } + re := regexp.MustCompile(`(.*)-[0-9]+\..*`) + return re.ReplaceAllString(strippedName, `$1`) +} diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go index b68fc059a..a4687d8c9 100644 --- a/internal/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -16,22 +16,14 @@ limitations under the License. package installer // import "helm.sh/helm/v4/internal/plugin/installer" import ( - "archive/tar" "bytes" - "compress/gzip" - "errors" "fmt" - "io" "log/slog" "os" - "path" "path/filepath" - "regexp" - "slices" "strings" - securejoin "github.com/cyphar/filepath-securejoin" - + "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/internal/plugin/cache" "helm.sh/helm/v4/internal/third_party/dep/fs" "helm.sh/helm/v4/pkg/cli" @@ -46,45 +38,8 @@ type HTTPInstaller struct { base extractor Extractor getter getter.Getter -} - -// TarGzExtractor extracts gzip compressed tar archives -type TarGzExtractor struct{} - -// Extractor provides an interface for extracting archives -type Extractor interface { - Extract(buffer *bytes.Buffer, targetDir string) error -} - -// Extractors contains a map of suffixes and matching implementations of extractor to return -var Extractors = map[string]Extractor{ - ".tar.gz": &TarGzExtractor{}, - ".tgz": &TarGzExtractor{}, -} - -// Convert a media type to an extractor extension. -// -// This should be refactored in Helm 4, combined with the extension-based mechanism. -func mediaTypeToExtension(mt string) (string, bool) { - switch strings.ToLower(mt) { - case "application/gzip", "application/x-gzip", "application/x-tgz", "application/x-gtar": - return ".tgz", true - case "application/octet-stream": - // Generic binary type - we'll need to check the URL suffix - return "", false - default: - return "", false - } -} - -// NewExtractor creates a new extractor matching the source file name -func NewExtractor(source string) (Extractor, error) { - for suffix, extractor := range Extractors { - if strings.HasSuffix(source, suffix) { - return extractor, nil - } - } - return nil, fmt.Errorf("no extractor implemented yet for %s", source) + // Provenance data to save after installation + provData []byte } // NewHTTPInstaller creates a new HttpInstaller. @@ -114,19 +69,6 @@ func NewHTTPInstaller(source string) (*HTTPInstaller, error) { return i, nil } -// helper that relies on some sort of convention for plugin name (plugin-name-) -func stripPluginName(name string) string { - var strippedName string - for suffix := range Extractors { - if strings.HasSuffix(name, suffix) { - strippedName = strings.TrimSuffix(name, suffix) - break - } - } - re := regexp.MustCompile(`(.*)-[0-9]+\..*`) - return re.ReplaceAllString(strippedName, `$1`) -} - // Install downloads and extracts the tarball into the cache directory // and installs into the plugin directory. 
// @@ -137,6 +79,31 @@ func (i *HTTPInstaller) Install() error { return err } + // Save the original tarball to plugins directory for verification + // Extract metadata to get the actual plugin name and version + pluginBytes := pluginData.Bytes() + metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + if err != nil { + return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) + } + filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) + tarballPath := helmpath.DataPath("plugins", filename) + if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { + return fmt.Errorf("failed to create plugins directory: %w", err) + } + if err := os.WriteFile(tarballPath, pluginBytes, 0644); err != nil { + return fmt.Errorf("failed to save tarball: %w", err) + } + + // Try to download .prov file if it exists + provURL := i.Source + ".prov" + if provData, err := i.getter.Get(provURL); err == nil { + provPath := tarballPath + ".prov" + if err := os.WriteFile(provPath, provData.Bytes(), 0644); err != nil { + slog.Debug("failed to save provenance file", "error", err) + } + } + if err := i.extractor.Extract(pluginData, i.CacheDir); err != nil { return fmt.Errorf("extracting files from archive: %w", err) } @@ -175,111 +142,57 @@ func (i HTTPInstaller) Path() string { return helmpath.DataPath("plugins", i.PluginName) } -// cleanJoin resolves dest as a subpath of root. -// -// This function runs several security checks on the path, generating an error if -// the supplied `dest` looks suspicious or would result in dubious behavior on the -// filesystem. -// -// cleanJoin assumes that any attempt by `dest` to break out of the CWD is an attempt -// to be malicious. (If you don't care about this, use the securejoin-filepath library.) -// It will emit an error if it detects paths that _look_ malicious, operating on the -// assumption that we don't actually want to do anything with files that already -// appear to be nefarious. -// -// - The character `:` is considered illegal because it is a separator on UNIX and a -// drive designator on Windows. -// - The path component `..` is considered suspicions, and therefore illegal -// - The character \ (backslash) is treated as a path separator and is converted to /. -// - Beginning a path with a path separator is illegal -// - Rudimentary symlink protects are offered by SecureJoin. -func cleanJoin(root, dest string) (string, error) { +// SupportsVerification returns true if the HTTP installer can verify plugins +func (i *HTTPInstaller) SupportsVerification() bool { + // Only support verification for tarball URLs + return strings.HasSuffix(i.Source, ".tgz") || strings.HasSuffix(i.Source, ".tar.gz") +} - // On Windows, this is a drive separator. On UNIX-like, this is the path list separator. - // In neither case do we want to trust a TAR that contains these. - if strings.Contains(dest, ":") { - return "", errors.New("path contains ':', which is illegal") +// PrepareForVerification downloads the plugin and signature files for verification +func (i *HTTPInstaller) PrepareForVerification() (string, func(), error) { + if !i.SupportsVerification() { + return "", nil, fmt.Errorf("verification not supported for this source") } - // The Go tar library does not convert separators for us. - // We assume here, as we do elsewhere, that `\\` means a Windows path. - dest = strings.ReplaceAll(dest, "\\", "/") - - // We want to alert the user that something bad was attempted. 
Cleaning it - // is not a good practice. - if slices.Contains(strings.Split(dest, "/"), "..") { - return "", errors.New("path contains '..', which is illegal") + // Create temporary directory for downloads + tempDir, err := os.MkdirTemp("", "helm-plugin-verify-*") + if err != nil { + return "", nil, fmt.Errorf("failed to create temp directory: %w", err) } - // If a path is absolute, the creator of the TAR is doing something shady. - if path.IsAbs(dest) { - return "", errors.New("path is absolute, which is illegal") + cleanup := func() { + os.RemoveAll(tempDir) } - // SecureJoin will do some cleaning, as well as some rudimentary checking of symlinks. - // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up - // being wrong or returning an error. This was introduced in v0.4.0. - root = filepath.Clean(root) - newpath, err := securejoin.SecureJoin(root, dest) + // Download plugin tarball + pluginFile := filepath.Join(tempDir, filepath.Base(i.Source)) + + g, err := getter.All(new(cli.EnvSettings)).ByScheme("http") if err != nil { - return "", err + cleanup() + return "", nil, err } - return filepath.ToSlash(newpath), nil -} - -// Extract extracts compressed archives -// -// Implements Extractor. -func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error { - uncompressedStream, err := gzip.NewReader(buffer) + data, err := g.Get(i.Source, getter.WithURL(i.Source)) if err != nil { - return err + cleanup() + return "", nil, fmt.Errorf("failed to download plugin: %w", err) } - if err := os.MkdirAll(targetDir, 0755); err != nil { - return err + if err := os.WriteFile(pluginFile, data.Bytes(), 0644); err != nil { + cleanup() + return "", nil, fmt.Errorf("failed to write plugin file: %w", err) } - tarReader := tar.NewReader(uncompressedStream) - for { - header, err := tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - path, err := cleanJoin(targetDir, header.Name) - if err != nil { - return err - } - - switch header.Typeflag { - case tar.TypeDir: - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - case tar.TypeReg: - // Ensure parent directory exists - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) - if err != nil { - return err - } - defer outFile.Close() - if _, err := io.Copy(outFile, tarReader); err != nil { - return err - } - // We don't want to process these extension header files. 
- case tar.TypeXGlobalHeader, tar.TypeXHeader: - continue - default: - return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name) + // Try to download signature file - don't fail if it doesn't exist + if provData, err := g.Get(i.Source+".prov", getter.WithURL(i.Source+".prov")); err == nil { + if err := os.WriteFile(pluginFile+".prov", provData.Bytes(), 0644); err == nil { + // Store the provenance data so we can save it after installation + i.provData = provData.Bytes() } } - return nil + // Note: We don't fail if .prov file can't be downloaded - the verification logic + // in InstallWithOptions will handle missing .prov files appropriately + + return pluginFile, cleanup, nil } diff --git a/internal/plugin/installer/http_installer_test.go b/internal/plugin/installer/http_installer_test.go index 453021b76..be40b1b90 100644 --- a/internal/plugin/installer/http_installer_test.go +++ b/internal/plugin/installer/http_installer_test.go @@ -49,7 +49,7 @@ func (t *TestHTTPGetter) Get(_ string, _ ...getter.Option) (*bytes.Buffer, error } // Fake plugin tarball data -var fakePluginB64 = "H4sIAKRj51kAA+3UX0vCUBgGcC9jn+Iwuk3Peza3GeyiUlJQkcogCOzgli7dJm4TvYk+a5+k479UqquUCJ/fLs549sLO2TnvWnJa9aXnjwujYdYLovxMhsPcfnHOLdNkOXthM/IVQQYjg2yyLLJ4kXGhLp5j0z3P41tZksqxmspL3B/O+j/XtZu1y8rdYzkOZRCxduKPk53ny6Wwz/GfIIf1As8lxzGJSmoHNLJZphKHG4YpTCE0wVk3DULfpSJ3DMMqkj3P5JfMYLdX1Vr9Ie/5E5cstcdC8K04iGLX5HaJuKpWL17F0TCIBi5pf/0pjtLhun5j3f9v6r7wfnI/H0eNp9d1/5P6Gez0vzo7wsoxfrAZbTny/o9k6J8z/VkO/LPlWdC1iVpbEEcq5nmeJ13LEtmbV0k2r2PrOs9PuuNglC5rL1Y5S/syXRQmutaNw1BGnnp8Wq3UG51WvX1da3bKtZtCN/R09DwAAAAAAAAAAAAAAAAAAADAb30AoMczDwAoAAA=" +var fakePluginB64 = "H4sIAAAAAAAAA+3SQUvDMBgG4Jz7K0LwapdvSxrwJig6mCKC5xHabBaXdDSt4L+3cQ56mV42ZPg+lw+SF5LwZmXf3OV206/rMGEnIgdG6zTJaDmee4y01FOlZpqGHJGZSsb1qS401sfOtpyz0FTup9xv+2dqNep/N/IP6zdHPSMVXCh1sH8yhtGMDBUFFTL1r4iIcXnUWxzwz/sP1rsrLkbfQGTvro11E4ZlmcucRNZHu04py1OO73OVi2Vbb7td9vp7nXevtvsKRpGVjfc2VMP2xf3t4mH5tHi5mz8ub+bPk9JXIvvr5wMAAAAAAAAAAAAAAAAAAAAAnLVPqwHcXQAoAAA=" func TestStripName(t *testing.T) { if stripPluginName("fake-plugin-0.0.1.tar.gz") != "fake-plugin" { @@ -515,6 +515,7 @@ func TestExtractWithExistingDirectory(t *testing.T) { } func TestExtractPluginInSubdirectory(t *testing.T) { + ensure.HelmHome(t) source := "https://repo.localdomain/plugins/subdir-plugin-1.0.0.tar.gz" tempDir := t.TempDir() diff --git a/internal/plugin/installer/installer.go b/internal/plugin/installer/installer.go index 7900f6745..dd169397e 100644 --- a/internal/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -17,12 +17,14 @@ package installer import ( "errors" + "fmt" "net/http" "os" "path/filepath" "strings" "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/pkg/registry" ) // ErrMissingMetadata indicates that plugin.yaml is missing. @@ -31,6 +33,14 @@ var ErrMissingMetadata = errors.New("plugin metadata (plugin.yaml) missing") // Debug enables verbose output. var Debug bool +// Options contains options for plugin installation. +type Options struct { + // Verify enables signature verification before installation + Verify bool + // Keyring is the path to the keyring for verification + Keyring string +} + // Installer provides an interface for installing helm client plugins. type Installer interface { // Install adds a plugin. @@ -41,15 +51,89 @@ type Installer interface { Update() error } +// Verifier provides an interface for installers that support verification. 
+type Verifier interface { + // SupportsVerification returns true if this installer can verify plugins + SupportsVerification() bool + // PrepareForVerification downloads necessary files for verification + PrepareForVerification() (pluginPath string, cleanup func(), err error) +} + // Install installs a plugin. func Install(i Installer) error { + _, err := InstallWithOptions(i, Options{}) + return err +} + +// VerificationResult contains the result of plugin verification +type VerificationResult struct { + SignedBy []string + Fingerprint string + FileHash string +} + +// InstallWithOptions installs a plugin with options. +func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) { + if err := os.MkdirAll(filepath.Dir(i.Path()), 0755); err != nil { - return err + return nil, err } if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) { - return errors.New("plugin already exists") + return nil, errors.New("plugin already exists") + } + + var result *VerificationResult + + // If verification is requested, check if installer supports it + if opts.Verify { + verifier, ok := i.(Verifier) + if !ok || !verifier.SupportsVerification() { + return nil, fmt.Errorf("--verify is only supported for plugin tarballs (.tgz files)") + } + + // Prepare for verification (download files if needed) + pluginPath, cleanup, err := verifier.PrepareForVerification() + if err != nil { + return nil, fmt.Errorf("failed to prepare for verification: %w", err) + } + if cleanup != nil { + defer cleanup() + } + + // Check if provenance file exists + provFile := pluginPath + ".prov" + if _, err := os.Stat(provFile); err != nil { + if os.IsNotExist(err) { + // No .prov file found - emit warning but continue installation + fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. Plugin is not signed and cannot be verified.\n") + } else { + // Other error accessing .prov file + return nil, fmt.Errorf("failed to access provenance file: %w", err) + } + } else { + // Provenance file exists - verify the plugin + verification, err := plugin.VerifyPlugin(pluginPath, opts.Keyring) + if err != nil { + return nil, fmt.Errorf("plugin verification failed: %w", err) + } + + // Collect verification info + result = &VerificationResult{ + SignedBy: make([]string, 0), + Fingerprint: fmt.Sprintf("%X", verification.SignedBy.PrimaryKey.Fingerprint), + FileHash: verification.FileHash, + } + for name := range verification.SignedBy.Identities { + result.SignedBy = append(result.SignedBy, name) + } + } } - return i.Install() + + if err := i.Install(); err != nil { + return nil, err + } + + return result, nil } // Update updates a plugin. @@ -62,6 +146,10 @@ func Update(i Installer) error { // NewForSource determines the correct Installer for the given source. 
func NewForSource(source, version string) (Installer, error) { + // Check if source is an OCI registry reference + if strings.HasPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) { + return NewOCIInstaller(source) + } // Check if source is a local directory if isLocalReference(source) { return NewLocalInstaller(source) diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go index 87b9eaf97..0e00c93d0 100644 --- a/internal/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -24,7 +24,9 @@ import ( "path/filepath" "strings" + "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/internal/third_party/dep/fs" + "helm.sh/helm/v4/pkg/helmpath" ) // ErrPluginNotAFolder indicates that the plugin path is not a folder. @@ -35,6 +37,7 @@ type LocalInstaller struct { base isArchive bool extractor Extractor + provData []byte // Provenance data to save after installation } // NewLocalInstaller creates a new LocalInstaller. @@ -105,6 +108,30 @@ func (i *LocalInstaller) installFromArchive() error { return fmt.Errorf("failed to read archive: %w", err) } + // Copy the original tarball to plugins directory for verification + // Extract metadata to get the actual plugin name and version + metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) + } + filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) + tarballPath := helmpath.DataPath("plugins", filename) + if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { + return fmt.Errorf("failed to create plugins directory: %w", err) + } + if err := os.WriteFile(tarballPath, data, 0644); err != nil { + return fmt.Errorf("failed to save tarball: %w", err) + } + + // Check for and copy .prov file if it exists + provSource := i.Source + ".prov" + if provData, err := os.ReadFile(provSource); err == nil { + provPath := tarballPath + ".prov" + if err := os.WriteFile(provPath, provData, 0644); err != nil { + slog.Debug("failed to save provenance file", "error", err) + } + } + // Create a temporary directory for extraction tempDir, err := os.MkdirTemp("", "helm-plugin-extract-") if err != nil { @@ -118,31 +145,60 @@ func (i *LocalInstaller) installFromArchive() error { return fmt.Errorf("failed to extract archive: %w", err) } - // Detect where the plugin.yaml actually is - pluginRoot, err := detectPluginRoot(tempDir) - if err != nil { - return err + // Plugin directory should be named after the plugin at the archive root + pluginName := stripPluginName(filepath.Base(i.Source)) + pluginDir := filepath.Join(tempDir, pluginName) + if _, err = os.Stat(filepath.Join(pluginDir, "plugin.yaml")); err != nil { + return fmt.Errorf("plugin.yaml not found in expected directory %s: %w", pluginDir, err) } // Copy to the final destination - slog.Debug("copying", "source", pluginRoot, "path", i.Path()) - return fs.CopyDir(pluginRoot, i.Path()) + slog.Debug("copying", "source", pluginDir, "path", i.Path()) + return fs.CopyDir(pluginDir, i.Path()) +} + +// Update updates a local repository +func (i *LocalInstaller) Update() error { + slog.Debug("local repository is auto-updated") + return nil } -// Path returns the path where the plugin will be installed. -// For archive sources, strips the version from the filename. 
+// Path is overridden to handle archive plugin names properly func (i *LocalInstaller) Path() string { if i.Source == "" { return "" } + + pluginName := filepath.Base(i.Source) if i.isArchive { - return filepath.Join(i.PluginsDirectory, stripPluginName(filepath.Base(i.Source))) + // Strip archive extension to get plugin name + pluginName = stripPluginName(pluginName) } - return filepath.Join(i.PluginsDirectory, filepath.Base(i.Source)) + + return helmpath.DataPath("plugins", pluginName) } -// Update updates a local repository -func (i *LocalInstaller) Update() error { - slog.Debug("local repository is auto-updated") - return nil +// SupportsVerification returns true if the local installer can verify plugins +func (i *LocalInstaller) SupportsVerification() bool { + // Only support verification for local tarball files + return i.isArchive +} + +// PrepareForVerification returns the local path for verification +func (i *LocalInstaller) PrepareForVerification() (string, func(), error) { + if !i.SupportsVerification() { + return "", nil, fmt.Errorf("verification not supported for directories") + } + + // For local files, try to read the .prov file if it exists + provFile := i.Source + ".prov" + if provData, err := os.ReadFile(provFile); err == nil { + // Store the provenance data so we can save it after installation + i.provData = provData + } + // Note: We don't fail if .prov file doesn't exist - the verification logic + // in InstallWithOptions will handle missing .prov files appropriately + + // Return the source path directly, no cleanup needed + return i.Source, nil, nil } diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index 05118e183..339028ef3 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -86,8 +86,8 @@ func TestLocalInstallerTarball(t *testing.T) { Body string Mode int64 }{ - {"plugin.yaml", "name: test-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644}, - {"bin/test-plugin", "#!/bin/bash\necho test", 0755}, + {"test-plugin/plugin.yaml", "name: test-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644}, + {"test-plugin/bin/test-plugin", "#!/bin/bash\necho test", 0755}, } for _, file := range files { @@ -146,82 +146,3 @@ func TestLocalInstallerTarball(t *testing.T) { t.Fatalf("plugin not found at %s: %v", i.Path(), err) } } - -func TestLocalInstallerTarballWithSubdirectory(t *testing.T) { - ensure.HelmHome(t) - - // Create a test tarball with subdirectory - tempDir := t.TempDir() - tarballPath := filepath.Join(tempDir, "subdir-plugin-1.0.0.tar.gz") - - // Create tarball content - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - tw := tar.NewWriter(gw) - - files := []struct { - Name string - Body string - Mode int64 - IsDir bool - }{ - {"my-plugin/", "", 0755, true}, - {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644, false}, - {"my-plugin/bin/", "", 0755, true}, - {"my-plugin/bin/my-plugin", "#!/bin/bash\necho test", 0755, false}, - } - - for _, file := range files { - hdr := &tar.Header{ - Name: file.Name, - Mode: file.Mode, - } - if file.IsDir { - hdr.Typeflag = tar.TypeDir - } else { - hdr.Size = int64(len(file.Body)) - } - - if err := tw.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if !file.IsDir { - if _, err := tw.Write([]byte(file.Body)); err != nil { - t.Fatal(err) - } - } - } - - if err := tw.Close(); err != nil { 
- t.Fatal(err) - } - if err := gw.Close(); err != nil { - t.Fatal(err) - } - - // Write tarball to file - if err := os.WriteFile(tarballPath, buf.Bytes(), 0644); err != nil { - t.Fatal(err) - } - - // Test installation - i, err := NewForSource(tarballPath, "") - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if err := Install(i); err != nil { - t.Fatal(err) - } - - expectedPath := helmpath.DataPath("plugins", "subdir-plugin") - if i.Path() != expectedPath { - t.Fatalf("expected path %q, got %q", expectedPath, i.Path()) - } - - // Verify plugin was installed from subdirectory - pluginYaml := filepath.Join(i.Path(), "plugin.yaml") - if _, err := os.Stat(pluginYaml); err != nil { - t.Fatalf("plugin.yaml not found at %s: %v", pluginYaml, err) - } -} diff --git a/internal/plugin/installer/oci_installer.go b/internal/plugin/installer/oci_installer.go index a96a94ee1..c33ef13d5 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" + "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/internal/plugin/cache" "helm.sh/helm/v4/internal/third_party/dep/fs" "helm.sh/helm/v4/pkg/cli" @@ -33,6 +34,9 @@ import ( "helm.sh/helm/v4/pkg/registry" ) +// Ensure OCIInstaller implements Verifier +var _ Verifier = (*OCIInstaller)(nil) + // OCIInstaller installs plugins from OCI registries type OCIInstaller struct { CacheDir string @@ -85,17 +89,44 @@ func (i *OCIInstaller) Install() error { return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) } - // Create cache directory - if err := os.MkdirAll(i.CacheDir, 0755); err != nil { - return fmt.Errorf("failed to create cache directory: %w", err) + // Save the original tarball to plugins directory for verification + // For OCI plugins, extract version from plugin.yaml inside the tarball + pluginBytes := pluginData.Bytes() + + // Extract metadata to get the actual plugin name and version + metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + if err != nil { + return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) + } + filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) + + tarballPath := helmpath.DataPath("plugins", filename) + if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { + return fmt.Errorf("failed to create plugins directory: %w", err) + } + if err := os.WriteFile(tarballPath, pluginBytes, 0644); err != nil { + return fmt.Errorf("failed to save tarball: %w", err) + } + + // Try to download and save .prov file alongside the tarball + provSource := i.Source + ".prov" + if provData, err := i.getter.Get(provSource); err == nil { + provPath := tarballPath + ".prov" + if err := os.WriteFile(provPath, provData.Bytes(), 0644); err != nil { + slog.Debug("failed to save provenance file", "error", err) + } } // Check if this is a gzip compressed file - pluginBytes := pluginData.Bytes() if len(pluginBytes) < 2 || pluginBytes[0] != 0x1f || pluginBytes[1] != 0x8b { return fmt.Errorf("plugin data is not a gzip compressed archive") } + // Create cache directory + if err := os.MkdirAll(i.CacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + // Extract as gzipped tar if err := extractTarGz(bytes.NewReader(pluginBytes), i.CacheDir); err != nil { return fmt.Errorf("failed to extract plugin: %w", err) @@ -214,3 +245,61 @@ func extractTar(r io.Reader, targetDir string) error { return nil } + +// SupportsVerification 
returns true since OCI plugins can be verified +func (i *OCIInstaller) SupportsVerification() bool { + return true +} + +// PrepareForVerification downloads the plugin tarball and provenance to a temporary directory +func (i *OCIInstaller) PrepareForVerification() (pluginPath string, cleanup func(), err error) { + slog.Debug("preparing OCI plugin for verification", "source", i.Source) + + // Create temporary directory for verification + tempDir, err := os.MkdirTemp("", "helm-oci-verify-") + if err != nil { + return "", nil, fmt.Errorf("failed to create temp directory: %w", err) + } + + cleanup = func() { + os.RemoveAll(tempDir) + } + + // Download the plugin tarball + pluginData, err := i.getter.Get(i.Source) + if err != nil { + cleanup() + return "", nil, fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + } + + // Extract metadata to get the actual plugin name and version + pluginBytes := pluginData.Bytes() + metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + if err != nil { + cleanup() + return "", nil, fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) + } + filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) + + // Save plugin tarball to temp directory + pluginTarball := filepath.Join(tempDir, filename) + if err := os.WriteFile(pluginTarball, pluginBytes, 0644); err != nil { + cleanup() + return "", nil, fmt.Errorf("failed to save plugin tarball: %w", err) + } + + // Try to download the provenance file - don't fail if it doesn't exist + provSource := i.Source + ".prov" + if provData, err := i.getter.Get(provSource); err == nil { + // Save provenance to temp directory + provFile := filepath.Join(tempDir, filename+".prov") + if err := os.WriteFile(provFile, provData.Bytes(), 0644); err == nil { + slog.Debug("prepared plugin for verification", "plugin", pluginTarball, "provenance", provFile) + } + } + // Note: We don't fail if .prov file can't be downloaded - the verification logic + // in InstallWithOptions will handle missing .prov files appropriately + + slog.Debug("prepared plugin for verification", "plugin", pluginTarball) + return pluginTarball, cleanup, nil +} diff --git a/internal/plugin/installer/oci_installer_test.go b/internal/plugin/installer/oci_installer_test.go index 1ed10ff8e..1280cf97d 100644 --- a/internal/plugin/installer/oci_installer_test.go +++ b/internal/plugin/installer/oci_installer_test.go @@ -34,6 +34,7 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "helm.sh/helm/v4/internal/test/ensure" "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/helmpath" @@ -125,7 +126,7 @@ func mockOCIRegistryWithArtifactType(t *testing.T, pluginName string) (*httptest Digest: digest.Digest(layerDigest), Size: int64(len(pluginData)), Annotations: map[string]string{ - ocispec.AnnotationTitle: pluginName + ".tgz", // Layer named properly + ocispec.AnnotationTitle: pluginName + "-1.0.0.tgz", // Layer named with version }, }, }, @@ -316,9 +317,8 @@ func TestOCIInstaller_Path(t *testing.T) { } func TestOCIInstaller_Install(t *testing.T) { - // Set up isolated test environment FIRST - testPluginsDir := t.TempDir() - t.Setenv("HELM_PLUGINS", testPluginsDir) + // Set up isolated test environment + ensure.HelmHome(t) pluginName := "test-plugin-basic" server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) @@ -333,15 +333,10 @@ func TestOCIInstaller_Install(t *testing.T) { t.Fatalf("Expected no error, got 
%v", err) } - // The OCI installer uses helmpath.DataPath, which now points to our test directory + // The OCI installer uses helmpath.DataPath, which is isolated by ensure.HelmHome(t) actualPath := installer.Path() t.Logf("Installer will use path: %s", actualPath) - // Verify the path is actually in our test directory - if !strings.HasPrefix(actualPath, testPluginsDir) { - t.Fatalf("Expected path %s to be under test directory %s", actualPath, testPluginsDir) - } - // Install the plugin if err := Install(installer); err != nil { t.Fatalf("Expected installation to succeed, got error: %v", err) @@ -399,8 +394,7 @@ func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Set up isolated test environment for each subtest - testPluginsDir := t.TempDir() - t.Setenv("HELM_PLUGINS", testPluginsDir) + ensure.HelmHome(t) server, registryHost := mockOCIRegistryWithArtifactType(t, tc.pluginName) defer server.Close() @@ -440,8 +434,7 @@ func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) { func TestOCIInstaller_Install_AlreadyExists(t *testing.T) { // Set up isolated test environment - testPluginsDir := t.TempDir() - t.Setenv("HELM_PLUGINS", testPluginsDir) + ensure.HelmHome(t) pluginName := "test-plugin-exists" server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) @@ -474,8 +467,7 @@ func TestOCIInstaller_Install_AlreadyExists(t *testing.T) { func TestOCIInstaller_Update(t *testing.T) { // Set up isolated test environment - testPluginsDir := t.TempDir() - t.Setenv("HELM_PLUGINS", testPluginsDir) + ensure.HelmHome(t) pluginName := "test-plugin-update" server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName) diff --git a/internal/plugin/installer/vcs_installer_test.go b/internal/plugin/installer/vcs_installer_test.go index f024b4b40..d542a0f75 100644 --- a/internal/plugin/installer/vcs_installer_test.go +++ b/internal/plugin/installer/vcs_installer_test.go @@ -83,8 +83,9 @@ func TestVCSInstaller(t *testing.T) { if repo.current != "0.1.1" { t.Fatalf("expected version '0.1.1', got %q", repo.current) } - if i.Path() != helmpath.DataPath("plugins", "helm-env") { - t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path()) + expectedPath := helmpath.DataPath("plugins", "helm-env") + if i.Path() != expectedPath { + t.Fatalf("expected path %q, got %q", expectedPath, i.Path()) } // Install again to test plugin exists error diff --git a/internal/plugin/installer/verification_test.go b/internal/plugin/installer/verification_test.go new file mode 100644 index 000000000..22f0a8308 --- /dev/null +++ b/internal/plugin/installer/verification_test.go @@ -0,0 +1,421 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package installer + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/test/ensure" +) + +func TestInstallWithOptions_VerifyMissingProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a temporary plugin tarball without .prov file + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + defer os.Remove(pluginTgz) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Capture stderr to check warning message + oldStderr := os.Stderr + r, w, _ := os.Pipe() + os.Stderr = w + + // Install with verification enabled (should warn but succeed) + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"}) + + // Restore stderr and read captured output + w.Close() + os.Stderr = oldStderr + var buf bytes.Buffer + io.Copy(&buf, r) + output := buf.String() + + // Should succeed with nil result (no verification performed) + if err != nil { + t.Fatalf("Expected installation to succeed despite missing .prov file, got error: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when .prov file is missing, got: %+v", result) + } + + // Should contain warning message + expectedWarning := "WARNING: No provenance file found for plugin" + if !strings.Contains(output, expectedWarning) { + t.Errorf("Expected warning message '%s' in output, got: %s", expectedWarning, output) + } + + // Plugin should be installed + if _, err := os.Stat(installer.Path()); os.IsNotExist(err) { + t.Errorf("Plugin should be installed at %s", installer.Path()) + } +} + +func TestInstallWithOptions_VerifyWithValidProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a temporary plugin tarball with valid .prov file + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + + provFile := pluginTgz + ".prov" + createProvFile(t, provFile, pluginTgz, "") + defer os.Remove(provFile) + + // Create keyring with test key (empty for testing) + keyring := createTestKeyring(t) + defer os.Remove(keyring) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification enabled + // This will fail signature verification but pass hash validation + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring}) + + // Should fail due to invalid signature (empty keyring) but we test that it gets past the hash check + if err == nil { + t.Fatalf("Expected installation to fail with empty keyring") + } + if !strings.Contains(err.Error(), "plugin verification failed") { + t.Errorf("Expected plugin verification failed error, got: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification fails, got: %+v", result) + } + + // Plugin should not be installed due to verification failure + if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) { + t.Errorf("Plugin should not be installed when verification fails") + } +} + +func TestInstallWithOptions_VerifyWithInvalidProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a temporary plugin tarball with invalid .prov file + pluginDir := createTestPluginDir(t) + pluginTgz := 
createTarballFromPluginDir(t, pluginDir) + defer os.Remove(pluginTgz) + + provFile := pluginTgz + ".prov" + createProvFileInvalidFormat(t, provFile) + defer os.Remove(provFile) + + // Create keyring with test key + keyring := createTestKeyring(t) + defer os.Remove(keyring) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification enabled (should fail) + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring}) + + // Should fail with verification error + if err == nil { + t.Fatalf("Expected installation with invalid .prov file to fail") + } + if result != nil { + t.Errorf("Expected nil verification result when verification fails, got: %+v", result) + } + + // Should contain verification failure message + expectedError := "plugin verification failed" + if !strings.Contains(err.Error(), expectedError) { + t.Errorf("Expected error message '%s', got: %s", expectedError, err.Error()) + } + + // Plugin should not be installed + if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) { + t.Errorf("Plugin should not be installed when verification fails") + } +} + +func TestInstallWithOptions_NoVerifyRequested(t *testing.T) { + ensure.HelmHome(t) + + // Create a temporary plugin tarball without .prov file + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + defer os.Remove(pluginTgz) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install without verification (should succeed without any verification) + result, err := InstallWithOptions(installer, Options{Verify: false}) + + // Should succeed with no verification + if err != nil { + t.Fatalf("Expected installation without verification to succeed, got error: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification is disabled, got: %+v", result) + } + + // Plugin should be installed + if _, err := os.Stat(installer.Path()); os.IsNotExist(err) { + t.Errorf("Plugin should be installed at %s", installer.Path()) + } +} + +func TestInstallWithOptions_VerifyDirectoryNotSupported(t *testing.T) { + ensure.HelmHome(t) + + // Create a directory-based plugin (not an archive) + pluginDir := createTestPluginDir(t) + + // Create local installer for directory + installer, err := NewLocalInstaller(pluginDir) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification should fail (directories don't support verification) + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"}) + + // Should fail with verification not supported error + if err == nil { + t.Fatalf("Expected installation to fail with verification not supported error") + } + if !strings.Contains(err.Error(), "--verify is only supported for plugin tarballs") { + t.Errorf("Expected verification not supported error, got: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification fails, got: %+v", result) + } +} + +func TestInstallWithOptions_VerifyMismatchedProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create plugin tarball + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + defer 
os.Remove(pluginTgz) + + provFile := pluginTgz + ".prov" + // Create provenance file with wrong hash (for a different file) + createProvFile(t, provFile, pluginTgz, "sha256:wronghash") + defer os.Remove(provFile) + + // Create keyring with test key + keyring := createTestKeyring(t) + defer os.Remove(keyring) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification should fail due to hash mismatch + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring}) + + // Should fail with verification error + if err == nil { + t.Fatalf("Expected installation to fail with hash mismatch") + } + if !strings.Contains(err.Error(), "plugin verification failed") { + t.Errorf("Expected plugin verification failed error, got: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification fails, got: %+v", result) + } +} + +func TestInstallWithOptions_VerifyProvenanceAccessError(t *testing.T) { + ensure.HelmHome(t) + + // Create plugin tarball + pluginDir := createTestPluginDir(t) + pluginTgz := createTarballFromPluginDir(t, pluginDir) + defer os.Remove(pluginTgz) + + // Create a .prov file but make it inaccessible (simulate permission error) + provFile := pluginTgz + ".prov" + if err := os.WriteFile(provFile, []byte("test"), 0000); err != nil { + t.Fatalf("Failed to create inaccessible provenance file: %v", err) + } + defer os.Remove(provFile) + + // Create keyring + keyring := createTestKeyring(t) + defer os.Remove(keyring) + + // Create local installer + installer, err := NewLocalInstaller(pluginTgz) + if err != nil { + t.Fatalf("Failed to create installer: %v", err) + } + defer os.RemoveAll(installer.Path()) + + // Install with verification should fail due to access error + result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring}) + + // Should fail with access error (either at stat level or during verification) + if err == nil { + t.Fatalf("Expected installation to fail with provenance file access error") + } + // The error could be either "failed to access provenance file" or "plugin verification failed" + // depending on when the permission error occurs + if !strings.Contains(err.Error(), "failed to access provenance file") && + !strings.Contains(err.Error(), "plugin verification failed") { + t.Errorf("Expected provenance file access or verification error, got: %v", err) + } + if result != nil { + t.Errorf("Expected nil verification result when verification fails, got: %+v", result) + } +} + +// Helper functions for test setup + +func createTestPluginDir(t *testing.T) string { + t.Helper() + + // Create temporary directory with plugin structure + tmpDir := t.TempDir() + pluginDir := filepath.Join(tmpDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatalf("Failed to create plugin directory: %v", err) + } + + // Create plugin.yaml using the standardized v1 format + pluginYaml := `apiVersion: v1 +name: test-plugin +type: cli/v1 +runtime: subprocess +version: 1.0.0 +runtimeConfig: + platformCommand: + - command: echo` + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYaml), 0644); err != nil { + t.Fatalf("Failed to create plugin.yaml: %v", err) + } + + return pluginDir +} + +func createTarballFromPluginDir(t *testing.T, pluginDir string) string { + t.Helper() + + // Create tarball using the plugin 
package helper + tmpDir := filepath.Dir(pluginDir) + tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz") + tarFile, err := os.Create(tgzPath) + if err != nil { + t.Fatalf("Failed to create tarball file: %v", err) + } + defer tarFile.Close() + + if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil { + t.Fatalf("Failed to create tarball: %v", err) + } + + return tgzPath +} + +func createProvFile(t *testing.T, provFile, pluginTgz, hash string) { + t.Helper() + + var hashStr string + if hash == "" { + // Calculate actual hash of the tarball for realistic testing + data, err := os.ReadFile(pluginTgz) + if err != nil { + t.Fatalf("Failed to read tarball for hashing: %v", err) + } + hashSum := sha256.Sum256(data) + hashStr = fmt.Sprintf("sha256:%x", hashSum) + } else { + // Use provided hash (could be wrong for testing) + hashStr = hash + } + + // Create properly formatted provenance file with specified hash + provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA256 + +name: test-plugin +version: 1.0.0 +description: Test plugin for verification +files: + test-plugin-1.0.0.tgz: %s +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1 + +iQEcBAEBCAAGBQJktest... +-----END PGP SIGNATURE----- +`, hashStr) + if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil { + t.Fatalf("Failed to create provenance file: %v", err) + } +} + +func createProvFileInvalidFormat(t *testing.T, provFile string) { + t.Helper() + + // Create an invalid provenance file (not PGP signed format) + invalidProv := "This is not a valid PGP signed message" + if err := os.WriteFile(provFile, []byte(invalidProv), 0644); err != nil { + t.Fatalf("Failed to create invalid provenance file: %v", err) + } +} + +func createTestKeyring(t *testing.T) string { + t.Helper() + + // Create a temporary keyring file + tmpDir := t.TempDir() + keyringPath := filepath.Join(tmpDir, "pubring.gpg") + + // Create empty keyring for testing + if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil { + t.Fatalf("Failed to create test keyring: %v", err) + } + + return keyringPath +} diff --git a/internal/plugin/sign.go b/internal/plugin/sign.go new file mode 100644 index 000000000..134c640e7 --- /dev/null +++ b/internal/plugin/sign.go @@ -0,0 +1,166 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + + "sigs.k8s.io/yaml" + + "helm.sh/helm/v4/pkg/provenance" +) + +// SignPlugin signs a plugin using the SHA256 hash of the tarball. +// +// This is used when packaging and signing a plugin from a tarball file. +// It creates a signature that includes the tarball hash and plugin metadata, +// allowing verification of the original tarball later. 
+func SignPlugin(tarballPath string, signer *provenance.Signatory) (string, error) { + // Extract plugin metadata from tarball + pluginMeta, err := extractPluginMetadata(tarballPath) + if err != nil { + return "", fmt.Errorf("failed to extract plugin metadata: %w", err) + } + + // Marshal plugin metadata to YAML bytes + metadataBytes, err := yaml.Marshal(pluginMeta) + if err != nil { + return "", fmt.Errorf("failed to marshal plugin metadata: %w", err) + } + + // Use the generic provenance signing function + return signer.ClearSign(tarballPath, metadataBytes) +} + +// extractPluginMetadata extracts plugin metadata from a tarball +func extractPluginMetadata(tarballPath string) (*Metadata, error) { + f, err := os.Open(tarballPath) + if err != nil { + return nil, err + } + defer f.Close() + + return ExtractPluginMetadataFromReader(f) +} + +// ExtractPluginMetadataFromReader extracts plugin metadata from a tarball reader +func ExtractPluginMetadataFromReader(r io.Reader) (*Metadata, error) { + gzr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + + // Look for plugin.yaml file + if filepath.Base(header.Name) == "plugin.yaml" { + data, err := io.ReadAll(tr) + if err != nil { + return nil, err + } + + // Parse the plugin metadata + metadata, err := loadMetadata(data) + if err != nil { + return nil, err + } + + return metadata, nil + } + } + + return nil, errors.New("plugin.yaml not found in tarball") +} + +// parsePluginMessageBlock parses a signed message block to extract plugin metadata and checksums +func parsePluginMessageBlock(data []byte) (*Metadata, *provenance.SumCollection, error) { + sc := &provenance.SumCollection{} + + // We only need the checksums for verification, not the full metadata + if err := provenance.ParseMessageBlock(data, nil, sc); err != nil { + return nil, sc, err + } + return nil, sc, nil +} + +// CreatePluginTarball creates a gzipped tarball from a plugin directory +func CreatePluginTarball(sourceDir, pluginName string, w io.Writer) error { + gzw := gzip.NewWriter(w) + defer gzw.Close() + + tw := tar.NewWriter(gzw) + defer tw.Close() + + // Use the plugin name as the base directory in the tarball + baseDir := pluginName + + // Walk the directory tree + return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Create header + header, err := tar.FileInfoHeader(info, "") + if err != nil { + return err + } + + // Update the name to be relative to the source directory + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // Include the base directory name in the tarball + header.Name = filepath.Join(baseDir, relPath) + + // Write header + if err := tw.WriteHeader(header); err != nil { + return err + } + + // If it's a regular file, write its content + if info.Mode().IsRegular() { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(tw, file); err != nil { + return err + } + } + + return nil + }) +} diff --git a/internal/plugin/sign_test.go b/internal/plugin/sign_test.go new file mode 100644 index 000000000..a60970cdc --- /dev/null +++ b/internal/plugin/sign_test.go @@ -0,0 +1,92 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "helm.sh/helm/v4/pkg/provenance" +) + +func TestSignPlugin(t *testing.T) { + // Create a test plugin directory + tempDir := t.TempDir() + pluginDir := filepath.Join(tempDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create a plugin.yaml file + pluginYAML := `apiVersion: v1 +name: test-plugin +type: cli/v1 +runtime: subprocess +version: 1.0.0 +runtimeConfig: + platformCommand: + - command: echo` + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create a tarball + tarballPath := filepath.Join(tempDir, "test-plugin.tgz") + tarFile, err := os.Create(tarballPath) + if err != nil { + t.Fatal(err) + } + if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil { + tarFile.Close() + t.Fatal(err) + } + tarFile.Close() + + // Create a test key for signing + keyring := "../../pkg/cmd/testdata/helm-test-key.secret" + signer, err := provenance.NewFromKeyring(keyring, "helm-test") + if err != nil { + t.Fatal(err) + } + if err := signer.DecryptKey(func(_ string) ([]byte, error) { + return []byte(""), nil + }); err != nil { + t.Fatal(err) + } + + // Sign the plugin tarball + sig, err := SignPlugin(tarballPath, signer) + if err != nil { + t.Fatalf("failed to sign plugin: %v", err) + } + + // Verify the signature contains the expected content + if !strings.Contains(sig, "-----BEGIN PGP SIGNED MESSAGE-----") { + t.Error("signature does not contain PGP header") + } + + // Verify the tarball hash is in the signature + expectedHash, err := provenance.DigestFile(tarballPath) + if err != nil { + t.Fatal(err) + } + // The signature should contain the tarball hash + if !strings.Contains(sig, "sha256:"+expectedHash) { + t.Errorf("signature does not contain expected tarball hash: sha256:%s", expectedHash) + } +} diff --git a/internal/plugin/signing_info.go b/internal/plugin/signing_info.go new file mode 100644 index 000000000..43d01c893 --- /dev/null +++ b/internal/plugin/signing_info.go @@ -0,0 +1,178 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "crypto/sha256" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/crypto/openpgp/clearsign" //nolint + + "helm.sh/helm/v4/pkg/helmpath" +) + +// SigningInfo contains information about a plugin's signing status +type SigningInfo struct { + // Status can be: + // - "local dev": Plugin is a symlink (development mode) + // - "unsigned": No provenance file found + // - "invalid provenance": Provenance file is malformed + // - "mismatched provenance": Provenance file does not match the installed tarball + // - "signed": Valid signature exists for the installed tarball + Status string + IsSigned bool // True if plugin has a valid signature (even if not verified against keyring) +} + +// GetPluginSigningInfo returns signing information for an installed plugin +func GetPluginSigningInfo(metadata Metadata) (*SigningInfo, error) { + pluginName := metadata.Name + pluginDir := helmpath.DataPath("plugins", pluginName) + + // Check if plugin directory exists + fi, err := os.Lstat(pluginDir) + if err != nil { + return nil, fmt.Errorf("plugin %s not found: %w", pluginName, err) + } + + // Check if it's a symlink (local development) + if fi.Mode()&os.ModeSymlink != 0 { + return &SigningInfo{ + Status: "local dev", + IsSigned: false, + }, nil + } + + // Find the exact tarball file for this plugin + pluginsDir := helmpath.DataPath("plugins") + tarballPath := filepath.Join(pluginsDir, fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)) + if _, err := os.Stat(tarballPath); err != nil { + return &SigningInfo{ + Status: "unsigned", + IsSigned: false, + }, nil + } + + // Check for .prov file associated with the tarball + provFile := tarballPath + ".prov" + provData, err := os.ReadFile(provFile) + if err != nil { + if os.IsNotExist(err) { + return &SigningInfo{ + Status: "unsigned", + IsSigned: false, + }, nil + } + return nil, fmt.Errorf("failed to read provenance file: %w", err) + } + + // Parse the provenance file to check validity + block, _ := clearsign.Decode(provData) + if block == nil { + return &SigningInfo{ + Status: "invalid provenance", + IsSigned: false, + }, nil + } + + // Check if provenance matches the actual tarball + blockContent := string(block.Plaintext) + if !validateProvenanceHash(blockContent, tarballPath) { + return &SigningInfo{ + Status: "mismatched provenance", + IsSigned: false, + }, nil + } + + // We have a provenance file that is valid for this plugin + // Without a keyring, we can't verify the signature, but we know: + // 1. A .prov file exists + // 2. It's a valid clearsigned document (cryptographically signed) + // 3. 
The provenance contains valid checksums + return &SigningInfo{ + Status: "signed", + IsSigned: true, + }, nil +} + +func validateProvenanceHash(blockContent string, tarballPath string) bool { + // Parse provenance to get the expected hash + _, sums, err := parsePluginMessageBlock([]byte(blockContent)) + if err != nil { + return false + } + + // Must have file checksums + if len(sums.Files) == 0 { + return false + } + + // Calculate actual hash of the tarball + actualHash, err := calculateFileHash(tarballPath) + if err != nil { + return false + } + + // Check if the actual hash matches the expected hash in the provenance + for filename, expectedHash := range sums.Files { + if strings.Contains(filename, filepath.Base(tarballPath)) && expectedHash == actualHash { + return true + } + } + + return false +} + +// calculateFileHash calculates the SHA256 hash of a file +func calculateFileHash(filePath string) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", err + } + defer file.Close() + + hasher := sha256.New() + if _, err := io.Copy(hasher, file); err != nil { + return "", err + } + + return fmt.Sprintf("sha256:%x", hasher.Sum(nil)), nil +} + +// GetSigningInfoForPlugins returns signing info for multiple plugins +func GetSigningInfoForPlugins(plugins []Plugin) map[string]*SigningInfo { + result := make(map[string]*SigningInfo) + + for _, p := range plugins { + m := p.Metadata() + + info, err := GetPluginSigningInfo(m) + if err != nil { + // If there's an error, treat as unsigned + result[m.Name] = &SigningInfo{ + Status: "unknown", + IsSigned: false, + } + } else { + result[m.Name] = info + } + } + + return result +} diff --git a/internal/plugin/verify.go b/internal/plugin/verify.go new file mode 100644 index 000000000..e9656a3a6 --- /dev/null +++ b/internal/plugin/verify.go @@ -0,0 +1,72 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "helm.sh/helm/v4/pkg/provenance" +) + +// VerifyPlugin verifies a plugin tarball against a signature. +// +// This function verifies that a plugin tarball has a valid provenance file +// and that the provenance file is signed by a trusted entity. 
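+//
+// A minimal calling sketch (the tarball and keyring paths are illustrative):
+//
+//	v, err := VerifyPlugin("example-plugin-1.0.0.tgz", "pubring.gpg")
+//	if err != nil {
+//		// missing .prov file, untrusted signer, or checksum mismatch
+//	}
+//	_ = v.FileHash // e.g. "sha256:..."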
+func VerifyPlugin(pluginPath, keyring string) (*provenance.Verification, error) { + // Verify the plugin path exists + fi, err := os.Stat(pluginPath) + if err != nil { + return nil, err + } + + // Only support tarball verification + if fi.IsDir() { + return nil, errors.New("directory verification not supported - only plugin tarballs can be verified") + } + + // Verify it's a tarball + if !isTarball(pluginPath) { + return nil, errors.New("plugin file must be a gzipped tarball (.tar.gz or .tgz)") + } + + // Look for provenance file + provFile := pluginPath + ".prov" + if _, err := os.Stat(provFile); err != nil { + return nil, fmt.Errorf("could not find provenance file %s: %w", provFile, err) + } + + // Create signatory from keyring + sig, err := provenance.NewFromKeyring(keyring, "") + if err != nil { + return nil, err + } + + return verifyPluginTarball(pluginPath, provFile, sig) +} + +// verifyPluginTarball verifies a plugin tarball against its signature +func verifyPluginTarball(pluginPath, provPath string, sig *provenance.Signatory) (*provenance.Verification, error) { + // Reuse chart verification logic from pkg/provenance + return sig.Verify(pluginPath, provPath) +} + +// isTarball checks if a file has a tarball extension +func isTarball(filename string) bool { + return filepath.Ext(filename) == ".gz" || filepath.Ext(filename) == ".tgz" +} diff --git a/internal/plugin/verify_test.go b/internal/plugin/verify_test.go new file mode 100644 index 000000000..a09b35ec9 --- /dev/null +++ b/internal/plugin/verify_test.go @@ -0,0 +1,201 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "helm.sh/helm/v4/pkg/provenance" +) + +const testKeyFile = "../../pkg/cmd/testdata/helm-test-key.secret" +const testPubFile = "../../pkg/cmd/testdata/helm-test-key.pub" + +const testPluginYAML = `apiVersion: v1 +name: test-plugin +type: cli/v1 +runtime: subprocess +version: 1.0.0 +runtimeConfig: + platformCommand: + - command: echo` + +func TestVerifyPlugin(t *testing.T) { + // Create a test plugin and sign it + tempDir := t.TempDir() + + // Create plugin directory + pluginDir := filepath.Join(tempDir, "verify-test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create tarball + tarballPath := filepath.Join(tempDir, "verify-test-plugin.tar.gz") + tarFile, err := os.Create(tarballPath) + if err != nil { + t.Fatal(err) + } + + if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil { + tarFile.Close() + t.Fatal(err) + } + tarFile.Close() + + // Sign the plugin with source directory + signer, err := provenance.NewFromKeyring(testKeyFile, "helm-test") + if err != nil { + t.Fatal(err) + } + if err := signer.DecryptKey(func(_ string) ([]byte, error) { + return []byte(""), nil + }); err != nil { + t.Fatal(err) + } + + sig, err := SignPlugin(tarballPath, signer) + if err != nil { + t.Fatal(err) + } + + // Write the signature to .prov file + provFile := tarballPath + ".prov" + if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil { + t.Fatal(err) + } + + // Now verify the plugin + verification, err := VerifyPlugin(tarballPath, testPubFile) + if err != nil { + t.Fatalf("Failed to verify plugin: %v", err) + } + + // Check verification results + if verification.SignedBy == nil { + t.Error("SignedBy is nil") + } + + if verification.FileName != "verify-test-plugin.tar.gz" { + t.Errorf("Expected filename 'verify-test-plugin.tar.gz', got %s", verification.FileName) + } + + if verification.FileHash == "" { + t.Error("FileHash is empty") + } +} + +func TestVerifyPluginBadSignature(t *testing.T) { + tempDir := t.TempDir() + + // Create a plugin tarball + pluginDir := filepath.Join(tempDir, "bad-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + tarballPath := filepath.Join(tempDir, "bad-plugin.tar.gz") + tarFile, err := os.Create(tarballPath) + if err != nil { + t.Fatal(err) + } + + if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil { + tarFile.Close() + t.Fatal(err) + } + tarFile.Close() + + // Create a bad signature (just some text) + badSig := `-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA512 + +This is not a real signature +-----BEGIN PGP SIGNATURE----- + +InvalidSignatureData + +-----END PGP SIGNATURE-----` + + provFile := tarballPath + ".prov" + if err := os.WriteFile(provFile, []byte(badSig), 0644); err != nil { + t.Fatal(err) + } + + // Try to verify - should fail + _, err = VerifyPlugin(tarballPath, testPubFile) + if err == nil { + t.Error("Expected verification to fail with bad signature") + } +} + +func TestVerifyPluginMissingProvenance(t *testing.T) { + tempDir := t.TempDir() + tarballPath := filepath.Join(tempDir, "no-prov.tar.gz") + + // Create a minimal tarball + if err := os.WriteFile(tarballPath, []byte("dummy"), 
0644); err != nil { + t.Fatal(err) + } + + // Try to verify without .prov file + _, err := VerifyPlugin(tarballPath, testPubFile) + if err == nil { + t.Error("Expected verification to fail without provenance file") + } +} + +func TestVerifyPluginDirectory(t *testing.T) { + // Create a test plugin directory + tempDir := t.TempDir() + pluginDir := filepath.Join(tempDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create a plugin.yaml file + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Attempt to verify the directory - should fail + _, err := VerifyPlugin(pluginDir, testPubFile) + if err == nil { + t.Error("Expected directory verification to fail, but it succeeded") + } + + expectedError := "directory verification not supported" + if !containsString(err.Error(), expectedError) { + t.Errorf("Expected error to contain %q, got %q", expectedError, err.Error()) + } +} + +func containsString(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > len(substr) && + (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr || + strings.Contains(s, substr))) +} diff --git a/pkg/action/package.go b/pkg/action/package.go index e57ce4921..c59efcdb3 100644 --- a/pkg/action/package.go +++ b/pkg/action/package.go @@ -25,6 +25,7 @@ import ( "github.com/Masterminds/semver/v3" "golang.org/x/term" + "sigs.k8s.io/yaml" "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -143,7 +144,20 @@ func (p *Package) Clearsign(filename string) error { return err } - sig, err := signer.ClearSign(filename) + // Load the chart archive to extract metadata + chart, err := loader.LoadFile(filename) + if err != nil { + return fmt.Errorf("failed to load chart for signing: %w", err) + } + + // Marshal chart metadata to YAML bytes + metadataBytes, err := yaml.Marshal(chart.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal chart metadata: %w", err) + } + + // Use the generic provenance signing function + sig, err := signer.ClearSign(filename, metadataBytes) if err != nil { return err } diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index b03000ad4..393e9672c 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -38,6 +38,8 @@ func newPluginCmd(out io.Writer) *cobra.Command { newPluginListCmd(out), newPluginUninstallCmd(out), newPluginUpdateCmd(out), + newPluginPackageCmd(out), + newPluginVerifyCmd(out), ) return cmd } diff --git a/pkg/cmd/plugin_install.go b/pkg/cmd/plugin_install.go index 960404a76..0abefa76b 100644 --- a/pkg/cmd/plugin_install.go +++ b/pkg/cmd/plugin_install.go @@ -33,6 +33,9 @@ import ( type pluginInstallOptions struct { source string version string + // signing options + verify bool + keyring string // OCI-specific options certFile string keyFile string @@ -45,6 +48,13 @@ type pluginInstallOptions struct { const pluginInstallDesc = ` This command allows you to install a plugin from a url to a VCS repo or a local path. + +By default, plugin signatures are verified before installation when installing from +tarballs (.tgz or .tar.gz). This requires a corresponding .prov file to be available +alongside the tarball. +For local development, plugins installed from local directories are automatically +treated as "local dev" and do not require signatures. +Use --verify=false to skip signature verification for remote plugins. 
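+
+For example (illustrative paths):
+
+    helm plugin install ./example-plugin-1.0.0.tgz
+    helm plugin install ./example-plugin-1.0.0.tgz --verify=false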
` func newPluginInstallCmd(out io.Writer) *cobra.Command { @@ -71,6 +81,8 @@ func newPluginInstallCmd(out io.Writer) *cobra.Command { }, } cmd.Flags().StringVar(&o.version, "version", "", "specify a version constraint. If this is not specified, the latest version is installed") + cmd.Flags().BoolVar(&o.verify, "verify", true, "verify the plugin signature before installing") + cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "location of public keys used for verification") // Add OCI-specific flags cmd.Flags().StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file") @@ -113,10 +125,51 @@ func (o *pluginInstallOptions) run(out io.Writer) error { if err != nil { return err } - if err := installer.Install(i); err != nil { + + // Determine if we should verify based on installer type and flags + shouldVerify := o.verify + + // Check if this is a local directory installation (for development) + if localInst, ok := i.(*installer.LocalInstaller); ok && !localInst.SupportsVerification() { + // Local directory installations are allowed without verification + shouldVerify = false + fmt.Fprintf(out, "Installing plugin from local directory (development mode)\n") + } else if shouldVerify { + // For remote installations, check if verification is supported + if verifier, ok := i.(installer.Verifier); !ok || !verifier.SupportsVerification() { + return fmt.Errorf("plugin source does not support verification. Use --verify=false to skip verification") + } + } else { + // User explicitly disabled verification + fmt.Fprintf(out, "WARNING: Skipping plugin signature verification\n") + } + + // Set up installation options + opts := installer.Options{ + Verify: shouldVerify, + Keyring: o.keyring, + } + + // If verify is requested, show verification output + if shouldVerify { + fmt.Fprintf(out, "Verifying plugin signature...\n") + } + + // Install the plugin with options + verifyResult, err := installer.InstallWithOptions(i, opts) + if err != nil { return err } + // If verification was successful, show the details + if verifyResult != nil { + for _, signer := range verifyResult.SignedBy { + fmt.Fprintf(out, "Signed by: %s\n", signer) + } + fmt.Fprintf(out, "Using Key With Fingerprint: %s\n", verifyResult.Fingerprint) + fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verifyResult.FileHash) + } + slog.Debug("loading plugin", "path", i.Path()) p, err := plugin.LoadDir(i.Path()) if err != nil { diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 31a76330d..9b2895441 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -46,15 +46,23 @@ func newPluginListCmd(out io.Writer) *cobra.Command { return err } + // Get signing info for all plugins + signingInfo := plugin.GetSigningInfoForPlugins(plugins) + table := uitable.New() - table.AddRow("NAME", "VERSION", "TYPE", "APIVERSION", "SOURCE") + table.AddRow("NAME", "VERSION", "TYPE", "APIVERSION", "PROVENANCE", "SOURCE") for _, p := range plugins { m := p.Metadata() sourceURL := m.SourceURL if sourceURL == "" { sourceURL = "unknown" } - table.AddRow(m.Name, m.Version, m.Type, m.APIVersion, sourceURL) + // Get signing status + signedStatus := "unknown" + if info, ok := signingInfo[m.Name]; ok { + signedStatus = info.Status + } + table.AddRow(m.Name, m.Version, m.Type, m.APIVersion, signedStatus, sourceURL) } fmt.Fprintln(out, table) return nil diff --git a/pkg/cmd/plugin_package.go b/pkg/cmd/plugin_package.go new file mode 100644 index 000000000..5da6c624e --- /dev/null +++ 
b/pkg/cmd/plugin_package.go @@ -0,0 +1,209 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + "github.com/spf13/cobra" + "golang.org/x/term" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/pkg/cmd/require" + "helm.sh/helm/v4/pkg/provenance" +) + +const pluginPackageDesc = ` +This command packages a Helm plugin directory into a tarball. + +By default, the command will generate a provenance file signed with a PGP key. +This ensures the plugin can be verified after installation. + +Use --sign=false to skip signing (not recommended for distribution). +` + +type pluginPackageOptions struct { + sign bool + keyring string + key string + passphraseFile string + pluginPath string + destination string +} + +func newPluginPackageCmd(out io.Writer) *cobra.Command { + o := &pluginPackageOptions{} + + cmd := &cobra.Command{ + Use: "package [PATH]", + Short: "package a plugin directory into a plugin archive", + Long: pluginPackageDesc, + Args: require.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + o.pluginPath = args[0] + return o.run(out) + }, + } + + f := cmd.Flags() + f.BoolVar(&o.sign, "sign", true, "use a PGP private key to sign this plugin") + f.StringVar(&o.key, "key", "", "name of the key to use when signing. Used if --sign is true") + f.StringVar(&o.keyring, "keyring", defaultKeyring(), "location of a public keyring") + f.StringVar(&o.passphraseFile, "passphrase-file", "", "location of a file which contains the passphrase for the signing key. Use \"-\" to read from stdin.") + f.StringVarP(&o.destination, "destination", "d", ".", "location to write the plugin tarball.") + + return cmd +} + +func (o *pluginPackageOptions) run(out io.Writer) error { + // Check if the plugin path exists and is a directory + fi, err := os.Stat(o.pluginPath) + if err != nil { + return err + } + if !fi.IsDir() { + return fmt.Errorf("plugin package only supports directories, not tarballs") + } + + // Load and validate plugin metadata + pluginMeta, err := plugin.LoadDir(o.pluginPath) + if err != nil { + return fmt.Errorf("invalid plugin directory: %w", err) + } + + // Create destination directory if needed + if err := os.MkdirAll(o.destination, 0755); err != nil { + return err + } + + // If signing is requested, prepare the signer first + var signer *provenance.Signatory + if o.sign { + // Load the signing key + signer, err = provenance.NewFromKeyring(o.keyring, o.key) + if err != nil { + return fmt.Errorf("error reading from keyring: %w", err) + } + + // Get passphrase + passphraseFetcher := o.promptUser + if o.passphraseFile != "" { + passphraseFetcher, err = o.passphraseFileFetcher() + if err != nil { + return err + } + } + + // Decrypt the key + if err := signer.DecryptKey(passphraseFetcher); err != nil { + return err + } + } else { + // User explicitly disabled signing + fmt.Fprintf(out, "WARNING: Skipping plugin signing. 
This is not recommended for plugins intended for distribution.\n") + } + + // Now create the tarball (only after signing prerequisites are met) + // Use plugin metadata for filename: PLUGIN_NAME-SEMVER.tgz + metadata := pluginMeta.Metadata() + filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) + tarballPath := filepath.Join(o.destination, filename) + + tarFile, err := os.Create(tarballPath) + if err != nil { + return fmt.Errorf("failed to create tarball: %w", err) + } + defer tarFile.Close() + + if err := plugin.CreatePluginTarball(o.pluginPath, metadata.Name, tarFile); err != nil { + os.Remove(tarballPath) + return fmt.Errorf("failed to create plugin tarball: %w", err) + } + tarFile.Close() // Ensure file is closed before signing + + // If signing was requested, sign the tarball + if o.sign { + // Sign the plugin tarball (not the source directory) + sig, err := plugin.SignPlugin(tarballPath, signer) + if err != nil { + os.Remove(tarballPath) + return fmt.Errorf("failed to sign plugin: %w", err) + } + + // Write the signature + provFile := tarballPath + ".prov" + if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil { + os.Remove(tarballPath) + return err + } + + fmt.Fprintf(out, "Successfully signed. Signature written to: %s\n", provFile) + } + + fmt.Fprintf(out, "Successfully packaged plugin and saved it to: %s\n", tarballPath) + + return nil +} + +func (o *pluginPackageOptions) promptUser(name string) ([]byte, error) { + fmt.Printf("Password for key %q > ", name) + pw, err := term.ReadPassword(int(syscall.Stdin)) + fmt.Println() + return pw, err +} + +func (o *pluginPackageOptions) passphraseFileFetcher() (provenance.PassphraseFetcher, error) { + file, err := openPassphraseFile(o.passphraseFile, os.Stdin) + if err != nil { + return nil, err + } + defer file.Close() + + // Read the entire passphrase + passphrase, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + // Trim any trailing newline characters (both \n and \r\n) + passphrase = bytes.TrimRight(passphrase, "\r\n") + + return func(_ string) ([]byte, error) { + return passphrase, nil + }, nil +} + +// copied from action.openPassphraseFile +// TODO: should we move this to pkg/action so we can reuse the func from there? +func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) { + if passphraseFile == "-" { + stat, err := stdin.Stat() + if err != nil { + return nil, err + } + if (stat.Mode() & os.ModeNamedPipe) == 0 { + return nil, errors.New("specified reading passphrase from stdin, without input on stdin") + } + return stdin, nil + } + return os.Open(passphraseFile) +} diff --git a/pkg/cmd/plugin_package_test.go b/pkg/cmd/plugin_package_test.go new file mode 100644 index 000000000..df6cdd849 --- /dev/null +++ b/pkg/cmd/plugin_package_test.go @@ -0,0 +1,170 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing" +) + +// Common plugin.yaml content for v1 format tests +const testPluginYAML = `apiVersion: v1 +name: test-plugin +version: 1.0.0 +type: cli/v1 +runtime: subprocess +config: + usage: test-plugin [flags] + shortHelp: A test plugin + longHelp: A test plugin for testing purposes +runtimeConfig: + platformCommands: + - os: linux + command: echo + args: ["test"]` + +func TestPluginPackageWithoutSigning(t *testing.T) { + // Create a test plugin directory + tempDir := t.TempDir() + pluginDir := filepath.Join(tempDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create a plugin.yaml file + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create package options with sign=false + o := &pluginPackageOptions{ + sign: false, // Explicitly disable signing + pluginPath: pluginDir, + destination: tempDir, + } + + // Run the package command + out := &bytes.Buffer{} + err := o.run(out) + + // Should succeed without error + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Check that tarball was created with plugin name and version + tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tgz") + if _, err := os.Stat(tarballPath); os.IsNotExist(err) { + t.Error("tarball should exist when sign=false") + } + + // Check that no .prov file was created + provPath := tarballPath + ".prov" + if _, err := os.Stat(provPath); !os.IsNotExist(err) { + t.Error("provenance file should not exist when sign=false") + } + + // Output should contain warning about skipping signing + output := out.String() + if !strings.Contains(output, "WARNING: Skipping plugin signing") { + t.Error("should print warning when signing is skipped") + } + if !strings.Contains(output, "Successfully packaged") { + t.Error("should print success message") + } +} + +func TestPluginPackageDefaultRequiresSigning(t *testing.T) { + // Create a test plugin directory + tempDir := t.TempDir() + pluginDir := filepath.Join(tempDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create a plugin.yaml file + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create package options with default sign=true and invalid keyring + o := &pluginPackageOptions{ + sign: true, // This is now the default + keyring: "/non/existent/keyring", + pluginPath: pluginDir, + destination: tempDir, + } + + // Run the package command + out := &bytes.Buffer{} + err := o.run(out) + + // Should fail because signing is required by default + if err == nil { + t.Error("expected error when signing fails with default settings") + } + + // Check that no tarball was created + tarballPath := filepath.Join(tempDir, "test-plugin.tgz") + if _, err := os.Stat(tarballPath); !os.IsNotExist(err) { + t.Error("tarball should not exist when signing fails") + } +} + +func TestPluginPackageSigningFailure(t *testing.T) { + // Create a test plugin directory + tempDir := t.TempDir() + pluginDir := filepath.Join(tempDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatal(err) + } + + // Create a plugin.yaml file + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatal(err) + } + + // Create package options with sign flag but invalid keyring + o := 
&pluginPackageOptions{ + sign: true, + keyring: "/non/existent/keyring", // This will cause signing to fail + pluginPath: pluginDir, + destination: tempDir, + } + + // Run the package command + out := &bytes.Buffer{} + err := o.run(out) + + // Should get an error + if err == nil { + t.Error("expected error when signing fails, got nil") + } + + // Check that no tarball was created + tarballPath := filepath.Join(tempDir, "test-plugin.tgz") + if _, err := os.Stat(tarballPath); !os.IsNotExist(err) { + t.Error("tarball should not exist when signing fails") + } + + // Output should not contain success message + if bytes.Contains(out.Bytes(), []byte("Successfully packaged")) { + t.Error("should not print success message when signing fails") + } +} diff --git a/pkg/cmd/plugin_verify.go b/pkg/cmd/plugin_verify.go new file mode 100644 index 000000000..4772fcc33 --- /dev/null +++ b/pkg/cmd/plugin_verify.go @@ -0,0 +1,88 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/pkg/cmd/require" +) + +const pluginVerifyDesc = ` +This command verifies that a Helm plugin has a valid provenance file, +and that the provenance file is signed by a trusted PGP key. + +It supports both: +- Plugin tarballs (.tgz or .tar.gz files) +- Installed plugin directories + +For installed plugins, use the path shown by 'helm env HELM_PLUGINS' followed +by the plugin name. For example: + helm plugin verify ~/.local/share/helm/plugins/example-cli + +To generate a signed plugin, use the 'helm plugin package --sign' command. 
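+
+Example (the tarball name follows the NAME-VERSION.tgz pattern produced by 'helm plugin package';
+the plugin name shown here is illustrative):
+    helm plugin verify ./example-cli-1.0.0.tgz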
+` + +type pluginVerifyOptions struct { + keyring string + pluginPath string +} + +func newPluginVerifyCmd(out io.Writer) *cobra.Command { + o := &pluginVerifyOptions{} + + cmd := &cobra.Command{ + Use: "verify [PATH]", + Short: "verify that a plugin at the given path has been signed and is valid", + Long: pluginVerifyDesc, + Args: require.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + o.pluginPath = args[0] + return o.run(out) + }, + } + + cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "keyring containing public keys") + + return cmd +} + +func (o *pluginVerifyOptions) run(out io.Writer) error { + // Verify the plugin + verification, err := plugin.VerifyPlugin(o.pluginPath, o.keyring) + if err != nil { + return err + } + + // Output verification details + for name := range verification.SignedBy.Identities { + fmt.Fprintf(out, "Signed by: %v\n", name) + } + fmt.Fprintf(out, "Using Key With Fingerprint: %X\n", verification.SignedBy.PrimaryKey.Fingerprint) + + // Only show hash for tarballs + if verification.FileHash != "" { + fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verification.FileHash) + } else { + fmt.Fprintf(out, "Plugin Metadata Verified: %s\n", verification.FileName) + } + + return nil +} diff --git a/pkg/cmd/plugin_verify_test.go b/pkg/cmd/plugin_verify_test.go new file mode 100644 index 000000000..e631814dd --- /dev/null +++ b/pkg/cmd/plugin_verify_test.go @@ -0,0 +1,264 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "crypto/sha256" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/test/ensure" +) + +func TestPluginVerifyCmd_NoArgs(t *testing.T) { + ensure.HelmHome(t) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when no arguments provided") + } + if !strings.Contains(err.Error(), "requires 1 argument") { + t.Errorf("expected 'requires 1 argument' error, got: %v", err) + } +} + +func TestPluginVerifyCmd_TooManyArgs(t *testing.T) { + ensure.HelmHome(t) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{"plugin1", "plugin2"}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when too many arguments provided") + } + if !strings.Contains(err.Error(), "requires 1 argument") { + t.Errorf("expected 'requires 1 argument' error, got: %v", err) + } +} + +func TestPluginVerifyCmd_NonexistentFile(t *testing.T) { + ensure.HelmHome(t) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{"/nonexistent/plugin.tgz"}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when plugin file doesn't exist") + } +} + +func TestPluginVerifyCmd_MissingProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a plugin tarball without .prov file + pluginTgz := createTestPluginTarball(t) + defer os.Remove(pluginTgz) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{pluginTgz}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when .prov file is missing") + } + if !strings.Contains(err.Error(), "could not find provenance file") { + t.Errorf("expected 'could not find provenance file' error, got: %v", err) + } +} + +func TestPluginVerifyCmd_InvalidProvenance(t *testing.T) { + ensure.HelmHome(t) + + // Create a plugin tarball with invalid .prov file + pluginTgz := createTestPluginTarball(t) + defer os.Remove(pluginTgz) + + // Create invalid .prov file + provFile := pluginTgz + ".prov" + if err := os.WriteFile(provFile, []byte("invalid provenance"), 0644); err != nil { + t.Fatal(err) + } + defer os.Remove(provFile) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{pluginTgz}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when .prov file is invalid") + } +} + +func TestPluginVerifyCmd_DirectoryNotSupported(t *testing.T) { + ensure.HelmHome(t) + + // Create a plugin directory + pluginDir := createTestPluginDir(t) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{pluginDir}) + + err := cmd.Execute() + if err == nil { + t.Error("expected error when verifying directory") + } + if !strings.Contains(err.Error(), "directory verification not supported") { + t.Errorf("expected 'directory verification not supported' error, got: %v", err) + } +} + +func TestPluginVerifyCmd_KeyringFlag(t *testing.T) { + ensure.HelmHome(t) + + // Create a plugin tarball with .prov file + pluginTgz := createTestPluginTarball(t) + defer os.Remove(pluginTgz) + + // Create .prov file + provFile := pluginTgz + ".prov" + createProvFile(t, provFile, pluginTgz, "") + defer os.Remove(provFile) + + // Create empty keyring file + keyring := createTestKeyring(t) + defer os.Remove(keyring) + + out := &bytes.Buffer{} + cmd := newPluginVerifyCmd(out) + cmd.SetArgs([]string{"--keyring", keyring, pluginTgz}) + + // Should 
fail with keyring error but command parsing should work + err := cmd.Execute() + if err == nil { + t.Error("expected error with empty keyring") + } + // The important thing is that the keyring flag was parsed and used +} + +func TestPluginVerifyOptions_Run_Success(t *testing.T) { + // Skip this test as it would require real PGP keys and valid signatures + // The core verification logic is thoroughly tested in internal/plugin/verify_test.go + t.Skip("Success case requires real PGP keys - core logic tested in internal/plugin/verify_test.go") +} + +// Helper functions for test setup + +func createTestPluginDir(t *testing.T) string { + t.Helper() + + // Create temporary directory with plugin structure + tmpDir := t.TempDir() + pluginDir := filepath.Join(tmpDir, "test-plugin") + if err := os.MkdirAll(pluginDir, 0755); err != nil { + t.Fatalf("Failed to create plugin directory: %v", err) + } + + // Use the same plugin YAML as other cmd tests + if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + t.Fatalf("Failed to create plugin.yaml: %v", err) + } + + return pluginDir +} + +func createTestPluginTarball(t *testing.T) string { + t.Helper() + + pluginDir := createTestPluginDir(t) + + // Create tarball using the plugin package helper + tmpDir := filepath.Dir(pluginDir) + tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz") + tarFile, err := os.Create(tgzPath) + if err != nil { + t.Fatalf("Failed to create tarball file: %v", err) + } + defer tarFile.Close() + + if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil { + t.Fatalf("Failed to create tarball: %v", err) + } + + return tgzPath +} + +func createProvFile(t *testing.T, provFile, pluginTgz, hash string) { + t.Helper() + + var hashStr string + if hash == "" { + // Calculate actual hash of the tarball + data, err := os.ReadFile(pluginTgz) + if err != nil { + t.Fatalf("Failed to read tarball for hashing: %v", err) + } + hashSum := sha256.Sum256(data) + hashStr = fmt.Sprintf("sha256:%x", hashSum) + } else { + // Use provided hash + hashStr = hash + } + + // Create properly formatted provenance file with specified hash + provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA256 + +name: test-plugin +version: 1.0.0 +description: Test plugin for verification +files: + test-plugin-1.0.0.tgz: %s +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1 + +iQEcBAEBCAAGBQJktest... 
+-----END PGP SIGNATURE----- +`, hashStr) + if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil { + t.Fatalf("Failed to create provenance file: %v", err) + } +} + +func createTestKeyring(t *testing.T) string { + t.Helper() + + // Create a temporary keyring file + tmpDir := t.TempDir() + keyringPath := filepath.Join(tmpDir, "pubring.gpg") + + // Create empty keyring for testing + if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil { + t.Fatalf("Failed to create test keyring: %v", err) + } + + return keyringPath +} diff --git a/pkg/getter/ocigetter.go b/pkg/getter/ocigetter.go index 121e000c8..24fc60c56 100644 --- a/pkg/getter/ocigetter.go +++ b/pkg/getter/ocigetter.go @@ -175,6 +175,12 @@ func (g *OCIGetter) newRegistryClient() (*registry.Client, error) { // getPlugin handles plugin-specific OCI pulls func (g *OCIGetter) getPlugin(client *registry.Client, ref string) (*bytes.Buffer, error) { + // Check if this is a provenance file request + requestingProv := strings.HasSuffix(ref, ".prov") + if requestingProv { + ref = strings.TrimSuffix(ref, ".prov") + } + // Extract plugin name from the reference // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name" parts := strings.Split(ref, "/") @@ -190,10 +196,18 @@ func (g *OCIGetter) getPlugin(client *registry.Client, ref string) (*bytes.Buffe pluginName = lastPart[:idx] } - result, err := client.PullPlugin(ref, pluginName) + var pullOpts []registry.PluginPullOption + if requestingProv { + pullOpts = append(pullOpts, registry.PullPluginOptWithProv(true)) + } + + result, err := client.PullPlugin(ref, pluginName, pullOpts...) if err != nil { return nil, err } + if requestingProv { + return bytes.NewBuffer(result.Prov.Data), nil + } return bytes.NewBuffer(result.PluginData), nil } diff --git a/pkg/provenance/doc.go b/pkg/provenance/doc.go index 883c0e724..dd14568d9 100644 --- a/pkg/provenance/doc.go +++ b/pkg/provenance/doc.go @@ -14,15 +14,15 @@ limitations under the License. */ /* -Package provenance provides tools for establishing the authenticity of a chart. +Package provenance provides tools for establishing the authenticity of packages. In Helm, provenance is established via several factors. The primary factor is the -cryptographic signature of a chart. Chart authors may sign charts, which in turn -provide the necessary metadata to ensure the integrity of the chart file, the -Chart.yaml, and the referenced Docker images. +cryptographic signature of a package. Package authors may sign packages, which in turn +provide the necessary metadata to ensure the integrity of the package file, the +metadata, and the referenced Docker images. A provenance file is clear-signed. This provides cryptographic verification that -a particular block of information (Chart.yaml, archive file, images) have not +a particular block of information (metadata, archive file, images) have not been tampered with or altered. 
To learn more, read the GnuPG documentation on clear signatures: https://www.gnupg.org/gph/en/manual/x135.html diff --git a/pkg/provenance/sign.go b/pkg/provenance/sign.go index 504bc6aa1..103c81fbb 100644 --- a/pkg/provenance/sign.go +++ b/pkg/provenance/sign.go @@ -30,9 +30,6 @@ import ( "golang.org/x/crypto/openpgp/clearsign" //nolint "golang.org/x/crypto/openpgp/packet" //nolint "sigs.k8s.io/yaml" - - hapi "helm.sh/helm/v4/pkg/chart/v2" - "helm.sh/helm/v4/pkg/chart/v2/loader" ) var defaultPGPConfig = packet.Config{ @@ -58,7 +55,7 @@ type SumCollection struct { // Verification contains information about a verification operation. type Verification struct { - // SignedBy contains the entity that signed a chart. + // SignedBy contains the entity that signed a package. SignedBy *openpgp.Entity // FileHash is the hash, prepended with the scheme, for the file that was verified. FileHash string @@ -68,11 +65,11 @@ type Verification struct { // Signatory signs things. // -// Signatories can be constructed from a PGP private key file using NewFromFiles +// Signatories can be constructed from a PGP private key file using NewFromFiles, // or they can be constructed manually by setting the Entity to a valid // PGP entity. // -// The same Signatory can be used to sign or validate multiple charts. +// The same Signatory can be used to sign or validate multiple packages. type Signatory struct { // The signatory for this instance of Helm. This is used for signing. Entity *openpgp.Entity @@ -197,20 +194,21 @@ func (s *Signatory) DecryptKey(fn PassphraseFetcher) error { return s.Entity.PrivateKey.Decrypt(p) } -// ClearSign signs a chart with the given key. +// ClearSign signs a package with the given key and pre-marshalled metadata. // -// This takes the path to a chart archive file and a key, and it returns a clear signature. +// This takes the path to a package archive file, a key, and marshalled metadata bytes. +// This allows both charts and plugins to use the same signing infrastructure. // // The Signatory must have a valid Entity.PrivateKey for this to work. If it does // not, an error will be returned. -func (s *Signatory) ClearSign(chartpath string) (string, error) { +func (s *Signatory) ClearSign(packagePath string, metadataBytes []byte) (string, error) { if s.Entity == nil { return "", errors.New("private key not found") } else if s.Entity.PrivateKey == nil { return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys") } - if fi, err := os.Stat(chartpath); err != nil { + if fi, err := os.Stat(packagePath); err != nil { return "", err } else if fi.IsDir() { return "", errors.New("cannot sign a directory") @@ -218,7 +216,7 @@ func (s *Signatory) ClearSign(chartpath string) (string, error) { out := bytes.NewBuffer(nil) - b, err := messageBlock(chartpath) + b, err := messageBlock(packagePath, metadataBytes) if err != nil { return "", err } @@ -248,10 +246,10 @@ func (s *Signatory) ClearSign(chartpath string) (string, error) { return out.String(), nil } -// Verify checks a signature and verifies that it is legit for a chart. -func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) { +// Verify checks a signature and verifies that it is legit for a package. 
+func (s *Signatory) Verify(packagePath, sigpath string) (*Verification, error) { ver := &Verification{} - for _, fname := range []string{chartpath, sigpath} { + for _, fname := range []string{packagePath, sigpath} { if fi, err := os.Stat(fname); err != nil { return ver, err } else if fi.IsDir() { @@ -272,17 +270,17 @@ func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) { ver.SignedBy = by // Second, verify the hash of the tarball. - sum, err := DigestFile(chartpath) + sum, err := DigestFile(packagePath) if err != nil { return ver, err } - _, sums, err := parseMessageBlock(sig.Plaintext) + sums, err := parseMessageBlock(sig.Plaintext) if err != nil { return ver, err } sum = "sha256:" + sum - basename := filepath.Base(chartpath) + basename := filepath.Base(packagePath) if sha, ok := sums.Files[basename]; !ok { return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", basename) } else if sha != sum { @@ -320,64 +318,64 @@ func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, er ) } -func messageBlock(chartpath string) (*bytes.Buffer, error) { - var b *bytes.Buffer +// messageBlock creates a message block from a package path and pre-marshalled metadata +func messageBlock(packagePath string, metadataBytes []byte) (*bytes.Buffer, error) { // Checksum the archive - chash, err := DigestFile(chartpath) + chash, err := DigestFile(packagePath) if err != nil { - return b, err + return nil, err } - base := filepath.Base(chartpath) + base := filepath.Base(packagePath) sums := &SumCollection{ Files: map[string]string{ base: "sha256:" + chash, }, } - // Load the archive into memory. - chart, err := loader.LoadFile(chartpath) - if err != nil { - return b, err - } - - // Buffer a hash + checksums YAML file - data, err := yaml.Marshal(chart.Metadata) - if err != nil { - return b, err - } - + // Buffer the metadata + checksums YAML file // FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP // clearsign block. So we use ...\n, which is the YAML document end marker. // http://yaml.org/spec/1.2/spec.html#id2800168 - b = bytes.NewBuffer(data) + b := bytes.NewBuffer(metadataBytes) b.WriteString("\n...\n") - data, err = yaml.Marshal(sums) + data, err := yaml.Marshal(sums) if err != nil { - return b, err + return nil, err } b.Write(data) return b, nil } -// parseMessageBlock -func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) { - // This sucks. +// parseMessageBlock parses a message block and returns only checksums (metadata ignored like upstream) +func parseMessageBlock(data []byte) (*SumCollection, error) { + sc := &SumCollection{} + + // We ignore metadata, just like upstream - only need checksums for verification + if err := ParseMessageBlock(data, nil, sc); err != nil { + return sc, err + } + return sc, nil +} + +// ParseMessageBlock parses a message block containing metadata and checksums. +// +// This is the generic version that can work with any metadata type. +// The metadata parameter should be a pointer to a struct that can be unmarshaled from YAML. 
+func ParseMessageBlock(data []byte, metadata interface{}, sums *SumCollection) error { parts := bytes.Split(data, []byte("\n...\n")) if len(parts) < 2 { - return nil, nil, errors.New("message block must have at least two parts") + return errors.New("message block must have at least two parts") } - md := &hapi.Metadata{} - sc := &SumCollection{} - - if err := yaml.Unmarshal(parts[0], md); err != nil { - return md, sc, err + if metadata != nil { + if err := yaml.Unmarshal(parts[0], metadata); err != nil { + return err + } } - err := yaml.Unmarshal(parts[1], sc) - return md, sc, err + return yaml.Unmarshal(parts[1], sums) } // loadKey loads a GPG key found at a particular path. @@ -406,7 +404,7 @@ func loadKeyRing(ringpath string) (openpgp.EntityList, error) { // It takes the path to the archive file, and returns a string representation of // the SHA256 sum. // -// The intended use of this function is to generate a sum of a chart TGZ file. +// This function can be used to generate a sum of any package archive file. func DigestFile(filename string) (string, error) { f, err := os.Open(filename) if err != nil { diff --git a/pkg/provenance/sign_test.go b/pkg/provenance/sign_test.go index 9a60fd19c..4594fac01 100644 --- a/pkg/provenance/sign_test.go +++ b/pkg/provenance/sign_test.go @@ -25,6 +25,9 @@ import ( "testing" pgperrors "golang.org/x/crypto/openpgp/errors" //nolint + "sigs.k8s.io/yaml" + + "helm.sh/helm/v4/pkg/chart/v2/loader" ) const ( @@ -75,8 +78,27 @@ files: hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888 ` +// loadChartMetadataForSigning is a test helper that loads chart metadata and marshals it to YAML bytes +func loadChartMetadataForSigning(t *testing.T, chartPath string) []byte { + t.Helper() + + chart, err := loader.LoadFile(chartPath) + if err != nil { + t.Fatal(err) + } + + metadataBytes, err := yaml.Marshal(chart.Metadata) + if err != nil { + t.Fatal(err) + } + + return metadataBytes +} + func TestMessageBlock(t *testing.T) { - out, err := messageBlock(testChartfile) + metadataBytes := loadChartMetadataForSigning(t, testChartfile) + + out, err := messageBlock(testChartfile, metadataBytes) if err != nil { t.Fatal(err) } @@ -88,14 +110,12 @@ func TestMessageBlock(t *testing.T) { } func TestParseMessageBlock(t *testing.T) { - md, sc, err := parseMessageBlock([]byte(testMessageBlock)) + sc, err := parseMessageBlock([]byte(testMessageBlock)) if err != nil { t.Fatal(err) } - if md.Name != "hashtest" { - t.Errorf("Expected name %q, got %q", "hashtest", md.Name) - } + // parseMessageBlock only returns checksums, not metadata (like upstream) if lsc := len(sc.Files); lsc != 1 { t.Errorf("Expected 1 file, got %d", lsc) @@ -221,7 +241,9 @@ func TestClearSign(t *testing.T) { t.Fatal(err) } - sig, err := signer.ClearSign(testChartfile) + metadataBytes := loadChartMetadataForSigning(t, testChartfile) + + sig, err := signer.ClearSign(testChartfile, metadataBytes) if err != nil { t.Fatal(err) } @@ -252,7 +274,9 @@ func TestClearSignError(t *testing.T) { // ensure that signing always fails signer.Entity.PrivateKey.PrivateKey = failSigner{} - sig, err := signer.ClearSign(testChartfile) + metadataBytes := loadChartMetadataForSigning(t, testChartfile) + + sig, err := signer.ClearSign(testChartfile, metadataBytes) if err == nil { t.Fatal("didn't get an error from ClearSign but expected one") } @@ -271,7 +295,9 @@ func TestDecodeSignature(t *testing.T) { t.Fatal(err) } - sig, err := signer.ClearSign(testChartfile) + metadataBytes := 
loadChartMetadataForSigning(t, testChartfile) + + sig, err := signer.ClearSign(testChartfile, metadataBytes) if err != nil { t.Fatal(err) } diff --git a/pkg/registry/plugin.go b/pkg/registry/plugin.go index 5d22a99ee..991bace76 100644 --- a/pkg/registry/plugin.go +++ b/pkg/registry/plugin.go @@ -38,11 +38,13 @@ type PluginPullOptions struct { // PluginPullResult contains the result of a plugin pull operation type PluginPullResult struct { - Manifest ocispec.Descriptor - PluginData []byte - ProvenanceData []byte // Optional provenance data - Ref string - PluginName string + Manifest ocispec.Descriptor + PluginData []byte + Prov struct { + Data []byte + } + Ref string + PluginName string } // PullPlugin downloads a plugin from an OCI registry using artifact type @@ -96,30 +98,31 @@ func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName return nil, fmt.Errorf("expected config media type %s for legacy compatibility, got %s", PluginArtifactType, manifest.Config.MediaType) } - // Find the required plugin tarball and optional provenance - expectedTarball := pluginName + ".tgz" - expectedProvenance := pluginName + ".tgz.prov" - + // Find the plugin tarball and optional provenance using NAME-VERSION.tgz format var pluginDescriptor *ocispec.Descriptor var provenanceDescriptor *ocispec.Descriptor + var foundProvenanceName string // Look for layers with the expected titles/annotations for _, layer := range manifest.Layers { d := layer - // Check for title annotation (preferred method) + // Check for title annotation if title, exists := d.Annotations[ocispec.AnnotationTitle]; exists { - switch title { - case expectedTarball: + // Check if this looks like a plugin tarball: {pluginName}-{version}.tgz + if pluginDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz") { pluginDescriptor = &d - case expectedProvenance: + } + // Check if this looks like a plugin provenance: {pluginName}-{version}.tgz.prov + if provenanceDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz.prov") { provenanceDescriptor = &d + foundProvenanceName = title } } } // Plugin tarball is required if pluginDescriptor == nil { - return nil, fmt.Errorf("required layer %s not found in manifest", expectedTarball) + return nil, fmt.Errorf("required layer matching pattern %s-VERSION.tgz not found in manifest", pluginName) } // Build plugin-specific result @@ -138,7 +141,7 @@ func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName // Fetch provenance data if available if provenanceDescriptor != nil { - result.ProvenanceData, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provenanceDescriptor) + result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provenanceDescriptor) if err != nil { return nil, fmt.Errorf("unable to retrieve provenance data with digest %s: %w", provenanceDescriptor.Digest, err) } @@ -146,8 +149,8 @@ func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName fmt.Fprintf(c.out, "Pulled plugin: %s\n", result.Ref) fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) - if result.ProvenanceData != nil { - fmt.Fprintf(c.out, "Provenance: %s\n", expectedProvenance) + if result.Prov.Data != nil { + fmt.Fprintf(c.out, "Provenance: %s\n", foundProvenanceName) } if strings.Contains(result.Ref, "_") { @@ -162,6 +165,7 @@ func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName type ( pluginPullOperation 
struct { pluginName string + withProv bool } // PluginPullOption allows customizing plugin pull operations @@ -199,3 +203,10 @@ func GetPluginName(source string) (string, error) { return pluginName, nil } + +// PullPluginOptWithProv configures the pull to fetch provenance data +func PullPluginOptWithProv(withProv bool) PluginPullOption { + return func(operation *pluginPullOperation) { + operation.withProv = withProv + } +} From e814ff3c38043a092b559ac449ef6286e8fb0790 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Tue, 26 Aug 2025 23:19:54 -0400 Subject: [PATCH 71/88] Remove unnecessary file i/o operations from signing and verifying Signed-off-by: Scott Rigby --- internal/plugin/installer/http_installer.go | 95 +++++++--------- internal/plugin/installer/installer.go | 31 ++--- internal/plugin/installer/local_installer.go | 47 +++++--- internal/plugin/installer/oci_installer.go | 114 +++++++++---------- internal/plugin/sign.go | 28 ++--- internal/plugin/sign_test.go | 8 +- internal/plugin/verify.go | 43 +------ internal/plugin/verify_test.go | 79 +++++++------ pkg/action/package.go | 9 +- pkg/cmd/plugin_package.go | 11 +- pkg/cmd/plugin_verify.go | 39 ++++++- pkg/downloader/chart_downloader.go | 13 ++- pkg/provenance/sign.go | 81 ++++--------- pkg/provenance/sign_test.go | 75 ++++++------ 14 files changed, 332 insertions(+), 341 deletions(-) diff --git a/internal/plugin/installer/http_installer.go b/internal/plugin/installer/http_installer.go index a4687d8c9..bb96314f4 100644 --- a/internal/plugin/installer/http_installer.go +++ b/internal/plugin/installer/http_installer.go @@ -38,8 +38,9 @@ type HTTPInstaller struct { base extractor Extractor getter getter.Getter - // Provenance data to save after installation - provData []byte + // Cached data to avoid duplicate downloads + pluginData []byte + provData []byte } // NewHTTPInstaller creates a new HttpInstaller. @@ -74,15 +75,18 @@ func NewHTTPInstaller(source string) (*HTTPInstaller, error) { // // Implements Installer. 
func (i *HTTPInstaller) Install() error { - pluginData, err := i.getter.Get(i.Source) - if err != nil { - return err + // Ensure plugin data is cached + if i.pluginData == nil { + pluginData, err := i.getter.Get(i.Source) + if err != nil { + return err + } + i.pluginData = pluginData.Bytes() } // Save the original tarball to plugins directory for verification // Extract metadata to get the actual plugin name and version - pluginBytes := pluginData.Bytes() - metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData)) if err != nil { return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) } @@ -91,20 +95,28 @@ func (i *HTTPInstaller) Install() error { if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { return fmt.Errorf("failed to create plugins directory: %w", err) } - if err := os.WriteFile(tarballPath, pluginBytes, 0644); err != nil { + if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil { return fmt.Errorf("failed to save tarball: %w", err) } - // Try to download .prov file if it exists - provURL := i.Source + ".prov" - if provData, err := i.getter.Get(provURL); err == nil { + // Ensure prov data is cached if available + if i.provData == nil { + // Try to download .prov file if it exists + provURL := i.Source + ".prov" + if provData, err := i.getter.Get(provURL); err == nil { + i.provData = provData.Bytes() + } + } + + // Save prov file if we have the data + if i.provData != nil { provPath := tarballPath + ".prov" - if err := os.WriteFile(provPath, provData.Bytes(), 0644); err != nil { + if err := os.WriteFile(provPath, i.provData, 0644); err != nil { slog.Debug("failed to save provenance file", "error", err) } } - if err := i.extractor.Extract(pluginData, i.CacheDir); err != nil { + if err := i.extractor.Extract(bytes.NewBuffer(i.pluginData), i.CacheDir); err != nil { return fmt.Errorf("extracting files from archive: %w", err) } @@ -148,51 +160,32 @@ func (i *HTTPInstaller) SupportsVerification() bool { return strings.HasSuffix(i.Source, ".tgz") || strings.HasSuffix(i.Source, ".tar.gz") } -// PrepareForVerification downloads the plugin and signature files for verification -func (i *HTTPInstaller) PrepareForVerification() (string, func(), error) { +// GetVerificationData returns cached plugin and provenance data for verification +func (i *HTTPInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) { if !i.SupportsVerification() { - return "", nil, fmt.Errorf("verification not supported for this source") - } - - // Create temporary directory for downloads - tempDir, err := os.MkdirTemp("", "helm-plugin-verify-*") - if err != nil { - return "", nil, fmt.Errorf("failed to create temp directory: %w", err) + return nil, nil, "", fmt.Errorf("verification not supported for this source") } - cleanup := func() { - os.RemoveAll(tempDir) - } - - // Download plugin tarball - pluginFile := filepath.Join(tempDir, filepath.Base(i.Source)) - - g, err := getter.All(new(cli.EnvSettings)).ByScheme("http") - if err != nil { - cleanup() - return "", nil, err - } - - data, err := g.Get(i.Source, getter.WithURL(i.Source)) - if err != nil { - cleanup() - return "", nil, fmt.Errorf("failed to download plugin: %w", err) - } - - if err := os.WriteFile(pluginFile, data.Bytes(), 0644); err != nil { - cleanup() - return "", nil, fmt.Errorf("failed to write plugin file: %w", err) + // Download plugin data once and cache it 
+ if i.pluginData == nil { + data, err := i.getter.Get(i.Source) + if err != nil { + return nil, nil, "", fmt.Errorf("failed to download plugin: %w", err) + } + i.pluginData = data.Bytes() } - // Try to download signature file - don't fail if it doesn't exist - if provData, err := g.Get(i.Source+".prov", getter.WithURL(i.Source+".prov")); err == nil { - if err := os.WriteFile(pluginFile+".prov", provData.Bytes(), 0644); err == nil { - // Store the provenance data so we can save it after installation + // Download prov data once and cache it if available + if i.provData == nil { + provData, err := i.getter.Get(i.Source + ".prov") + if err != nil { + // If provenance file doesn't exist, set provData to nil + // The verification logic will handle this gracefully + i.provData = nil + } else { i.provData = provData.Bytes() } } - // Note: We don't fail if .prov file can't be downloaded - the verification logic - // in InstallWithOptions will handle missing .prov files appropriately - return pluginFile, cleanup, nil + return i.pluginData, i.provData, filepath.Base(i.Source), nil } diff --git a/internal/plugin/installer/installer.go b/internal/plugin/installer/installer.go index dd169397e..b65dac2f4 100644 --- a/internal/plugin/installer/installer.go +++ b/internal/plugin/installer/installer.go @@ -55,8 +55,8 @@ type Installer interface { type Verifier interface { // SupportsVerification returns true if this installer can verify plugins SupportsVerification() bool - // PrepareForVerification downloads necessary files for verification - PrepareForVerification() (pluginPath string, cleanup func(), err error) + // GetVerificationData returns plugin and provenance data for verification + GetVerificationData() (archiveData, provData []byte, filename string, err error) } // Install installs a plugin. @@ -91,28 +91,19 @@ func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) return nil, fmt.Errorf("--verify is only supported for plugin tarballs (.tgz files)") } - // Prepare for verification (download files if needed) - pluginPath, cleanup, err := verifier.PrepareForVerification() + // Get verification data (works for both memory and file-based installers) + archiveData, provData, filename, err := verifier.GetVerificationData() if err != nil { - return nil, fmt.Errorf("failed to prepare for verification: %w", err) - } - if cleanup != nil { - defer cleanup() + return nil, fmt.Errorf("failed to get verification data: %w", err) } - // Check if provenance file exists - provFile := pluginPath + ".prov" - if _, err := os.Stat(provFile); err != nil { - if os.IsNotExist(err) { - // No .prov file found - emit warning but continue installation - fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. Plugin is not signed and cannot be verified.\n") - } else { - // Other error accessing .prov file - return nil, fmt.Errorf("failed to access provenance file: %w", err) - } + // Check if provenance data exists + if len(provData) == 0 { + // No .prov file found - emit warning but continue installation + fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. 
Plugin is not signed and cannot be verified.\n") } else { - // Provenance file exists - verify the plugin - verification, err := plugin.VerifyPlugin(pluginPath, opts.Keyring) + // Provenance data exists - verify the plugin + verification, err := plugin.VerifyPlugin(archiveData, provData, filename, opts.Keyring) if err != nil { return nil, fmt.Errorf("plugin verification failed: %w", err) } diff --git a/internal/plugin/installer/local_installer.go b/internal/plugin/installer/local_installer.go index 0e00c93d0..e02261d59 100644 --- a/internal/plugin/installer/local_installer.go +++ b/internal/plugin/installer/local_installer.go @@ -35,9 +35,10 @@ var ErrPluginNotAFolder = errors.New("expected plugin to be a folder") // LocalInstaller installs plugins from the filesystem. type LocalInstaller struct { base - isArchive bool - extractor Extractor - provData []byte // Provenance data to save after installation + isArchive bool + extractor Extractor + pluginData []byte // Cached plugin data + provData []byte // Cached provenance data } // NewLocalInstaller creates a new LocalInstaller. @@ -110,7 +111,7 @@ func (i *LocalInstaller) installFromArchive() error { // Copy the original tarball to plugins directory for verification // Extract metadata to get the actual plugin name and version - metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(data)) + metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(data)) if err != nil { return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) } @@ -184,21 +185,35 @@ func (i *LocalInstaller) SupportsVerification() bool { return i.isArchive } -// PrepareForVerification returns the local path for verification -func (i *LocalInstaller) PrepareForVerification() (string, func(), error) { +// GetVerificationData loads plugin and provenance data from local files for verification +func (i *LocalInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) { if !i.SupportsVerification() { - return "", nil, fmt.Errorf("verification not supported for directories") + return nil, nil, "", fmt.Errorf("verification not supported for directories") } - // For local files, try to read the .prov file if it exists - provFile := i.Source + ".prov" - if provData, err := os.ReadFile(provFile); err == nil { - // Store the provenance data so we can save it after installation - i.provData = provData + // Read and cache the plugin archive file + if i.pluginData == nil { + i.pluginData, err = os.ReadFile(i.Source) + if err != nil { + return nil, nil, "", fmt.Errorf("failed to read plugin file: %w", err) + } + } + + // Read and cache the provenance file if it exists + if i.provData == nil { + provFile := i.Source + ".prov" + i.provData, err = os.ReadFile(provFile) + if err != nil { + if os.IsNotExist(err) { + // If provenance file doesn't exist, set provData to nil + // The verification logic will handle this gracefully + i.provData = nil + } else { + // If file exists but can't be read (permissions, etc), return error + return nil, nil, "", fmt.Errorf("failed to access provenance file %s: %w", provFile, err) + } + } } - // Note: We don't fail if .prov file doesn't exist - the verification logic - // in InstallWithOptions will handle missing .prov files appropriately - // Return the source path directly, no cleanup needed - return i.Source, nil, nil + return i.pluginData, i.provData, filepath.Base(i.Source), nil } diff --git a/internal/plugin/installer/oci_installer.go 
b/internal/plugin/installer/oci_installer.go index c33ef13d5..afbb42ca5 100644 --- a/internal/plugin/installer/oci_installer.go +++ b/internal/plugin/installer/oci_installer.go @@ -44,6 +44,9 @@ type OCIInstaller struct { base settings *cli.EnvSettings getter getter.Getter + // Cached data to avoid duplicate downloads + pluginData []byte + provData []byte } // NewOCIInstaller creates a new OCIInstaller with optional getter options @@ -83,18 +86,17 @@ func NewOCIInstaller(source string, options ...getter.Option) (*OCIInstaller, er func (i *OCIInstaller) Install() error { slog.Debug("pulling OCI plugin", "source", i.Source) - // Use getter to download the plugin - pluginData, err := i.getter.Get(i.Source) - if err != nil { - return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + // Ensure plugin data is cached + if i.pluginData == nil { + pluginData, err := i.getter.Get(i.Source) + if err != nil { + return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + } + i.pluginData = pluginData.Bytes() } - // Save the original tarball to plugins directory for verification - // For OCI plugins, extract version from plugin.yaml inside the tarball - pluginBytes := pluginData.Bytes() - // Extract metadata to get the actual plugin name and version - metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData)) if err != nil { return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) } @@ -104,21 +106,29 @@ func (i *OCIInstaller) Install() error { if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil { return fmt.Errorf("failed to create plugins directory: %w", err) } - if err := os.WriteFile(tarballPath, pluginBytes, 0644); err != nil { + if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil { return fmt.Errorf("failed to save tarball: %w", err) } - // Try to download and save .prov file alongside the tarball - provSource := i.Source + ".prov" - if provData, err := i.getter.Get(provSource); err == nil { + // Ensure prov data is cached if available + if i.provData == nil { + // Try to download .prov file if it exists + provSource := i.Source + ".prov" + if provData, err := i.getter.Get(provSource); err == nil { + i.provData = provData.Bytes() + } + } + + // Save prov file if we have the data + if i.provData != nil { provPath := tarballPath + ".prov" - if err := os.WriteFile(provPath, provData.Bytes(), 0644); err != nil { + if err := os.WriteFile(provPath, i.provData, 0644); err != nil { slog.Debug("failed to save provenance file", "error", err) } } // Check if this is a gzip compressed file - if len(pluginBytes) < 2 || pluginBytes[0] != 0x1f || pluginBytes[1] != 0x8b { + if len(i.pluginData) < 2 || i.pluginData[0] != 0x1f || i.pluginData[1] != 0x8b { return fmt.Errorf("plugin data is not a gzip compressed archive") } @@ -128,7 +138,7 @@ func (i *OCIInstaller) Install() error { } // Extract as gzipped tar - if err := extractTarGz(bytes.NewReader(pluginBytes), i.CacheDir); err != nil { + if err := extractTarGz(bytes.NewReader(i.pluginData), i.CacheDir); err != nil { return fmt.Errorf("failed to extract plugin: %w", err) } @@ -251,55 +261,41 @@ func (i *OCIInstaller) SupportsVerification() bool { return true } -// PrepareForVerification downloads the plugin tarball and provenance to a temporary directory -func (i *OCIInstaller) PrepareForVerification() (pluginPath string, cleanup func(), err error) { - slog.Debug("preparing OCI 
plugin for verification", "source", i.Source) +// GetVerificationData downloads and caches plugin and provenance data from OCI registry for verification +func (i *OCIInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) { + slog.Debug("getting verification data for OCI plugin", "source", i.Source) - // Create temporary directory for verification - tempDir, err := os.MkdirTemp("", "helm-oci-verify-") - if err != nil { - return "", nil, fmt.Errorf("failed to create temp directory: %w", err) - } - - cleanup = func() { - os.RemoveAll(tempDir) + // Download plugin data once and cache it + if i.pluginData == nil { + pluginDataBuffer, err := i.getter.Get(i.Source) + if err != nil { + return nil, nil, "", fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + } + i.pluginData = pluginDataBuffer.Bytes() } - // Download the plugin tarball - pluginData, err := i.getter.Get(i.Source) - if err != nil { - cleanup() - return "", nil, fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err) + // Download prov data once and cache it if available + if i.provData == nil { + provSource := i.Source + ".prov" + // Calling getter.Get again is reasonable because: 1. The OCI registry client already optimizes the underlying network calls + // 2. Both calls use the same underlying manifest and memory store 3. The second .prov call is very fast since the data is already pulled + provDataBuffer, err := i.getter.Get(provSource) + if err != nil { + // If provenance file doesn't exist, set provData to nil + // The verification logic will handle this gracefully + i.provData = nil + } else { + i.provData = provDataBuffer.Bytes() + } } - // Extract metadata to get the actual plugin name and version - pluginBytes := pluginData.Bytes() - metadata, err := plugin.ExtractPluginMetadataFromReader(bytes.NewReader(pluginBytes)) + // Extract metadata to get the filename + metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData)) if err != nil { - cleanup() - return "", nil, fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) - } - filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) - - // Save plugin tarball to temp directory - pluginTarball := filepath.Join(tempDir, filename) - if err := os.WriteFile(pluginTarball, pluginBytes, 0644); err != nil { - cleanup() - return "", nil, fmt.Errorf("failed to save plugin tarball: %w", err) - } - - // Try to download the provenance file - don't fail if it doesn't exist - provSource := i.Source + ".prov" - if provData, err := i.getter.Get(provSource); err == nil { - // Save provenance to temp directory - provFile := filepath.Join(tempDir, filename+".prov") - if err := os.WriteFile(provFile, provData.Bytes(), 0644); err == nil { - slog.Debug("prepared plugin for verification", "plugin", pluginTarball, "provenance", provFile) - } + return nil, nil, "", fmt.Errorf("failed to extract plugin metadata from tarball: %w", err) } - // Note: We don't fail if .prov file can't be downloaded - the verification logic - // in InstallWithOptions will handle missing .prov files appropriately + filename = fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version) - slog.Debug("prepared plugin for verification", "plugin", pluginTarball) - return pluginTarball, cleanup, nil + slog.Debug("got verification data for OCI plugin", "filename", filename) + return i.pluginData, i.provData, filename, nil } diff --git a/internal/plugin/sign.go b/internal/plugin/sign.go index 134c640e7..6b8aafd3e 100644 --- 
a/internal/plugin/sign.go +++ b/internal/plugin/sign.go @@ -17,6 +17,7 @@ package plugin import ( "archive/tar" + "bytes" "compress/gzip" "errors" "fmt" @@ -29,14 +30,14 @@ import ( "helm.sh/helm/v4/pkg/provenance" ) -// SignPlugin signs a plugin using the SHA256 hash of the tarball. +// SignPlugin signs a plugin using the SHA256 hash of the tarball data. // -// This is used when packaging and signing a plugin from a tarball file. +// This is used when packaging and signing a plugin from tarball data. // It creates a signature that includes the tarball hash and plugin metadata, // allowing verification of the original tarball later. -func SignPlugin(tarballPath string, signer *provenance.Signatory) (string, error) { - // Extract plugin metadata from tarball - pluginMeta, err := extractPluginMetadata(tarballPath) +func SignPlugin(tarballData []byte, filename string, signer *provenance.Signatory) (string, error) { + // Extract plugin metadata from tarball data + pluginMeta, err := ExtractTgzPluginMetadata(bytes.NewReader(tarballData)) if err != nil { return "", fmt.Errorf("failed to extract plugin metadata: %w", err) } @@ -48,22 +49,11 @@ func SignPlugin(tarballPath string, signer *provenance.Signatory) (string, error } // Use the generic provenance signing function - return signer.ClearSign(tarballPath, metadataBytes) + return signer.ClearSign(tarballData, filename, metadataBytes) } -// extractPluginMetadata extracts plugin metadata from a tarball -func extractPluginMetadata(tarballPath string) (*Metadata, error) { - f, err := os.Open(tarballPath) - if err != nil { - return nil, err - } - defer f.Close() - - return ExtractPluginMetadataFromReader(f) -} - -// ExtractPluginMetadataFromReader extracts plugin metadata from a tarball reader -func ExtractPluginMetadataFromReader(r io.Reader) (*Metadata, error) { +// ExtractTgzPluginMetadata extracts plugin metadata from a gzipped tarball reader +func ExtractTgzPluginMetadata(r io.Reader) (*Metadata, error) { gzr, err := gzip.NewReader(r) if err != nil { return nil, err diff --git a/internal/plugin/sign_test.go b/internal/plugin/sign_test.go index a60970cdc..fce2dbeb3 100644 --- a/internal/plugin/sign_test.go +++ b/internal/plugin/sign_test.go @@ -69,8 +69,14 @@ runtimeConfig: t.Fatal(err) } + // Read the tarball data + tarballData, err := os.ReadFile(tarballPath) + if err != nil { + t.Fatalf("failed to read tarball: %v", err) + } + // Sign the plugin tarball - sig, err := SignPlugin(tarballPath, signer) + sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer) if err != nil { t.Fatalf("failed to sign plugin: %v", err) } diff --git a/internal/plugin/verify.go b/internal/plugin/verify.go index e9656a3a6..760a56e67 100644 --- a/internal/plugin/verify.go +++ b/internal/plugin/verify.go @@ -16,57 +16,24 @@ limitations under the License. package plugin import ( - "errors" - "fmt" - "os" "path/filepath" "helm.sh/helm/v4/pkg/provenance" ) -// VerifyPlugin verifies a plugin tarball against a signature. -// -// This function verifies that a plugin tarball has a valid provenance file -// and that the provenance file is signed by a trusted entity. 
-func VerifyPlugin(pluginPath, keyring string) (*provenance.Verification, error) { - // Verify the plugin path exists - fi, err := os.Stat(pluginPath) - if err != nil { - return nil, err - } - - // Only support tarball verification - if fi.IsDir() { - return nil, errors.New("directory verification not supported - only plugin tarballs can be verified") - } - - // Verify it's a tarball - if !isTarball(pluginPath) { - return nil, errors.New("plugin file must be a gzipped tarball (.tar.gz or .tgz)") - } - - // Look for provenance file - provFile := pluginPath + ".prov" - if _, err := os.Stat(provFile); err != nil { - return nil, fmt.Errorf("could not find provenance file %s: %w", provFile, err) - } - +// VerifyPlugin verifies plugin data against a signature using data in memory. +func VerifyPlugin(archiveData, provData []byte, filename, keyring string) (*provenance.Verification, error) { // Create signatory from keyring sig, err := provenance.NewFromKeyring(keyring, "") if err != nil { return nil, err } - return verifyPluginTarball(pluginPath, provFile, sig) -} - -// verifyPluginTarball verifies a plugin tarball against its signature -func verifyPluginTarball(pluginPath, provPath string, sig *provenance.Signatory) (*provenance.Verification, error) { - // Reuse chart verification logic from pkg/provenance - return sig.Verify(pluginPath, provPath) + // Use the new VerifyData method directly + return sig.Verify(archiveData, provData, filename) } // isTarball checks if a file has a tarball extension -func isTarball(filename string) bool { +func IsTarball(filename string) bool { return filepath.Ext(filename) == ".gz" || filepath.Ext(filename) == ".tgz" } diff --git a/internal/plugin/verify_test.go b/internal/plugin/verify_test.go index a09b35ec9..9c907788f 100644 --- a/internal/plugin/verify_test.go +++ b/internal/plugin/verify_test.go @@ -18,7 +18,6 @@ package plugin import ( "os" "path/filepath" - "strings" "testing" "helm.sh/helm/v4/pkg/provenance" @@ -74,7 +73,13 @@ func TestVerifyPlugin(t *testing.T) { t.Fatal(err) } - sig, err := SignPlugin(tarballPath, signer) + // Read the tarball data + tarballData, err := os.ReadFile(tarballPath) + if err != nil { + t.Fatal(err) + } + + sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer) if err != nil { t.Fatal(err) } @@ -85,8 +90,19 @@ func TestVerifyPlugin(t *testing.T) { t.Fatal(err) } + // Read the files for verification + archiveData, err := os.ReadFile(tarballPath) + if err != nil { + t.Fatal(err) + } + + provData, err := os.ReadFile(provFile) + if err != nil { + t.Fatal(err) + } + // Now verify the plugin - verification, err := VerifyPlugin(tarballPath, testPubFile) + verification, err := VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile) if err != nil { t.Fatalf("Failed to verify plugin: %v", err) } @@ -146,8 +162,19 @@ InvalidSignatureData t.Fatal(err) } + // Read the files + archiveData, err := os.ReadFile(tarballPath) + if err != nil { + t.Fatal(err) + } + + provData, err := os.ReadFile(provFile) + if err != nil { + t.Fatal(err) + } + // Try to verify - should fail - _, err = VerifyPlugin(tarballPath, testPubFile) + _, err = VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile) if err == nil { t.Error("Expected verification to fail with bad signature") } @@ -162,40 +189,26 @@ func TestVerifyPluginMissingProvenance(t *testing.T) { t.Fatal(err) } - // Try to verify without .prov file - _, err := VerifyPlugin(tarballPath, testPubFile) - if err == nil { - t.Error("Expected 
verification to fail without provenance file") - } -} - -func TestVerifyPluginDirectory(t *testing.T) { - // Create a test plugin directory - tempDir := t.TempDir() - pluginDir := filepath.Join(tempDir, "test-plugin") - if err := os.MkdirAll(pluginDir, 0755); err != nil { - t.Fatal(err) - } - - // Create a plugin.yaml file - if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil { + // Read the tarball data + archiveData, err := os.ReadFile(tarballPath) + if err != nil { t.Fatal(err) } - // Attempt to verify the directory - should fail - _, err := VerifyPlugin(pluginDir, testPubFile) + // Try to verify with empty provenance data + _, err = VerifyPlugin(archiveData, nil, filepath.Base(tarballPath), testPubFile) if err == nil { - t.Error("Expected directory verification to fail, but it succeeded") - } - - expectedError := "directory verification not supported" - if !containsString(err.Error(), expectedError) { - t.Errorf("Expected error to contain %q, got %q", expectedError, err.Error()) + t.Error("Expected verification to fail with empty provenance data") } } -func containsString(s, substr string) bool { - return len(s) >= len(substr) && (s == substr || len(s) > len(substr) && - (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr || - strings.Contains(s, substr))) +func TestVerifyPluginMalformedData(t *testing.T) { + // Test with malformed tarball data - should fail + malformedData := []byte("not a tarball") + provData := []byte("fake provenance") + + _, err := VerifyPlugin(malformedData, provData, "malformed.tar.gz", testPubFile) + if err == nil { + t.Error("Expected malformed data verification to fail, but it succeeded") + } } diff --git a/pkg/action/package.go b/pkg/action/package.go index c59efcdb3..6e762b507 100644 --- a/pkg/action/package.go +++ b/pkg/action/package.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "syscall" "github.com/Masterminds/semver/v3" @@ -156,8 +157,14 @@ func (p *Package) Clearsign(filename string) error { return fmt.Errorf("failed to marshal chart metadata: %w", err) } + // Read the chart archive file + archiveData, err := os.ReadFile(filename) + if err != nil { + return fmt.Errorf("failed to read chart archive: %w", err) + } + // Use the generic provenance signing function - sig, err := signer.ClearSign(filename, metadataBytes) + sig, err := signer.ClearSign(archiveData, filepath.Base(filename), metadataBytes) if err != nil { return err } diff --git a/pkg/cmd/plugin_package.go b/pkg/cmd/plugin_package.go index 5da6c624e..05f8bb5ad 100644 --- a/pkg/cmd/plugin_package.go +++ b/pkg/cmd/plugin_package.go @@ -142,8 +142,15 @@ func (o *pluginPackageOptions) run(out io.Writer) error { // If signing was requested, sign the tarball if o.sign { - // Sign the plugin tarball (not the source directory) - sig, err := plugin.SignPlugin(tarballPath, signer) + // Read the tarball data + tarballData, err := os.ReadFile(tarballPath) + if err != nil { + os.Remove(tarballPath) + return fmt.Errorf("failed to read tarball for signing: %w", err) + } + + // Sign the plugin tarball data + sig, err := plugin.SignPlugin(tarballData, filepath.Base(tarballPath), signer) if err != nil { os.Remove(tarballPath) return fmt.Errorf("failed to sign plugin: %w", err) diff --git a/pkg/cmd/plugin_verify.go b/pkg/cmd/plugin_verify.go index 4772fcc33..5f89e743e 100644 --- a/pkg/cmd/plugin_verify.go +++ b/pkg/cmd/plugin_verify.go @@ -18,6 +18,8 @@ package cmd import ( "fmt" "io" + "os" + "path/filepath" 
"github.com/spf13/cobra" @@ -65,8 +67,41 @@ func newPluginVerifyCmd(out io.Writer) *cobra.Command { } func (o *pluginVerifyOptions) run(out io.Writer) error { - // Verify the plugin - verification, err := plugin.VerifyPlugin(o.pluginPath, o.keyring) + // Verify the plugin path exists + fi, err := os.Stat(o.pluginPath) + if err != nil { + return err + } + + // Only support tarball verification + if fi.IsDir() { + return fmt.Errorf("directory verification not supported - only plugin tarballs can be verified") + } + + // Verify it's a tarball + if !plugin.IsTarball(o.pluginPath) { + return fmt.Errorf("plugin file must be a gzipped tarball (.tar.gz or .tgz)") + } + + // Look for provenance file + provFile := o.pluginPath + ".prov" + if _, err := os.Stat(provFile); err != nil { + return fmt.Errorf("could not find provenance file %s: %w", provFile, err) + } + + // Read the files + archiveData, err := os.ReadFile(o.pluginPath) + if err != nil { + return fmt.Errorf("failed to read plugin file: %w", err) + } + + provData, err := os.ReadFile(provFile) + if err != nil { + return fmt.Errorf("failed to read provenance file: %w", err) + } + + // Verify the plugin using data + verification, err := plugin.VerifyPlugin(archiveData, provData, filepath.Base(o.pluginPath), o.keyring) if err != nil { return err } diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index 693e6b009..a24cad3fd 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -493,7 +493,18 @@ func VerifyChart(path, provfile, keyring string) (*provenance.Verification, erro if err != nil { return nil, fmt.Errorf("failed to load keyring: %w", err) } - return sig.Verify(path, provfile) + + // Read archive and provenance files + archiveData, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read chart archive: %w", err) + } + provData, err := os.ReadFile(provfile) + if err != nil { + return nil, fmt.Errorf("failed to read provenance file: %w", err) + } + + return sig.Verify(archiveData, provData, filepath.Base(path)) } // isTar tests whether the given file is a tar file. diff --git a/pkg/provenance/sign.go b/pkg/provenance/sign.go index 103c81fbb..3ffad2765 100644 --- a/pkg/provenance/sign.go +++ b/pkg/provenance/sign.go @@ -23,7 +23,6 @@ import ( "fmt" "io" "os" - "path/filepath" "strings" "golang.org/x/crypto/openpgp" //nolint @@ -194,29 +193,20 @@ func (s *Signatory) DecryptKey(fn PassphraseFetcher) error { return s.Entity.PrivateKey.Decrypt(p) } -// ClearSign signs a package with the given key and pre-marshalled metadata. +// ClearSign signs package data with the given key and pre-marshalled metadata. // -// This takes the path to a package archive file, a key, and marshalled metadata bytes. -// This allows both charts and plugins to use the same signing infrastructure. -// -// The Signatory must have a valid Entity.PrivateKey for this to work. If it does -// not, an error will be returned. -func (s *Signatory) ClearSign(packagePath string, metadataBytes []byte) (string, error) { +// This is the core signing method that works with data in memory. +// The Signatory must have a valid Entity.PrivateKey for this to work. +func (s *Signatory) ClearSign(archiveData []byte, filename string, metadataBytes []byte) (string, error) { if s.Entity == nil { return "", errors.New("private key not found") } else if s.Entity.PrivateKey == nil { return "", errors.New("provided key is not a private key. 
Try providing a keyring with secret keys") } - if fi, err := os.Stat(packagePath); err != nil { - return "", err - } else if fi.IsDir() { - return "", errors.New("cannot sign a directory") - } - out := bytes.NewBuffer(nil) - b, err := messageBlock(packagePath, metadataBytes) + b, err := messageBlock(archiveData, filename, metadataBytes) if err != nil { return "", err } @@ -246,69 +236,47 @@ func (s *Signatory) ClearSign(packagePath string, metadataBytes []byte) (string, return out.String(), nil } -// Verify checks a signature and verifies that it is legit for a package. -func (s *Signatory) Verify(packagePath, sigpath string) (*Verification, error) { +// Verify checks a signature and verifies that it is legit for package data. +// This is the core verification method that works with data in memory. +func (s *Signatory) Verify(archiveData, provData []byte, filename string) (*Verification, error) { ver := &Verification{} - for _, fname := range []string{packagePath, sigpath} { - if fi, err := os.Stat(fname); err != nil { - return ver, err - } else if fi.IsDir() { - return ver, fmt.Errorf("%s cannot be a directory", fname) - } - } // First verify the signature - sig, err := s.decodeSignature(sigpath) - if err != nil { - return ver, fmt.Errorf("failed to decode signature: %w", err) + block, _ := clearsign.Decode(provData) + if block == nil { + return ver, errors.New("signature block not found") } - by, err := s.verifySignature(sig) + by, err := s.verifySignature(block) if err != nil { return ver, err } ver.SignedBy = by - // Second, verify the hash of the tarball. - sum, err := DigestFile(packagePath) + // Second, verify the hash of the data. + sum, err := Digest(bytes.NewBuffer(archiveData)) if err != nil { return ver, err } - sums, err := parseMessageBlock(sig.Plaintext) + sums, err := parseMessageBlock(block.Plaintext) if err != nil { return ver, err } sum = "sha256:" + sum - basename := filepath.Base(packagePath) - if sha, ok := sums.Files[basename]; !ok { - return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", basename) + if sha, ok := sums.Files[filename]; !ok { + return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", filename) } else if sha != sum { - return ver, fmt.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum) + return ver, fmt.Errorf("sha256 sum does not match for %s: %q != %q", filename, sha, sum) } ver.FileHash = sum - ver.FileName = basename + ver.FileName = filename // TODO: when image signing is added, verify that here. return ver, nil } -func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) { - data, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - - block, _ := clearsign.Decode(data) - if block == nil { - // There was no sig in the file. - return nil, errors.New("signature block not found") - } - - return block, nil -} - // verifySignature verifies that the given block is validly signed, and returns the signer. 
func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) { return openpgp.CheckDetachedSignature( @@ -318,18 +286,17 @@ func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, er ) } -// messageBlock creates a message block from a package path and pre-marshalled metadata -func messageBlock(packagePath string, metadataBytes []byte) (*bytes.Buffer, error) { - // Checksum the archive - chash, err := DigestFile(packagePath) +// messageBlock creates a message block from archive data and pre-marshalled metadata +func messageBlock(archiveData []byte, filename string, metadataBytes []byte) (*bytes.Buffer, error) { + // Checksum the archive data + chash, err := Digest(bytes.NewBuffer(archiveData)) if err != nil { return nil, err } - base := filepath.Base(packagePath) sums := &SumCollection{ Files: map[string]string{ - base: "sha256:" + chash, + filename: "sha256:" + chash, }, } diff --git a/pkg/provenance/sign_test.go b/pkg/provenance/sign_test.go index 4594fac01..4f2fc7298 100644 --- a/pkg/provenance/sign_test.go +++ b/pkg/provenance/sign_test.go @@ -98,7 +98,13 @@ func loadChartMetadataForSigning(t *testing.T, chartPath string) []byte { func TestMessageBlock(t *testing.T) { metadataBytes := loadChartMetadataForSigning(t, testChartfile) - out, err := messageBlock(testChartfile, metadataBytes) + // Read the chart file data + archiveData, err := os.ReadFile(testChartfile) + if err != nil { + t.Fatal(err) + } + + out, err := messageBlock(archiveData, filepath.Base(testChartfile), metadataBytes) if err != nil { t.Fatal(err) } @@ -243,7 +249,13 @@ func TestClearSign(t *testing.T) { metadataBytes := loadChartMetadataForSigning(t, testChartfile) - sig, err := signer.ClearSign(testChartfile, metadataBytes) + // Read the chart file data + archiveData, err := os.ReadFile(testChartfile) + if err != nil { + t.Fatal(err) + } + + sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes) if err != nil { t.Fatal(err) } @@ -276,7 +288,13 @@ func TestClearSignError(t *testing.T) { metadataBytes := loadChartMetadataForSigning(t, testChartfile) - sig, err := signer.ClearSign(testChartfile, metadataBytes) + // Read the chart file data + archiveData, err := os.ReadFile(testChartfile) + if err != nil { + t.Fatal(err) + } + + sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes) if err == nil { t.Fatal("didn't get an error from ClearSign but expected one") } @@ -286,56 +304,25 @@ func TestClearSignError(t *testing.T) { } } -func TestDecodeSignature(t *testing.T) { - // Unlike other tests, this does a round-trip test, ensuring that a signature - // generated by the library can also be verified by the library. 
- +func TestVerify(t *testing.T) { signer, err := NewFromFiles(testKeyfile, testPubfile) if err != nil { t.Fatal(err) } - metadataBytes := loadChartMetadataForSigning(t, testChartfile) - - sig, err := signer.ClearSign(testChartfile, metadataBytes) - if err != nil { - t.Fatal(err) - } - - f, err := os.CreateTemp(t.TempDir(), "helm-test-sig-") - if err != nil { - t.Fatal(err) - } - - tname := f.Name() - defer func() { - os.Remove(tname) - }() - f.WriteString(sig) - f.Close() - - sig2, err := signer.decodeSignature(tname) + // Read the chart file data + archiveData, err := os.ReadFile(testChartfile) if err != nil { t.Fatal(err) } - by, err := signer.verifySignature(sig2) + // Read the signature file data + sigData, err := os.ReadFile(testSigBlock) if err != nil { t.Fatal(err) } - if _, ok := by.Identities[testKeyName]; !ok { - t.Errorf("Expected identity %q", testKeyName) - } -} - -func TestVerify(t *testing.T) { - signer, err := NewFromFiles(testKeyfile, testPubfile) - if err != nil { - t.Fatal(err) - } - - if ver, err := signer.Verify(testChartfile, testSigBlock); err != nil { + if ver, err := signer.Verify(archiveData, sigData, filepath.Base(testChartfile)); err != nil { t.Errorf("Failed to pass verify. Err: %s", err) } else if len(ver.FileHash) == 0 { t.Error("Verification is missing hash.") @@ -345,7 +332,13 @@ func TestVerify(t *testing.T) { t.Errorf("FileName is unexpectedly %q", ver.FileName) } - if _, err = signer.Verify(testChartfile, testTamperedSigBlock); err == nil { + // Read the tampered signature file data + tamperedSigData, err := os.ReadFile(testTamperedSigBlock) + if err != nil { + t.Fatal(err) + } + + if _, err = signer.Verify(archiveData, tamperedSigData, filepath.Base(testChartfile)); err == nil { t.Errorf("Expected %s to fail.", testTamperedSigBlock) } From 591d863df544ec5d4093e514636553a279a67e09 Mon Sep 17 00:00:00 2001 From: Scott Rigby Date: Wed, 20 Aug 2025 17:17:16 -0400 Subject: [PATCH 72/88] Move Postrenderer to a plugin type Fix/add back postrenderer args unit tests Signed-off-by: Scott Rigby --- internal/plugin/config.go | 10 +- internal/plugin/loader_test.go | 30 ++- internal/plugin/metadata.go | 4 +- internal/plugin/metadata_v1.go | 2 +- internal/plugin/runtime_subprocess.go | 66 +++++- .../plugin/schema/postrenderer.go | 30 +-- .../plugdir/good/postrenderer-v1/plugin.yaml | 8 + .../plugdir/good/postrenderer-v1/sed-test.sh | 6 + pkg/action/action.go | 4 +- pkg/action/install.go | 4 +- pkg/action/upgrade.go | 4 +- pkg/cmd/flags.go | 27 +-- pkg/cmd/flags_test.go | 12 +- pkg/cmd/install.go | 2 +- pkg/cmd/template.go | 2 +- .../helm/plugins/postrenderer-v1/plugin.yaml | 8 + .../helm/plugins/postrenderer-v1/sed-test.sh | 6 + pkg/cmd/upgrade.go | 2 +- pkg/postrender/exec.go | 114 ----------- pkg/postrender/exec_test.go | 193 ------------------ pkg/postrenderer/postrenderer.go | 85 ++++++++ pkg/postrenderer/postrenderer_test.go | 89 ++++++++ .../plugins/postrenderer-v1/plugin.yaml | 8 + .../plugins/postrenderer-v1/sed-test.sh | 6 + 24 files changed, 368 insertions(+), 354 deletions(-) rename pkg/postrender/postrender.go => internal/plugin/schema/postrenderer.go (50%) create mode 100644 internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml create mode 100755 internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh create mode 100644 pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml create mode 100755 pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh delete mode 100644 pkg/postrender/exec.go delete mode 100644 
pkg/postrender/exec_test.go create mode 100644 pkg/postrenderer/postrenderer.go create mode 100644 pkg/postrenderer/postrenderer_test.go create mode 100644 pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml create mode 100755 pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh diff --git a/internal/plugin/config.go b/internal/plugin/config.go index 83a2e0b25..e8bf4e356 100644 --- a/internal/plugin/config.go +++ b/internal/plugin/config.go @@ -46,8 +46,9 @@ type ConfigGetter struct { Protocols []string `yaml:"protocols"` } -func (c *ConfigCLI) GetType() string { return "cli/v1" } -func (c *ConfigGetter) GetType() string { return "getter/v1" } +// ConfigPostrenderer represents the configuration for postrenderer plugins +// there are no runtime-independent configurations for postrenderer/v1 plugin type +type ConfigPostrenderer struct{} func (c *ConfigCLI) Validate() error { // Config validation for CLI plugins @@ -66,6 +67,11 @@ func (c *ConfigGetter) Validate() error { return nil } +func (c *ConfigPostrenderer) Validate() error { + // Config validation for postrenderer plugins + return nil +} + func remarshalConfig[T Config](configData map[string]any) (Config, error) { data, err := yaml.Marshal(configData) if err != nil { diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go index 81ef26e02..63d930cbe 100644 --- a/internal/plugin/loader_test.go +++ b/internal/plugin/loader_test.go @@ -163,6 +163,31 @@ func TestLoadDirGetter(t *testing.T) { assert.Equal(t, expect, plug.Metadata()) } +func TestPostRenderer(t *testing.T) { + dirname := "testdata/plugdir/good/postrenderer-v1" + + expect := Metadata{ + Name: "postrenderer-v1", + Version: "1.2.3", + Type: "postrenderer/v1", + APIVersion: "v1", + Runtime: "subprocess", + Config: &ConfigPostrenderer{}, + RuntimeConfig: &RuntimeConfigSubprocess{ + PlatformCommands: []PlatformCommand{ + { + Command: "${HELM_PLUGIN_DIR}/sed-test.sh", + }, + }, + }, + } + + plug, err := LoadDir(dirname) + require.NoError(t, err) + assert.Equal(t, dirname, plug.Dir()) + assert.Equal(t, expect, plug.Metadata()) +} + func TestDetectDuplicates(t *testing.T) { plugs := []Plugin{ mockSubprocessCLIPlugin(t, "foo"), @@ -195,13 +220,14 @@ func TestLoadAll(t *testing.T) { plugsMap[p.Metadata().Name] = p } - assert.Len(t, plugsMap, 6) + assert.Len(t, plugsMap, 7) assert.Contains(t, plugsMap, "downloader") assert.Contains(t, plugsMap, "echo-legacy") assert.Contains(t, plugsMap, "echo-v1") assert.Contains(t, plugsMap, "getter") assert.Contains(t, plugsMap, "hello-legacy") assert.Contains(t, plugsMap, "hello-v1") + assert.Contains(t, plugsMap, "postrenderer-v1") } func TestFindPlugins(t *testing.T) { @@ -228,7 +254,7 @@ func TestFindPlugins(t *testing.T) { { name: "normal", plugdirs: "./testdata/plugdir/good", - expected: 6, + expected: 7, }, } for _, c := range cases { diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index bb7e9409f..fbe7a16b8 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -31,7 +31,7 @@ type Metadata struct { // Name is the name of the plugin Name string - // Type of plugin (eg, cli/v1, getter/v1) + // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1) Type string // Runtime specifies the runtime type (subprocess, wasm) @@ -191,6 +191,8 @@ func convertMetadataConfig(pluginType string, configRaw map[string]any) (Config, config, err = remarshalConfig[*ConfigCLI](configRaw) case "getter/v1": config, err = remarshalConfig[*ConfigGetter](configRaw) + case 
"postrenderer/v1": + config, err = remarshalConfig[*ConfigPostrenderer](configRaw) default: return nil, fmt.Errorf("unsupported plugin type: %s", pluginType) } diff --git a/internal/plugin/metadata_v1.go b/internal/plugin/metadata_v1.go index 654aa8900..81dbc2e20 100644 --- a/internal/plugin/metadata_v1.go +++ b/internal/plugin/metadata_v1.go @@ -27,7 +27,7 @@ type MetadataV1 struct { // Name is the name of the plugin Name string `yaml:"name"` - // Type of plugin (eg, cli/v1, getter/v1) + // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1) Type string `yaml:"type"` // Runtime specifies the runtime type (subprocess, wasm) diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index 163f0621f..e7faeed36 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -16,9 +16,11 @@ limitations under the License. package plugin import ( + "bytes" "context" "fmt" "io" + "log/slog" "os" "os/exec" "syscall" @@ -36,7 +38,7 @@ type SubprocessProtocolCommand struct { Command string `yaml:"command"` } -// RuntimeConfigSubprocess represents configuration for subprocess runtime +// RuntimeConfigSubprocess implements RuntimeConfig for RuntimeSubprocess type RuntimeConfigSubprocess struct { // PlatformCommand is a list containing a plugin command, with a platform selector and support for args. PlatformCommands []PlatformCommand `yaml:"platformCommand"` @@ -73,7 +75,7 @@ type RuntimeSubprocess struct{} var _ Runtime = (*RuntimeSubprocess)(nil) -// CreateRuntime implementation for RuntimeConfig +// CreatePlugin implementation for Runtime func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) { return &SubprocessPluginRuntime{ metadata: *metadata, @@ -82,7 +84,7 @@ func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) ( }, nil } -// RuntimeSubprocess implements the Runtime interface for subprocess execution +// SubprocessPluginRuntime implements the Plugin interface for subprocess execution type SubprocessPluginRuntime struct { metadata Metadata pluginDir string @@ -105,6 +107,8 @@ func (r *SubprocessPluginRuntime) Invoke(_ context.Context, input *Input) (*Outp return r.runCLI(input) case schema.InputMessageGetterV1: return r.runGetter(input) + case schema.InputMessagePostRendererV1: + return r.runPostrenderer(input) default: return nil, fmt.Errorf("unsupported subprocess plugin type %q", r.metadata.Type) } @@ -216,6 +220,62 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { }, nil } +func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) { + if _, ok := input.Message.(schema.InputMessagePostRendererV1); !ok { + return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name) + } + + msg := input.Message.(schema.InputMessagePostRendererV1) + extraArgs := msg.ExtraArgs + settings := msg.Settings + + // Setup plugin environment + SetupPluginEnv(settings, r.metadata.Name, r.pluginDir) + + cmds := r.RuntimeConfig.PlatformCommands + if len(cmds) == 0 && len(r.RuntimeConfig.Command) > 0 { + cmds = []PlatformCommand{{Command: r.RuntimeConfig.Command}} + } + + command, args, err := PrepareCommands(cmds, true, extraArgs) + if err != nil { + return nil, fmt.Errorf("failed to prepare plugin command: %w", err) + } + + // TODO de-duplicate code here by calling RuntimeSubprocess.invokeWithEnv() + cmd := exec.Command( + command, + args...) 
+ + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + go func() { + defer stdin.Close() + io.Copy(stdin, msg.Manifests) + }() + + postRendered := &bytes.Buffer{} + stderr := &bytes.Buffer{} + + //cmd.Env = pluginExec.env + cmd.Stdout = postRendered + cmd.Stderr = stderr + + if err := executeCmd(cmd, r.metadata.Name); err != nil { + slog.Info("plugin execution failed", slog.String("stderr", stderr.String())) + return nil, err + } + + return &Output{ + Message: &schema.OutputMessagePostRendererV1{ + Manifests: postRendered, + }, + }, nil +} + // SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because // the plugin subsystem itself needs access to the environment variables // created here. diff --git a/pkg/postrender/postrender.go b/internal/plugin/schema/postrenderer.go similarity index 50% rename from pkg/postrender/postrender.go rename to internal/plugin/schema/postrenderer.go index 3af384290..0f0c09369 100644 --- a/pkg/postrender/postrender.go +++ b/internal/plugin/schema/postrenderer.go @@ -14,16 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package postrender contains an interface that can be implemented for custom -// post-renderers and an exec implementation that can be used for arbitrary -// binaries and scripts -package postrender - -import "bytes" - -type PostRenderer interface { - // Run expects a single buffer filled with Helm rendered manifests. It - // expects the modified results to be returned on a separate buffer or an - // error if there was an issue or failure while running the post render step - Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error) +package schema + +import ( + "bytes" + + "helm.sh/helm/v4/pkg/cli" +) + +// InputMessagePostRendererV1 implements Input.Message +type InputMessagePostRendererV1 struct { + Manifests *bytes.Buffer `json:"manifests"` + // from CLI --post-renderer-args + ExtraArgs []string `json:"extraArgs"` + Settings *cli.EnvSettings `json:"settings"` +} + +type OutputMessagePostRendererV1 struct { + Manifests *bytes.Buffer `json:"manifests"` } diff --git a/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml b/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml new file mode 100644 index 000000000..30f1599b4 --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml @@ -0,0 +1,8 @@ +name: "postrenderer-v1" +version: "1.2.3" +type: postrenderer/v1 +apiVersion: v1 +runtime: subprocess +runtimeConfig: + platformCommand: + - command: "${HELM_PLUGIN_DIR}/sed-test.sh" diff --git a/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh b/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh new file mode 100755 index 000000000..a016e398f --- /dev/null +++ b/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh @@ -0,0 +1,6 @@ +#!/bin/sh +if [ $# -eq 0 ]; then + sed s/FOOTEST/BARTEST/g <&0 +else + sed s/FOOTEST/"$*"/g <&0 +fi diff --git a/pkg/action/action.go b/pkg/action/action.go index 38c8b6729..7b8fa3c34 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -43,7 +43,7 @@ import ( chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/engine" "helm.sh/helm/v4/pkg/kube" - "helm.sh/helm/v4/pkg/postrender" + "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" @@ -176,7 +176,7 @@ func 
splitAndDeannotate(postrendered string) (map[string]string, error) { // TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed // // This code has to do with writing files to disk. -func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { +func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { var hs []*release.Hook b := bytes.NewBuffer(nil) diff --git a/pkg/action/install.go b/pkg/action/install.go index 276009b5c..5ca499d64 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -48,7 +48,7 @@ import ( "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" - "helm.sh/helm/v4/pkg/postrender" + "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" @@ -124,7 +124,7 @@ type Install struct { UseReleaseName bool // TakeOwnership will ignore the check for helm annotations and take ownership of the resources. TakeOwnership bool - PostRenderer postrender.PostRenderer + PostRenderer postrenderer.PostRenderer // Lock to control raceconditions when the process receives a SIGTERM Lock sync.Mutex } diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 63646c12b..f7fbd490f 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -31,7 +31,7 @@ import ( chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/kube" - "helm.sh/helm/v4/pkg/postrender" + "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" @@ -114,7 +114,7 @@ type Upgrade struct { // // If this is non-nil, then after templates are rendered, they will be sent to the // post renderer before sending to the Kubernetes API server. - PostRenderer postrender.PostRenderer + PostRenderer postrenderer.PostRenderer // DisableOpenAPIValidation controls whether OpenAPI validation is enforced. DisableOpenAPIValidation bool // Get missing dependencies diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index d11073e5f..98881c795 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -31,11 +31,12 @@ import ( "k8s.io/klog/v2" "helm.sh/helm/v4/pkg/action" + "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/kube" - "helm.sh/helm/v4/pkg/postrender" + "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/repo" ) @@ -164,16 +165,18 @@ func (o *outputValue) Set(s string) error { return nil } -func bindPostRenderFlag(cmd *cobra.Command, varRef *postrender.PostRenderer) { - p := &postRendererOptions{varRef, "", []string{}} - cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the path to an executable to be used for post rendering. 
If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path") +// TODO there is probably a better way to pass cobra settings than as a param +func bindPostRenderFlag(cmd *cobra.Command, varRef *postrenderer.PostRenderer, settings *cli.EnvSettings) { + p := &postRendererOptions{varRef, "", []string{}, settings} + cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the name of a postrenderer type plugin to be used for post rendering. If it exists, the plugin will be used") cmd.Flags().Var(&postRendererArgsSlice{p}, postRenderArgsFlag, "an argument to the post-renderer (can specify multiple)") } type postRendererOptions struct { - renderer *postrender.PostRenderer - binaryPath string + renderer *postrenderer.PostRenderer + pluginName string args []string + settings *cli.EnvSettings } type postRendererString struct { @@ -181,7 +184,7 @@ type postRendererString struct { } func (p *postRendererString) String() string { - return p.options.binaryPath + return p.options.pluginName } func (p *postRendererString) Type() string { @@ -192,11 +195,11 @@ func (p *postRendererString) Set(val string) error { if val == "" { return nil } - if p.options.binaryPath != "" { + if p.options.pluginName != "" { return fmt.Errorf("cannot specify --post-renderer flag more than once") } - p.options.binaryPath = val - pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...) + p.options.pluginName = val + pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...) if err != nil { return err } @@ -221,11 +224,11 @@ func (p *postRendererArgsSlice) Set(val string) error { // a post-renderer defined by a user may accept empty arguments p.options.args = append(p.options.args, val) - if p.options.binaryPath == "" { + if p.options.pluginName == "" { return nil } // overwrite if already create PostRenderer by `post-renderer` flags - pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...) + pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...) 
if err != nil { return err } diff --git a/pkg/cmd/flags_test.go b/pkg/cmd/flags_test.go index cbc2e6419..dce748a6b 100644 --- a/pkg/cmd/flags_test.go +++ b/pkg/cmd/flags_test.go @@ -101,20 +101,22 @@ func outputFlagCompletionTest(t *testing.T, cmdName string) { func TestPostRendererFlagSetOnce(t *testing.T) { cfg := action.Configuration{} client := action.NewInstall(&cfg) + settings.PluginsDirectory = "testdata/helmhome/helm/plugins" str := postRendererString{ options: &postRendererOptions{ renderer: &client.PostRenderer, + settings: settings, }, } - // Set the binary once - err := str.Set("echo") + // Set the plugin name once + err := str.Set("postrenderer-v1") require.NoError(t, err) - // Set the binary again to the same value is not ok - err = str.Set("echo") + // Set the plugin name again to the same value is not ok + err = str.Set("postrenderer-v1") require.Error(t, err) - // Set the binary again to a different value is not ok + // Set the plugin name again to a different value is not ok err = str.Set("cat") require.Error(t, err) } diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go index 361d91e5f..c4e121c1f 100644 --- a/pkg/cmd/install.go +++ b/pkg/cmd/install.go @@ -179,7 +179,7 @@ func newInstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f := cmd.Flags() f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag") bindOutputFlag(cmd, &outfmt) - bindPostRenderFlag(cmd, &client.PostRenderer) + bindPostRenderFlag(cmd, &client.PostRenderer, settings) return cmd } diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index ac20a45b3..c93b5395b 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -203,7 +203,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.StringVar(&kubeVersion, "kube-version", "", "Kubernetes version used for Capabilities.KubeVersion") f.StringSliceVarP(&extraAPIs, "api-versions", "a", []string{}, "Kubernetes api versions used for Capabilities.APIVersions (multiple can be specified)") f.BoolVar(&client.UseReleaseName, "release-name", false, "use release name in the output-dir path.") - bindPostRenderFlag(cmd, &client.PostRenderer) + bindPostRenderFlag(cmd, &client.PostRenderer, settings) return cmd } diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml new file mode 100644 index 000000000..30f1599b4 --- /dev/null +++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml @@ -0,0 +1,8 @@ +name: "postrenderer-v1" +version: "1.2.3" +type: postrenderer/v1 +apiVersion: v1 +runtime: subprocess +runtimeConfig: + platformCommand: + - command: "${HELM_PLUGIN_DIR}/sed-test.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh new file mode 100755 index 000000000..a016e398f --- /dev/null +++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh @@ -0,0 +1,6 @@ +#!/bin/sh +if [ $# -eq 0 ]; then + sed s/FOOTEST/BARTEST/g <&0 +else + sed s/FOOTEST/"$*"/g <&0 +fi diff --git a/pkg/cmd/upgrade.go b/pkg/cmd/upgrade.go index 74061caf7..c8fbf8bd3 100644 --- a/pkg/cmd/upgrade.go +++ b/pkg/cmd/upgrade.go @@ -300,7 +300,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { addChartPathOptionsFlags(f, &client.ChartPathOptions) addValueOptionsFlags(f, valueOpts) bindOutputFlag(cmd, &outfmt) - bindPostRenderFlag(cmd, 
&client.PostRenderer) + bindPostRenderFlag(cmd, &client.PostRenderer, settings) AddWaitFlag(cmd, &client.WaitStrategy) cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts") cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts") diff --git a/pkg/postrender/exec.go b/pkg/postrender/exec.go deleted file mode 100644 index 16d9c09ce..000000000 --- a/pkg/postrender/exec.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package postrender - -import ( - "bytes" - "fmt" - "io" - "os/exec" - "path/filepath" -) - -type execRender struct { - binaryPath string - args []string -} - -// NewExec returns a PostRenderer implementation that calls the provided binary. -// It returns an error if the binary cannot be found. If the path does not -// contain any separators, it will search in $PATH, otherwise it will resolve -// any relative paths to a fully qualified path -func NewExec(binaryPath string, args ...string) (PostRenderer, error) { - fullPath, err := getFullPath(binaryPath) - if err != nil { - return nil, err - } - return &execRender{fullPath, args}, nil -} - -// Run the configured binary for the post render -func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { - cmd := exec.Command(p.binaryPath, p.args...) - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - - var postRendered = &bytes.Buffer{} - var stderr = &bytes.Buffer{} - cmd.Stdout = postRendered - cmd.Stderr = stderr - - go func() { - defer stdin.Close() - io.Copy(stdin, renderedManifests) - }() - err = cmd.Run() - if err != nil { - return nil, fmt.Errorf("error while running command %s. error output:\n%s: %w", p.binaryPath, stderr.String(), err) - } - - // If the binary returned almost nothing, it's likely that it didn't - // successfully render anything - if len(bytes.TrimSpace(postRendered.Bytes())) == 0 { - return nil, fmt.Errorf("post-renderer %q produced empty output", p.binaryPath) - } - - return postRendered, nil -} - -// getFullPath returns the full filepath to the binary to execute. If the path -// does not contain any separators, it will search in $PATH, otherwise it will -// resolve any relative paths to a fully qualified path -func getFullPath(binaryPath string) (string, error) { - // NOTE(thomastaylor312): I am leaving this code commented out here. During - // the implementation of post-render, it was brought up that if we are - // relying on plugins, we should actually use the plugin system so it can - // properly handle multiple OSs. This will be a feature add in the future, - // so I left this code for reference. 
It can be deleted or reused once the - // feature is implemented - - // Manually check the plugin dir first - // if !strings.Contains(binaryPath, string(filepath.Separator)) { - // // First check the plugin dir - // pluginDir := helmpath.DataPath("plugins") // Default location - // // If location for plugins is explicitly set, check there - // if v, ok := os.LookupEnv("HELM_PLUGINS"); ok { - // pluginDir = v - // } - // // The plugins variable can actually contain multiple paths, so loop through those - // for _, p := range filepath.SplitList(pluginDir) { - // _, err := os.Stat(filepath.Join(p, binaryPath)) - // if err != nil && !errors.Is(err, fs.ErrNotExist) { - // return "", err - // } else if err == nil { - // binaryPath = filepath.Join(p, binaryPath) - // break - // } - // } - // } - - // Now check for the binary using the given path or check if it exists in - // the path and is executable - checkedPath, err := exec.LookPath(binaryPath) - if err != nil { - return "", fmt.Errorf("unable to find binary at %s: %w", binaryPath, err) - } - - return filepath.Abs(checkedPath) -} diff --git a/pkg/postrender/exec_test.go b/pkg/postrender/exec_test.go deleted file mode 100644 index a10ad2cc4..000000000 --- a/pkg/postrender/exec_test.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package postrender - -import ( - "bytes" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const testingScript = `#!/bin/sh -if [ $# -eq 0 ]; then -sed s/FOOTEST/BARTEST/g <&0 -else -sed s/FOOTEST/"$*"/g <&0 -fi -` - -func TestGetFullPath(t *testing.T) { - is := assert.New(t) - t.Run("full path resolves correctly", func(t *testing.T) { - testpath := setupTestingScript(t) - - fullPath, err := getFullPath(testpath) - is.NoError(err) - is.Equal(testpath, fullPath) - }) - - t.Run("relative path resolves correctly", func(t *testing.T) { - testpath := setupTestingScript(t) - - currentDir, err := os.Getwd() - require.NoError(t, err) - relative, err := filepath.Rel(currentDir, testpath) - require.NoError(t, err) - fullPath, err := getFullPath(relative) - is.NoError(err) - is.Equal(testpath, fullPath) - }) - - t.Run("binary in PATH resolves correctly", func(t *testing.T) { - testpath := setupTestingScript(t) - - t.Setenv("PATH", filepath.Dir(testpath)) - - fullPath, err := getFullPath(filepath.Base(testpath)) - is.NoError(err) - is.Equal(testpath, fullPath) - }) - - // NOTE(thomastaylor312): See note in getFullPath for more details why this - // is here - - // t.Run("binary in plugin path resolves correctly", func(t *testing.T) { - // testpath, cleanup := setupTestingScript(t) - // defer cleanup() - - // realPath := os.Getenv("HELM_PLUGINS") - // os.Setenv("HELM_PLUGINS", filepath.Dir(testpath)) - // defer func() { - // os.Setenv("HELM_PLUGINS", realPath) - // }() - - // fullPath, err := getFullPath(filepath.Base(testpath)) - // is.NoError(err) - // is.Equal(testpath, fullPath) - // }) - - // t.Run("binary in multiple plugin paths resolves correctly", func(t *testing.T) { - // testpath, cleanup := setupTestingScript(t) - // defer cleanup() - - // realPath := os.Getenv("HELM_PLUGINS") - // os.Setenv("HELM_PLUGINS", filepath.Dir(testpath)+string(os.PathListSeparator)+"/another/dir") - // defer func() { - // os.Setenv("HELM_PLUGINS", realPath) - // }() - - // fullPath, err := getFullPath(filepath.Base(testpath)) - // is.NoError(err) - // is.Equal(testpath, fullPath) - // }) -} - -func TestExecRun(t *testing.T) { - if runtime.GOOS == "windows" { - // the actual Run test uses a basic sed example, so skip this test on windows - t.Skip("skipping on windows") - } - is := assert.New(t) - testpath := setupTestingScript(t) - - renderer, err := NewExec(testpath) - require.NoError(t, err) - - output, err := renderer.Run(bytes.NewBufferString("FOOTEST")) - is.NoError(err) - is.Contains(output.String(), "BARTEST") -} - -func TestExecRunWithNoOutput(t *testing.T) { - if runtime.GOOS == "windows" { - // the actual Run test uses a basic sed example, so skip this test on windows - t.Skip("skipping on windows") - } - is := assert.New(t) - testpath := setupTestingScript(t) - - renderer, err := NewExec(testpath) - require.NoError(t, err) - - _, err = renderer.Run(bytes.NewBufferString("")) - is.Error(err) -} - -func TestNewExecWithOneArgsRun(t *testing.T) { - if runtime.GOOS == "windows" { - // the actual Run test uses a basic sed example, so skip this test on windows - t.Skip("skipping on windows") - } - is := assert.New(t) - testpath := setupTestingScript(t) - - renderer, err := NewExec(testpath, "ARG1") - require.NoError(t, err) - - output, err := renderer.Run(bytes.NewBufferString("FOOTEST")) - is.NoError(err) - is.Contains(output.String(), "ARG1") -} - -func TestNewExecWithTwoArgsRun(t *testing.T) { - if runtime.GOOS == 
"windows" { - // the actual Run test uses a basic sed example, so skip this test on windows - t.Skip("skipping on windows") - } - is := assert.New(t) - testpath := setupTestingScript(t) - - renderer, err := NewExec(testpath, "ARG1", "ARG2") - require.NoError(t, err) - - output, err := renderer.Run(bytes.NewBufferString("FOOTEST")) - is.NoError(err) - is.Contains(output.String(), "ARG1 ARG2") -} - -func setupTestingScript(t *testing.T) (filepath string) { - t.Helper() - - tempdir := t.TempDir() - - f, err := os.CreateTemp(tempdir, "post-render-test.sh") - if err != nil { - t.Fatalf("unable to create tempfile for testing: %s", err) - } - - _, err = f.WriteString(testingScript) - if err != nil { - t.Fatalf("unable to write tempfile for testing: %s", err) - } - - err = f.Chmod(0o755) - if err != nil { - t.Fatalf("unable to make tempfile executable for testing: %s", err) - } - - err = f.Close() - if err != nil { - t.Fatalf("unable to close tempfile after writing: %s", err) - } - - return f.Name() -} diff --git a/pkg/postrenderer/postrenderer.go b/pkg/postrenderer/postrenderer.go new file mode 100644 index 000000000..2107cc465 --- /dev/null +++ b/pkg/postrenderer/postrenderer.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package postrenderer + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + + "helm.sh/helm/v4/internal/plugin/schema" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/pkg/cli" +) + +// PostRenderer is an interface different plugin runtimes +// it may be also be used without the factory for custom post-renderers +type PostRenderer interface { + // Run expects a single buffer filled with Helm rendered manifests. 
It + // expects the modified results to be returned on a separate buffer or an + // error if there was an issue or failure while running the post render step + Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error) +} + +// NewPostRendererPlugin creates a PostRenderer that uses the plugin's Runtime +func NewPostRendererPlugin(settings *cli.EnvSettings, pluginName string, args ...string) (PostRenderer, error) { + descriptor := plugin.Descriptor{ + Name: pluginName, + Type: "postrenderer/v1", + } + p, err := plugin.FindPlugin(filepath.SplitList(settings.PluginsDirectory), descriptor) + if err != nil { + return nil, err + } + + return &postRendererPlugin{ + plugin: p, + args: args, + settings: settings, + }, nil +} + +// postRendererPlugin implements PostRenderer by delegating to the plugin's Runtime +type postRendererPlugin struct { + plugin plugin.Plugin + args []string + settings *cli.EnvSettings +} + +// Run implements PostRenderer by using the plugin's Runtime +func (r *postRendererPlugin) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { + input := &plugin.Input{ + Message: schema.InputMessagePostRendererV1{ + ExtraArgs: r.args, + Manifests: renderedManifests, + Settings: r.settings, + }, + } + output, err := r.plugin.Invoke(context.Background(), input) + if err != nil { + return nil, fmt.Errorf("failed to invoke post-renderer plugin %q: %w", r.plugin.Metadata().Name, err) + } + + outputMessage := output.Message.(*schema.OutputMessagePostRendererV1) + + // If the binary returned almost nothing, it's likely that it didn't + // successfully render anything + if len(bytes.TrimSpace(outputMessage.Manifests.Bytes())) == 0 { + return nil, fmt.Errorf("post-renderer %q produced empty output", r.plugin.Metadata().Name) + } + + return outputMessage.Manifests, nil +} diff --git a/pkg/postrenderer/postrenderer_test.go b/pkg/postrenderer/postrenderer_test.go new file mode 100644 index 000000000..9addd481d --- /dev/null +++ b/pkg/postrenderer/postrenderer_test.go @@ -0,0 +1,89 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package postrenderer + +import ( + "bytes" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/pkg/cli" +) + +func TestNewPostRenderPluginRunWithNoOutput(t *testing.T) { + if runtime.GOOS == "windows" { + // the actual Run test uses a basic sed example, so skip this test on windows + t.Skip("skipping on windows") + } + is := assert.New(t) + s := cli.New() + s.PluginsDirectory = "testdata/plugins" + name := "postrenderer-v1" + base := filepath.Join(s.PluginsDirectory, name) + plugin.SetupPluginEnv(s, name, base) + + renderer, err := NewPostRendererPlugin(s, name, "") + require.NoError(t, err) + + _, err = renderer.Run(bytes.NewBufferString("")) + is.Error(err) +} + +func TestNewPostRenderPluginWithOneArgsRun(t *testing.T) { + if runtime.GOOS == "windows" { + // the actual Run test uses a basic sed example, so skip this test on windows + t.Skip("skipping on windows") + } + is := assert.New(t) + s := cli.New() + s.PluginsDirectory = "testdata/plugins" + name := "postrenderer-v1" + base := filepath.Join(s.PluginsDirectory, name) + plugin.SetupPluginEnv(s, name, base) + + renderer, err := NewPostRendererPlugin(s, name, "ARG1") + require.NoError(t, err) + + output, err := renderer.Run(bytes.NewBufferString("FOOTEST")) + is.NoError(err) + is.Contains(output.String(), "ARG1") +} + +func TestNewPostRenderPluginWithTwoArgsRun(t *testing.T) { + if runtime.GOOS == "windows" { + // the actual Run test uses a basic sed example, so skip this test on windows + t.Skip("skipping on windows") + } + is := assert.New(t) + s := cli.New() + s.PluginsDirectory = "testdata/plugins" + name := "postrenderer-v1" + base := filepath.Join(s.PluginsDirectory, name) + plugin.SetupPluginEnv(s, name, base) + + renderer, err := NewPostRendererPlugin(s, name, "ARG1", "ARG2") + require.NoError(t, err) + + output, err := renderer.Run(bytes.NewBufferString("FOOTEST")) + is.NoError(err) + is.Contains(output.String(), "ARG1 ARG2") +} diff --git a/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml new file mode 100644 index 000000000..30f1599b4 --- /dev/null +++ b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml @@ -0,0 +1,8 @@ +name: "postrenderer-v1" +version: "1.2.3" +type: postrenderer/v1 +apiVersion: v1 +runtime: subprocess +runtimeConfig: + platformCommand: + - command: "${HELM_PLUGIN_DIR}/sed-test.sh" diff --git a/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh b/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh new file mode 100755 index 000000000..a016e398f --- /dev/null +++ b/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh @@ -0,0 +1,6 @@ +#!/bin/sh +if [ $# -eq 0 ]; then + sed s/FOOTEST/BARTEST/g <&0 +else + sed s/FOOTEST/"$*"/g <&0 +fi From c35755a197e0509a654d44893149e08a438576e5 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Fri, 22 Aug 2025 12:26:33 -0700 Subject: [PATCH 73/88] Remove legacy Command/Hooks from v1 Subprocess (#23) Signed-off-by: George Jenkins --- .../plugin/installer/local_installer_test.go | 2 +- internal/plugin/loader_test.go | 9 +-- internal/plugin/metadata.go | 27 +++++++-- internal/plugin/metadata_legacy.go | 6 +- internal/plugin/metadata_test.go | 39 +++---------- internal/plugin/plugin_test.go | 2 +- internal/plugin/runtime_subprocess.go | 58 +++++-------------- internal/plugin/runtime_subprocess_getter.go | 12 ++-- 
internal/plugin/subprocess_commands_test.go | 8 +-- .../bad/duplicate-entries-v1/plugin.yaml | 13 +++-- .../testdata/plugdir/good/getter/plugin.yaml | 3 +- pkg/cmd/plugin_package_test.go | 2 +- pkg/cmd/plugin_test.go | 2 +- .../helm/plugins/fullenv/plugin.yaml | 6 +- .../helmhome/helm/plugins/args/plugin.yaml | 3 +- .../helmhome/helm/plugins/echo/plugin.yaml | 3 +- .../helmhome/helm/plugins/env/plugin.yaml | 6 +- .../helm/plugins/exitwith/plugin.yaml | 6 +- .../helmhome/helm/plugins/fullenv/plugin.yaml | 6 +- .../helm/plugins/postrenderer-v1/plugin.yaml | 9 ++- pkg/cmd/testdata/testplugin/plugin.yaml | 12 ++++ pkg/getter/plugingetter_test.go | 2 +- .../plugins/postrenderer-v1/plugin.yaml | 2 +- 23 files changed, 120 insertions(+), 118 deletions(-) create mode 100644 pkg/cmd/testdata/testplugin/plugin.yaml diff --git a/internal/plugin/installer/local_installer_test.go b/internal/plugin/installer/local_installer_test.go index 339028ef3..189108fdb 100644 --- a/internal/plugin/installer/local_installer_test.go +++ b/internal/plugin/installer/local_installer_test.go @@ -86,7 +86,7 @@ func TestLocalInstallerTarball(t *testing.T) { Body string Mode int64 }{ - {"test-plugin/plugin.yaml", "name: test-plugin\nversion: 1.0.0\nusage: test\ndescription: test\ncommand: echo", 0644}, + {"test-plugin/plugin.yaml", "name: test-plugin\napiVersion: v1\ntype: cli/v1\nruntime: subprocess\nversion: 1.0.0\nconfig:\n shortHelp: test\n longHelp: test\nruntimeConfig:\n platformCommand:\n - command: echo", 0644}, {"test-plugin/bin/test-plugin", "#!/bin/bash\necho test", 0755}, } diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go index 63d930cbe..d214f7b6b 100644 --- a/internal/plugin/loader_test.go +++ b/internal/plugin/loader_test.go @@ -80,7 +80,7 @@ func TestLoadDir(t *testing.T) { IgnoreFlags: true, }, RuntimeConfig: &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{ + PlatformCommand: []PlatformCommand{ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}}, {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}}, }, @@ -90,6 +90,7 @@ func TestLoadDir(t *testing.T) { {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}}, }, }, + expandHookArgs: apiVersion == "legacy", }, } } @@ -150,8 +151,8 @@ func TestLoadDirGetter(t *testing.T) { RuntimeConfig: &RuntimeConfigSubprocess{ ProtocolCommands: []SubprocessProtocolCommand{ { - Protocols: []string{"myprotocol", "myprotocols"}, - Command: "echo getter", + Protocols: []string{"myprotocol", "myprotocols"}, + PlatformCommand: []PlatformCommand{{Command: "echo getter"}}, }, }, }, @@ -174,7 +175,7 @@ func TestPostRenderer(t *testing.T) { Runtime: "subprocess", Config: &ConfigPostrenderer{}, RuntimeConfig: &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{ + PlatformCommand: []PlatformCommand{ { Command: "${HELM_PLUGIN_DIR}/sed-test.sh", }, diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index fbe7a16b8..1c4f02836 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -144,15 +144,32 @@ func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig { protocolCommands = make([]SubprocessProtocolCommand, 0, len(m.Downloaders)) for _, d := range m.Downloaders { - protocolCommands = append(protocolCommands, SubprocessProtocolCommand(d)) + protocolCommands = append(protocolCommands, 
SubprocessProtocolCommand{ + Protocols: d.Protocols, + PlatformCommand: []PlatformCommand{{Command: d.Command}}, + }) + } + } + + platformCommand := m.PlatformCommand + if len(platformCommand) == 0 && len(m.Command) > 0 { + platformCommand = []PlatformCommand{{Command: m.Command}} + } + + platformHooks := m.PlatformHooks + expandHookArgs := true + if len(platformHooks) == 0 && len(m.Hooks) > 0 { + platformHooks = make(PlatformHooks, len(m.Hooks)) + for hookName, hookCommand := range m.Hooks { + platformHooks[hookName] = []PlatformCommand{{Command: "sh", Args: []string{"-c", hookCommand}}} + expandHookArgs = false } } return &RuntimeConfigSubprocess{ - PlatformCommands: m.PlatformCommands, - Command: m.Command, - PlatformHooks: m.PlatformHooks, - Hooks: m.Hooks, + PlatformCommand: platformCommand, + PlatformHooks: platformHooks, ProtocolCommands: protocolCommands, + expandHookArgs: expandHookArgs, } } diff --git a/internal/plugin/metadata_legacy.go b/internal/plugin/metadata_legacy.go index ce9c2f580..a7b245dc0 100644 --- a/internal/plugin/metadata_legacy.go +++ b/internal/plugin/metadata_legacy.go @@ -45,8 +45,8 @@ type MetadataLegacy struct { // Description is a long description shown in places like `helm help` Description string `yaml:"description"` - // PlatformCommands is the plugin command, with a platform selector and support for args. - PlatformCommands []PlatformCommand `yaml:"platformCommand"` + // PlatformCommand is the plugin command, with a platform selector and support for args. + PlatformCommand []PlatformCommand `yaml:"platformCommand"` // Command is the plugin command, as a single string. // DEPRECATED: Use PlatformCommand instead. Removed in subprocess/v1 plugins. @@ -73,7 +73,7 @@ func (m *MetadataLegacy) Validate() error { } m.Usage = sanitizeString(m.Usage) - if len(m.PlatformCommands) > 0 && len(m.Command) > 0 { + if len(m.PlatformCommand) > 0 && len(m.Command) > 0 { return fmt.Errorf("both platformCommand and command are set") } diff --git a/internal/plugin/metadata_test.go b/internal/plugin/metadata_test.go index 810020a67..28bc4cf51 100644 --- a/internal/plugin/metadata_test.go +++ b/internal/plugin/metadata_test.go @@ -25,44 +25,25 @@ func TestValidatePluginData(t *testing.T) { // A mock plugin with no commands mockNoCommand := mockSubprocessCLIPlugin(t, "foo") mockNoCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{}, - PlatformHooks: map[string][]PlatformCommand{}, + PlatformCommand: []PlatformCommand{}, + PlatformHooks: map[string][]PlatformCommand{}, } // A mock plugin with legacy commands mockLegacyCommand := mockSubprocessCLIPlugin(t, "foo") mockLegacyCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{}, - Command: "echo \"mock plugin\"", - PlatformHooks: map[string][]PlatformCommand{}, - Hooks: map[string]string{ - Install: "echo installing...", - }, - } - - // A mock plugin with a command also set - mockWithCommand := mockSubprocessCLIPlugin(t, "foo") - mockWithCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{ - {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, - }, - Command: "echo \"mock plugin\"", - } - - // A mock plugin with a hooks also set - mockWithHooks := mockSubprocessCLIPlugin(t, "foo") - mockWithHooks.metadata.RuntimeConfig = &RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{ - {OperatingSystem: "linux", Architecture: "", Command: "sh", 
Args: []string{"-c", "echo \"mock plugin\""}}, + PlatformCommand: []PlatformCommand{ + { + Command: "echo \"mock plugin\"", + }, }, PlatformHooks: map[string][]PlatformCommand{ Install: { - {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, + PlatformCommand{ + Command: "echo installing...", + }, }, }, - Hooks: map[string]string{ - Install: "echo installing...", - }, } for i, item := range []struct { @@ -78,8 +59,6 @@ func TestValidatePluginData(t *testing.T) { {false, mockSubprocessCLIPlugin(t, "foo\nbar"), "invalid name"}, // Test newline {true, mockNoCommand, ""}, // Test no command metadata works {true, mockLegacyCommand, ""}, // Test legacy command metadata works - {false, mockWithCommand, "runtime config validation failed: both platformCommand and command are set"}, // Test platformCommand and command both set fails - {false, mockWithHooks, "runtime config validation failed: both platformHooks and hooks are set"}, // Test platformHooks and hooks both set fails } { err := item.plug.Metadata().Validate() if item.pass && err != nil { diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index fbebecac4..bddabd136 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -23,7 +23,7 @@ func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginR t.Helper() rc := RuntimeConfigSubprocess{ - PlatformCommands: []PlatformCommand{ + PlatformCommand: []PlatformCommand{ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}}, }, diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index e7faeed36..6961d1fa5 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -33,28 +33,25 @@ import ( type SubprocessProtocolCommand struct { // Protocols are the list of schemes from the charts URL. Protocols []string `yaml:"protocols"` - // Command is the executable path with which the plugin performs - // the actual download for the corresponding Protocols - Command string `yaml:"command"` + // PlatformCommand is the platform based command which the plugin performs + // to download for the corresponding getter Protocols. + PlatformCommand []PlatformCommand `yaml:"platformCommand"` } // RuntimeConfigSubprocess implements RuntimeConfig for RuntimeSubprocess type RuntimeConfigSubprocess struct { // PlatformCommand is a list containing a plugin command, with a platform selector and support for args. - PlatformCommands []PlatformCommand `yaml:"platformCommand"` - // Command is the plugin command, as a single string. - // DEPRECATED: Use PlatformCommand instead. Remove in Helm 4. - Command string `yaml:"command"` + PlatformCommand []PlatformCommand `yaml:"platformCommand"` // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args. PlatformHooks PlatformHooks `yaml:"platformHooks"` - // Hooks are commands that will run on plugin events, as a single string. - // DEPRECATED: Use PlatformHooks instead. Remove in Helm 4. - Hooks Hooks `yaml:"hooks"` - // ProtocolCommands field is used if the plugin supply downloader mechanism - // for special protocols. 
- // (This is a compatibility hangover from the old plugin downloader mechanism, which was extended to support multiple - // protocols in a given plugin) + // ProtocolCommands allows the plugin to specify protocol specific commands + // + // Obsolete/deprecated: This is a compatibility hangover from the old plugin downloader mechanism, which was extended + // to support multiple protocols in a given plugin. The command supplied in PlatformCommand should implement protocol + // specific logic by inspecting the download URL ProtocolCommands []SubprocessProtocolCommand `yaml:"protocolCommands,omitempty"` + + expandHookArgs bool } var _ RuntimeConfig = (*RuntimeConfigSubprocess)(nil) @@ -62,12 +59,6 @@ var _ RuntimeConfig = (*RuntimeConfigSubprocess)(nil) func (r *RuntimeConfigSubprocess) GetType() string { return "subprocess" } func (r *RuntimeConfigSubprocess) Validate() error { - if len(r.PlatformCommands) > 0 && len(r.Command) > 0 { - return fmt.Errorf("both platformCommand and command are set") - } - if len(r.PlatformHooks) > 0 && len(r.Hooks) > 0 { - return fmt.Errorf("both platformHooks and hooks are set") - } return nil } @@ -138,25 +129,13 @@ func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env } func (r *SubprocessPluginRuntime) InvokeHook(event string) error { - // Get hook commands for the event - var cmds []PlatformCommand - expandArgs := true - - cmds = r.RuntimeConfig.PlatformHooks[event] - if len(cmds) == 0 && len(r.RuntimeConfig.Hooks) > 0 { - cmd := r.RuntimeConfig.Hooks[event] - if len(cmd) > 0 { - cmds = []PlatformCommand{{Command: "sh", Args: []string{"-c", cmd}}} - expandArgs = false - } - } + cmds := r.RuntimeConfig.PlatformHooks[event] - // If no hook commands are defined, just return successfully if len(cmds) == 0 { return nil } - main, argv, err := PrepareCommands(cmds, expandArgs, []string{}) + main, argv, err := PrepareCommands(cmds, r.RuntimeConfig.expandHookArgs, []string{}) if err != nil { return err } @@ -200,10 +179,7 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { extraArgs := input.Message.(schema.InputMessageCLIV1).ExtraArgs - cmds := r.RuntimeConfig.PlatformCommands - if len(cmds) == 0 && len(r.RuntimeConfig.Command) > 0 { - cmds = []PlatformCommand{{Command: r.RuntimeConfig.Command}} - } + cmds := r.RuntimeConfig.PlatformCommand command, args, err := PrepareCommands(cmds, true, extraArgs) if err != nil { @@ -232,11 +208,7 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) // Setup plugin environment SetupPluginEnv(settings, r.metadata.Name, r.pluginDir) - cmds := r.RuntimeConfig.PlatformCommands - if len(cmds) == 0 && len(r.RuntimeConfig.Command) > 0 { - cmds = []PlatformCommand{{Command: r.RuntimeConfig.Command}} - } - + cmds := r.RuntimeConfig.PlatformCommand command, args, err := PrepareCommands(cmds, true, extraArgs) if err != nil { return nil, fmt.Errorf("failed to prepare plugin command: %w", err) diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go index af2d0c572..d1884bc93 100644 --- a/internal/plugin/runtime_subprocess_getter.go +++ b/internal/plugin/runtime_subprocess_getter.go @@ -22,7 +22,6 @@ import ( "os/exec" "path/filepath" "slices" - "strings" "helm.sh/helm/v4/internal/plugin/schema" ) @@ -55,9 +54,12 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol) } - commands := strings.Split(d.Command, " ") 
- args := append( - commands[1:], + command, args, err := PrepareCommands(d.PlatformCommand, false, []string{}) + if err != nil { + return nil, fmt.Errorf("failed to prepare commands for protocol %q: %w", msg.Protocol, err) + } + args = append( + args, msg.Options.CertFile, msg.Options.KeyFile, msg.Options.CAFile, @@ -73,7 +75,7 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { // TODO should we pass along input.Stdout? buf := bytes.Buffer{} // subprocess getters are expected to write content to stdout - pluginCommand := filepath.Join(r.pluginDir, commands[0]) + pluginCommand := filepath.Join(r.pluginDir, command) prog := exec.Command( pluginCommand, args...) diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go index 3cb9325ab..16446cdec 100644 --- a/internal/plugin/subprocess_commands_test.go +++ b/internal/plugin/subprocess_commands_test.go @@ -27,14 +27,14 @@ func TestPrepareCommand(t *testing.T) { cmdMain := "sh" cmdArgs := []string{"-c", "echo \"test\""} - platformCommands := []PlatformCommand{ + platformCommand := []PlatformCommand{ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, } - cmd, args, err := PrepareCommands(platformCommands, true, []string{}) + cmd, args, err := PrepareCommands(platformCommand, true, []string{}) if err != nil { t.Fatal(err) } @@ -50,7 +50,7 @@ func TestPrepareCommandExtraArgs(t *testing.T) { cmdMain := "sh" cmdArgs := []string{"-c", "echo \"test\""} - platformCommands := []PlatformCommand{ + platformCommand := []PlatformCommand{ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, @@ -91,7 +91,7 @@ func TestPrepareCommandExtraArgs(t *testing.T) { if tc.ignoreFlags { testExtraArgs = []string{} } - cmd, args, err := PrepareCommands(platformCommands, true, testExtraArgs) + cmd, args, err := PrepareCommands(platformCommand, true, testExtraArgs) if err != nil { t.Fatal(err) } diff --git a/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml index 030ae6aca..344141121 100644 --- a/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml +++ b/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml @@ -9,8 +9,11 @@ config: description ignoreFlags: true runtimeConfig: - command: "echo hello" - hooks: - install: "echo installing..." - hooks: - install: "echo installing something different" + platformCommand: + - command: "echo hello" + platformHooks: + install: + - command: "echo installing..." 
+ platformHooks: + install: + - command: "echo installing something different" diff --git a/internal/plugin/testdata/plugdir/good/getter/plugin.yaml b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml index cfe80fbdc..7bdee9bde 100644 --- a/internal/plugin/testdata/plugdir/good/getter/plugin.yaml +++ b/internal/plugin/testdata/plugdir/good/getter/plugin.yaml @@ -10,7 +10,8 @@ config: - "myprotocols" runtimeConfig: protocolCommands: - - command: "echo getter" + - platformCommand: + - command: "echo getter" protocols: - "myprotocol" - "myprotocols" diff --git a/pkg/cmd/plugin_package_test.go b/pkg/cmd/plugin_package_test.go index df6cdd849..7d97562f8 100644 --- a/pkg/cmd/plugin_package_test.go +++ b/pkg/cmd/plugin_package_test.go @@ -34,7 +34,7 @@ config: shortHelp: A test plugin longHelp: A test plugin for testing purposes runtimeConfig: - platformCommands: + platformCommand: - os: linux command: echo args: ["test"]` diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index b476b80d2..738a64740 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -122,7 +122,7 @@ func TestLoadCLIPlugins(t *testing.T) { require.Len(t, plugins, len(tests), "Expected %d plugins, got %d", len(tests), len(plugins)) - for i := 0; i < len(plugins); i++ { + for i := range plugins { out.Reset() tt := tests[i] pp := plugins[i] diff --git a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml index 8b874da1d..a58544b03 100644 --- a/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml @@ -1,10 +1,12 @@ +--- +apiVersion: v1 name: fullenv type: cli/v1 -apiVersion: v1 runtime: subprocess config: shortHelp: "show env vars" longHelp: "show all env vars" ignoreFlags: false runtimeConfig: - command: "$HELM_PLUGIN_DIR/fullenv.sh" + platformCommand: + - command: "$HELM_PLUGIN_DIR/fullenv.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml index 57312cbfa..4156e7f17 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml @@ -7,4 +7,5 @@ config: longHelp: "This echos args" ignoreFlags: false runtimeConfig: - command: "$HELM_PLUGIN_DIR/args.sh" + platformCommand: + - command: "$HELM_PLUGIN_DIR/args.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml index 544efa85e..a0a0b5255 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml @@ -7,4 +7,5 @@ config: longHelp: "This echos stuff" ignoreFlags: false runtimeConfig: - command: "echo hello" + platformCommand: + - command: "echo hello" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml index d7a4c229c..fa933af93 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml @@ -1,10 +1,12 @@ +--- +apiVersion: v1 name: env type: cli/v1 -apiVersion: v1 runtime: subprocess config: shortHelp: "env stuff" longHelp: "show the env" ignoreFlags: false runtimeConfig: - command: "echo $HELM_PLUGIN_NAME" + platformCommand: + - command: "echo $HELM_PLUGIN_NAME" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml 
b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml index 06a350f83..ba9508255 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml @@ -1,10 +1,12 @@ +--- +apiVersion: v1 name: exitwith type: cli/v1 -apiVersion: v1 runtime: subprocess config: shortHelp: "exitwith code" longHelp: "This exits with the specified exit code" ignoreFlags: false runtimeConfig: - command: "$HELM_PLUGIN_DIR/exitwith.sh" + platformCommand: + - command: "$HELM_PLUGIN_DIR/exitwith.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml index 8b874da1d..a58544b03 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml @@ -1,10 +1,12 @@ +--- +apiVersion: v1 name: fullenv type: cli/v1 -apiVersion: v1 runtime: subprocess config: shortHelp: "show env vars" longHelp: "show all env vars" ignoreFlags: false runtimeConfig: - command: "$HELM_PLUGIN_DIR/fullenv.sh" + platformCommand: + - command: "$HELM_PLUGIN_DIR/fullenv.sh" diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml index 30f1599b4..d4cd57a13 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml @@ -1,8 +1,13 @@ +--- +apiVersion: v1 name: "postrenderer-v1" version: "1.2.3" type: postrenderer/v1 -apiVersion: v1 runtime: subprocess +config: + shortHelp: "echo test" + longHelp: "This echos test" + ignoreFlags: false runtimeConfig: platformCommand: - - command: "${HELM_PLUGIN_DIR}/sed-test.sh" + - command: "${HELM_PLUGIN_DIR}/sed-test.sh" diff --git a/pkg/cmd/testdata/testplugin/plugin.yaml b/pkg/cmd/testdata/testplugin/plugin.yaml new file mode 100644 index 000000000..3ee5d04f6 --- /dev/null +++ b/pkg/cmd/testdata/testplugin/plugin.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +name: testplugin +type: cli/v1 +runtime: subprocess +config: + shortHelp: "echo test" + longHelp: "This echos test" + ignoreFlags: false +runtimeConfig: + platformCommand: + - command: "echo test" diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 8e0619635..23cfc80f8 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -112,7 +112,7 @@ func (t *testPlugin) Metadata() plugin.Metadata { Runtime: "subprocess", Config: &plugin.ConfigCLI{}, RuntimeConfig: &plugin.RuntimeConfigSubprocess{ - PlatformCommands: []plugin.PlatformCommand{ + PlatformCommand: []plugin.PlatformCommand{ { Command: "echo fake-plugin", }, diff --git a/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml index 30f1599b4..423a5191e 100644 --- a/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml +++ b/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml @@ -5,4 +5,4 @@ apiVersion: v1 runtime: subprocess runtimeConfig: platformCommand: - - command: "${HELM_PLUGIN_DIR}/sed-test.sh" + - command: "${HELM_PLUGIN_DIR}/sed-test.sh" From 89aca09e5e40674f63c1d01cfcd69bfac0dc219d Mon Sep 17 00:00:00 2001 From: tzchenxixi Date: Mon, 1 Sep 2025 18:30:27 +0800 Subject: [PATCH 74/88] chore: fix function name Signed-off-by: tzchenxixi --- pkg/kube/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kube/client.go 
b/pkg/kube/client.go index c41165490..26ba7abfc 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -214,7 +214,7 @@ type clientCreateOptions struct { type ClientCreateOption func(*clientCreateOptions) error -// ClientUpdateOptionServerSideApply enables performing object apply server-side +// ClientCreateOptionServerSideApply enables performing object apply server-side // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/ // // `forceConflicts` forces conflicts to be resolved (may be when serverSideApply enabled only) From 5595c0d00587892beb03505dd99ae3ec31ceefa9 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Mon, 1 Sep 2025 17:48:35 +0200 Subject: [PATCH 75/88] Prevent failing helm push on ghcr.io using standard GET auth token flow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix GHCR auth by not forcing OAuth2 POST but also reset ForceAttemptOAuth2 after login. - Remove ForceAttemptOAuth2 in NewClient and only enable during Login ping and always restore to false. - Aligns with OCI Distribution auth (token via GET), avoiding GHCR 405 on POST /token. - Some tests Failures logs: ```sh ~/p/lifen/test/helm-f/quicktest ❯ ../../../helm/bin/helm push quicktest-0.1.0.tgz oci://ghcr.io/benoittgt/helm-charts --debug level=DEBUG msg=HEAD id=0 url=https://ghcr.io/v2/benoittgt/helm-charts/quicktest/manifests/sha256:af359fd8fb968ec1097afbd6e8e1dac9ee130861082e54dc2340d0c019407873 header=" \"Accept\": \"application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, application/vnd.oci.artifact.manifest.v1+json\"\n \"User-Agent\": \"Helm/4.0+unreleased\"" level=DEBUG msg=Resp id=0 status="401 Unauthorized" header=" \"Www-Authenticate\": \"Bearer realm=\\\"https://ghcr.io/token\\\",service=\\\"ghcr.io\\\",scope=\\\"repository:benoittgt/helm-charts/quicktest:pull\\\"\"\n \"Date\": \"Mon, 01 Sep 2025 13:56:35 GMT\"\n \"Content-Length\": \"73\"\n \"X-Github-Request-Id\": \"DC73:115F:2B40F2C:2BAB567:68B5A613\"\n \"Content-Type\": \"application/json\"" body=" Response body is empty" level=DEBUG msg=POST id=1 url=https://ghcr.io/token header=" \"Content-Type\": \"application/x-www-form-urlencoded\"\n \"User-Agent\": \"Helm/4.0+unreleased\"" level=DEBUG msg=Resp id=1 status="405 Method Not Allowed" header=" \"Docker-Distribution-Api-Version\": \"registry/2.0\"\n \"Strict-Transport-Security\": \"max-age=63072000; includeSubDomains; preload\"\n \"Date\": \"Mon, 01 Sep 2025 13:56:35 GMT\"\n \"Content-Length\": \"78\"\n \"X-Github-Request-Id\": \"DC73:115F:2B40F75:2BAB5C2:68B5A613\"\n \"Content-Type\": \"application/json\"" body="{\"errors\":[{\"code\":\"UNSUPPORTED\",\"message\":\"The operation is unsupported.\"}]}\n" Error: failed to perform "Exists" on destination: HEAD "https://ghcr.io/v2/benoittgt/helm-charts/quicktest/manifests/sha256:af359fd8fb968ec1097afbd6e8e1dac9ee130861082e54dc2340d0c019407873": POST "https://ghcr.io/token": response status code 405: unsupported: The operation is unsupported. 
``` Signed-off-by: Benoit Tigeot --- pkg/registry/client.go | 4 +-- pkg/registry/client_test.go | 69 +++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 7ba26ac5c..95250f8da 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -137,8 +137,6 @@ func NewClient(options ...ClientOption) (*Client, error) { if client.enableCache { authorizer.Cache = auth.NewCache() } - - authorizer.ForceAttemptOAuth2 = true client.authorizer = &authorizer } @@ -251,6 +249,8 @@ func (c *Client) Login(host string, options ...LoginOption) error { return fmt.Errorf("authenticating to %q: %w", host, err) } } + // Always restore to false after probing, to avoid forcing POST to token endpoints like GHCR. + c.authorizer.ForceAttemptOAuth2 = false key := credentials.ServerAddressFromRegistry(host) key = credentials.ServerAddressFromHostname(key) diff --git a/pkg/registry/client_test.go b/pkg/registry/client_test.go index 2ffd691c2..6ae32e342 100644 --- a/pkg/registry/client_test.go +++ b/pkg/registry/client_test.go @@ -18,6 +18,10 @@ package registry import ( "io" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" "testing" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -51,3 +55,68 @@ func TestTagManifestTransformsReferences(t *testing.T) { _, err = memStore.Resolve(ctx, refWithPlus) require.Error(t, err, "Should NOT find the reference with the original +") } + +// Verifies that Login always restores ForceAttemptOAuth2 to false on success. +func TestLogin_ResetsForceAttemptOAuth2_OnSuccess(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + // Accept either HEAD or GET + w.WriteHeader(http.StatusOK) + return + } + http.NotFound(w, r) + })) + defer srv.Close() + + host := strings.TrimPrefix(srv.URL, "http://") + + credFile := filepath.Join(t.TempDir(), "config.json") + c, err := NewClient( + ClientOptWriter(io.Discard), + ClientOptCredentialsFile(credFile), + ) + if err != nil { + t.Fatalf("NewClient error: %v", err) + } + + if c.authorizer == nil || c.authorizer.ForceAttemptOAuth2 { + t.Fatalf("expected ForceAttemptOAuth2 default to be false") + } + + // Call Login with plain HTTP against our test server + if err := c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p")); err != nil { + t.Fatalf("Login error: %v", err) + } + + if c.authorizer.ForceAttemptOAuth2 { + t.Errorf("ForceAttemptOAuth2 should be false after successful Login") + } +} + +// Verifies that Login restores ForceAttemptOAuth2 to false even when ping fails. 
+func TestLogin_ResetsForceAttemptOAuth2_OnFailure(t *testing.T) { + t.Parallel() + + // Start and immediately close, so connections will fail + srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) + host := strings.TrimPrefix(srv.URL, "http://") + srv.Close() + + credFile := filepath.Join(t.TempDir(), "config.json") + c, err := NewClient( + ClientOptWriter(io.Discard), + ClientOptCredentialsFile(credFile), + ) + if err != nil { + t.Fatalf("NewClient error: %v", err) + } + + // Invoke Login, expect an error but ForceAttemptOAuth2 must end false + _ = c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p")) + + if c.authorizer.ForceAttemptOAuth2 { + t.Errorf("ForceAttemptOAuth2 should be false after failed Login") + } +} From d99d73254261d851d5f2fd8dad45b8881d1b9638 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Mon, 1 Sep 2025 09:39:38 -0700 Subject: [PATCH 76/88] fix: Adjust PostRenderer plugin output to value Signed-off-by: George Jenkins --- internal/plugin/runtime_subprocess.go | 2 +- pkg/postrenderer/postrenderer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index e7faeed36..55362b972 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -270,7 +270,7 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) } return &Output{ - Message: &schema.OutputMessagePostRendererV1{ + Message: schema.OutputMessagePostRendererV1{ Manifests: postRendered, }, }, nil diff --git a/pkg/postrenderer/postrenderer.go b/pkg/postrenderer/postrenderer.go index 2107cc465..ed6699c32 100644 --- a/pkg/postrenderer/postrenderer.go +++ b/pkg/postrenderer/postrenderer.go @@ -73,7 +73,7 @@ func (r *postRendererPlugin) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer return nil, fmt.Errorf("failed to invoke post-renderer plugin %q: %w", r.plugin.Metadata().Name, err) } - outputMessage := output.Message.(*schema.OutputMessagePostRendererV1) + outputMessage := output.Message.(schema.OutputMessagePostRendererV1) // If the binary returned almost nothing, it's likely that it didn't // successfully render anything From ee37c00c33e96c9c8747560c5e967e496547b33b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Sep 2025 16:57:28 +0000 Subject: [PATCH 77/88] chore(deps): bump sigs.k8s.io/controller-runtime from 0.21.0 to 0.22.0 Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.21.0 to 0.22.0. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-version: 0.22.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3c9992dce..ab8797e6f 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.34.0 oras.land/oras-go/v2 v2.6.0 - sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/controller-runtime v0.22.0 sigs.k8s.io/kustomize/kyaml v0.20.1 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index d9e7c3d3d..076b6e5bd 100644 --- a/go.sum +++ b/go.sum @@ -532,8 +532,8 @@ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/controller-runtime v0.22.0 h1:mTOfibb8Hxwpx3xEkR56i7xSjB+nH4hZG37SrlCY5e0= +sigs.k8s.io/controller-runtime v0.22.0/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= From 5926ec83dd4760d02f316652a801f0812af39d87 Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Fri, 22 Aug 2025 15:06:09 -0700 Subject: [PATCH 78/88] Remove SetupPluginEnv Signed-off-by: George Jenkins --- cmd/helm/helm.go | 8 +- cmd/helm/helm_test.go | 20 +-- internal/plugin/error.go | 4 +- internal/plugin/plugin_test.go | 2 + internal/plugin/runtime.go | 9 ++ internal/plugin/runtime_extismv1.go | 2 +- internal/plugin/runtime_subprocess.go | 117 ++++++++++-------- internal/plugin/runtime_subprocess_getter.go | 34 ++--- internal/plugin/runtime_subprocess_test.go | 92 ++++++++------ internal/plugin/runtime_test.go | 37 ++++++ internal/plugin/schema/postrenderer.go | 5 +- internal/plugin/subprocess_commands.go | 6 +- internal/plugin/subprocess_commands_test.go | 31 +++-- pkg/cmd/load_plugins.go | 16 +-- pkg/cmd/plugin.go | 1 - pkg/cmd/plugin_test.go | 86 +++++++------ pkg/cmd/root.go | 5 + .../helmhome/helm/plugins/env/plugin-name.sh | 3 + .../helmhome/helm/plugins/env/plugin.yaml | 2 +- .../helmhome/helm/plugins/fullenv/fullenv.sh | 12 +- pkg/postrenderer/postrenderer.go | 1 - pkg/postrenderer/postrenderer_test.go | 8 -- 22 files changed, 296 insertions(+), 205 deletions(-) create mode 100755 pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index 05e7e7ba2..66d342500 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -41,11 +41,9 @@ func main() { } if err := cmd.Execute(); err != nil { - switch e := err.(type) { - case helmcmd.PluginError: - os.Exit(e.Code) - default: - os.Exit(1) + if cerr, ok := err.(helmcmd.CommandError); ok { + os.Exit(cerr.ExitCode) } + os.Exit(1) } } diff --git a/cmd/helm/helm_test.go b/cmd/helm/helm_test.go index 5431daad0..0458e8037 100644 --- a/cmd/helm/helm_test.go +++ b/cmd/helm/helm_test.go @@ -22,11 +22,13 @@ import ( "os/exec" "runtime" "testing" + + "github.com/stretchr/testify/assert" ) -func TestPluginExitCode(t *testing.T) { +func TestCliPluginExitCode(t 
*testing.T) { if os.Getenv("RUN_MAIN_FOR_TESTING") == "1" { - os.Args = []string{"helm", "exitwith", "2"} + os.Args = []string{"helm", "exitwith", "43"} // We DO call helm's main() here. So this looks like a normal `helm` process. main() @@ -43,7 +45,7 @@ func TestPluginExitCode(t *testing.T) { // So that the second run is able to run main() and this first run can verify the exit status returned by that. // // This technique originates from https://talks.golang.org/2014/testing.slide#23. - cmd := exec.Command(os.Args[0], "-test.run=TestPluginExitCode") + cmd := exec.Command(os.Args[0], "-test.run=TestCliPluginExitCode") cmd.Env = append( os.Environ(), "RUN_MAIN_FOR_TESTING=1", @@ -57,23 +59,21 @@ func TestPluginExitCode(t *testing.T) { cmd.Stdout = stdout cmd.Stderr = stderr err := cmd.Run() - exiterr, ok := err.(*exec.ExitError) + exiterr, ok := err.(*exec.ExitError) if !ok { - t.Fatalf("Unexpected error returned by os.Exit: %T", err) + t.Fatalf("Unexpected error type returned by os.Exit: %T", err) } - if stdout.String() != "" { - t.Errorf("Expected no write to stdout: Got %q", stdout.String()) - } + assert.Empty(t, stdout.String()) expectedStderr := "Error: plugin \"exitwith\" exited with error\n" if stderr.String() != expectedStderr { t.Errorf("Expected %q written to stderr: Got %q", expectedStderr, stderr.String()) } - if exiterr.ExitCode() != 2 { - t.Errorf("Expected exit code 2: Got %d", exiterr.ExitCode()) + if exiterr.ExitCode() != 43 { + t.Errorf("Expected exit code 43: Got %d", exiterr.ExitCode()) } } } diff --git a/internal/plugin/error.go b/internal/plugin/error.go index 5ace680cb..212460cea 100644 --- a/internal/plugin/error.go +++ b/internal/plugin/error.go @@ -19,8 +19,8 @@ package plugin // - subprocess plugin: child process exit code // - extism plugin: wasm function return code type InvokeExecError struct { - Err error // Underlying error - Code int // Exeit code from plugin code execution + ExitCode int // Exit code from plugin code execution + Err error // Underlying error } // Error implements the error interface diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index bddabd136..a4de8e52a 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -24,11 +24,13 @@ func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginR rc := RuntimeConfigSubprocess{ PlatformCommand: []PlatformCommand{ + {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}}, {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}}, }, PlatformHooks: map[string][]PlatformCommand{ Install: { + {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}}, {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}}, }, diff --git a/internal/plugin/runtime.go b/internal/plugin/runtime.go index a9c01a380..b2ff0b7ca 100644 --- a/internal/plugin/runtime.go +++ b/internal/plugin/runtime.go @@ -16,6 +16,7 @@ limitations under the License. 
package plugin import ( + "fmt" "strings" "go.yaml.in/yaml/v3" @@ -73,3 +74,11 @@ func parseEnv(env []string) map[string]string { } return result } + +func formatEnv(env map[string]string) []string { + result := make([]string, 0, len(env)) + for key, value := range env { + result = append(result, fmt.Sprintf("%s=%s", key, value)) + } + return result +} diff --git a/internal/plugin/runtime_extismv1.go b/internal/plugin/runtime_extismv1.go index c0122d08f..b5cc79a6f 100644 --- a/internal/plugin/runtime_extismv1.go +++ b/internal/plugin/runtime_extismv1.go @@ -196,7 +196,7 @@ func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Outp if exitCode != 0 { return nil, &InvokeExecError{ - Code: int(exitCode), + ExitCode: int(exitCode), } } diff --git a/internal/plugin/runtime_subprocess.go b/internal/plugin/runtime_subprocess.go index a1a698679..5e6676a00 100644 --- a/internal/plugin/runtime_subprocess.go +++ b/internal/plugin/runtime_subprocess.go @@ -21,12 +21,12 @@ import ( "fmt" "io" "log/slog" + "maps" "os" "os/exec" - "syscall" + "slices" "helm.sh/helm/v4/internal/plugin/schema" - "helm.sh/helm/v4/pkg/cli" ) // SubprocessProtocolCommand maps a given protocol to the getter command used to retrieve artifacts for that protcol @@ -62,7 +62,9 @@ func (r *RuntimeConfigSubprocess) Validate() error { return nil } -type RuntimeSubprocess struct{} +type RuntimeSubprocess struct { + EnvVars map[string]string +} var _ Runtime = (*RuntimeSubprocess)(nil) @@ -72,6 +74,7 @@ func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) ( metadata: *metadata, pluginDir: pluginDir, RuntimeConfig: *(metadata.RuntimeConfig.(*RuntimeConfigSubprocess)), + EnvVars: maps.Clone(r.EnvVars), }, nil } @@ -80,6 +83,7 @@ type SubprocessPluginRuntime struct { metadata Metadata pluginDir string RuntimeConfig RuntimeConfigSubprocess + EnvVars map[string]string } var _ Plugin = (*SubprocessPluginRuntime)(nil) @@ -109,22 +113,22 @@ func (r *SubprocessPluginRuntime) Invoke(_ context.Context, input *Input) (*Outp // This method allows execution with different command/args than the plugin's default func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env []string, stdin io.Reader, stdout, stderr io.Writer) error { mainCmdExp := os.ExpandEnv(main) - prog := exec.Command(mainCmdExp, argv...) - prog.Env = env - prog.Stdin = stdin - prog.Stdout = stdout - prog.Stderr = stderr + cmd := exec.Command(mainCmdExp, argv...) + cmd.Env = slices.Clone(os.Environ()) + cmd.Env = append( + cmd.Env, + fmt.Sprintf("HELM_PLUGIN_NAME=%s", r.metadata.Name), + fmt.Sprintf("HELM_PLUGIN_DIR=%s", r.pluginDir)) + cmd.Env = append(cmd.Env, env...) 
+ + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr - if err := prog.Run(); err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) - status := eerr.Sys().(syscall.WaitStatus) - return &InvokeExecError{ - Err: fmt.Errorf("plugin %q exited with error", r.metadata.Name), - Code: status.ExitStatus(), - } - } + if err := executeCmd(cmd, r.metadata.Name); err != nil { + return err } + return nil } @@ -135,15 +139,23 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { return nil } - main, argv, err := PrepareCommands(cmds, r.RuntimeConfig.expandHookArgs, []string{}) + env := parseEnv(os.Environ()) + maps.Insert(env, maps.All(r.EnvVars)) + env["HELM_PLUGIN_NAME"] = r.metadata.Name + env["HELM_PLUGIN_DIR"] = r.pluginDir + + main, argv, err := PrepareCommands(cmds, r.RuntimeConfig.expandHookArgs, []string{}, env) if err != nil { return err } - prog := exec.Command(main, argv...) - prog.Stdout, prog.Stderr = os.Stdout, os.Stderr + cmd := exec.Command(main, argv...) + cmd.Env = formatEnv(env) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr - if err := prog.Run(); err != nil { + slog.Debug("executing plugin hook command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String())) + if err := cmd.Run(); err != nil { if eerr, ok := err.(*exec.ExitError); ok { os.Stderr.Write(eerr.Stderr) return fmt.Errorf("plugin %s hook for %q exited with error", event, r.metadata.Name) @@ -159,10 +171,15 @@ func (r *SubprocessPluginRuntime) InvokeHook(event string) error { func executeCmd(prog *exec.Cmd, pluginName string) error { if err := prog.Run(); err != nil { if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) + slog.Debug( + "plugin execution failed", + slog.String("pluginName", pluginName), + slog.String("error", err.Error()), + slog.Int("exitCode", eerr.ExitCode()), + slog.String("stderr", string(bytes.TrimSpace(eerr.Stderr)))) return &InvokeExecError{ - Err: fmt.Errorf("plugin %q exited with error", pluginName), - Code: eerr.ExitCode(), + Err: fmt.Errorf("plugin %q exited with error", pluginName), + ExitCode: eerr.ExitCode(), } } @@ -181,14 +198,27 @@ func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) { cmds := r.RuntimeConfig.PlatformCommand - command, args, err := PrepareCommands(cmds, true, extraArgs) + env := parseEnv(os.Environ()) + maps.Insert(env, maps.All(r.EnvVars)) + maps.Insert(env, maps.All(parseEnv(input.Env))) + env["HELM_PLUGIN_NAME"] = r.metadata.Name + env["HELM_PLUGIN_DIR"] = r.pluginDir + + command, args, err := PrepareCommands(cmds, true, extraArgs, env) if err != nil { return nil, fmt.Errorf("failed to prepare plugin command: %w", err) } - err2 := r.InvokeWithEnv(command, args, input.Env, input.Stdin, input.Stdout, input.Stderr) - if err2 != nil { - return nil, err2 + cmd := exec.Command(command, args...) 
+ cmd.Env = formatEnv(env) + + cmd.Stdin = input.Stdin + cmd.Stdout = input.Stdout + cmd.Stderr = input.Stderr + + slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String())) + if err := executeCmd(cmd, r.metadata.Name); err != nil { + return nil, err } return &Output{ @@ -201,20 +231,19 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name) } - msg := input.Message.(schema.InputMessagePostRendererV1) - extraArgs := msg.ExtraArgs - settings := msg.Settings - - // Setup plugin environment - SetupPluginEnv(settings, r.metadata.Name, r.pluginDir) + env := parseEnv(os.Environ()) + maps.Insert(env, maps.All(r.EnvVars)) + maps.Insert(env, maps.All(parseEnv(input.Env))) + env["HELM_PLUGIN_NAME"] = r.metadata.Name + env["HELM_PLUGIN_DIR"] = r.pluginDir + msg := input.Message.(schema.InputMessagePostRendererV1) cmds := r.RuntimeConfig.PlatformCommand - command, args, err := PrepareCommands(cmds, true, extraArgs) + command, args, err := PrepareCommands(cmds, true, msg.ExtraArgs, env) if err != nil { return nil, fmt.Errorf("failed to prepare plugin command: %w", err) } - // TODO de-duplicate code here by calling RuntimeSubprocess.invokeWithEnv() cmd := exec.Command( command, args...) @@ -232,12 +261,12 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) postRendered := &bytes.Buffer{} stderr := &bytes.Buffer{} - //cmd.Env = pluginExec.env + cmd.Env = formatEnv(env) cmd.Stdout = postRendered cmd.Stderr = stderr + slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String())) if err := executeCmd(cmd, r.metadata.Name); err != nil { - slog.Info("plugin execution failed", slog.String("stderr", stderr.String())) return nil, err } @@ -247,15 +276,3 @@ func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) }, }, nil } - -// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because -// the plugin subsystem itself needs access to the environment variables -// created here. 
-func SetupPluginEnv(settings *cli.EnvSettings, name, base string) { // TODO: remove - env := settings.EnvVars() - env["HELM_PLUGIN_NAME"] = name - env["HELM_PLUGIN_DIR"] = base - for key, val := range env { - os.Setenv(key, val) - } -} diff --git a/internal/plugin/runtime_subprocess_getter.go b/internal/plugin/runtime_subprocess_getter.go index d1884bc93..6a41b149f 100644 --- a/internal/plugin/runtime_subprocess_getter.go +++ b/internal/plugin/runtime_subprocess_getter.go @@ -18,6 +18,8 @@ package plugin import ( "bytes" "fmt" + "log/slog" + "maps" "os" "os/exec" "path/filepath" @@ -54,10 +56,20 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol) } - command, args, err := PrepareCommands(d.PlatformCommand, false, []string{}) + env := parseEnv(os.Environ()) + maps.Insert(env, maps.All(r.EnvVars)) + maps.Insert(env, maps.All(parseEnv(input.Env))) + env["HELM_PLUGIN_NAME"] = r.metadata.Name + env["HELM_PLUGIN_DIR"] = r.pluginDir + env["HELM_PLUGIN_USERNAME"] = msg.Options.Username + env["HELM_PLUGIN_PASSWORD"] = msg.Options.Password + env["HELM_PLUGIN_PASS_CREDENTIALS_ALL"] = fmt.Sprintf("%t", msg.Options.PassCredentialsAll) + + command, args, err := PrepareCommands(d.PlatformCommand, false, []string{}, env) if err != nil { return nil, fmt.Errorf("failed to prepare commands for protocol %q: %w", msg.Protocol, err) } + args = append( args, msg.Options.CertFile, @@ -65,24 +77,18 @@ func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) { msg.Options.CAFile, msg.Href) - // TODO should we append to input.Env too? - env := append( - os.Environ(), - fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", msg.Options.Username), - fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", msg.Options.Password), - fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", msg.Options.PassCredentialsAll)) - - // TODO should we pass along input.Stdout? buf := bytes.Buffer{} // subprocess getters are expected to write content to stdout pluginCommand := filepath.Join(r.pluginDir, command) - prog := exec.Command( + cmd := exec.Command( pluginCommand, args...) - prog.Env = env - prog.Stdout = &buf - prog.Stderr = os.Stderr - if err := executeCmd(prog, r.metadata.Name); err != nil { + cmd.Env = formatEnv(env) + cmd.Stdout = &buf + cmd.Stderr = os.Stderr + + slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String())) + if err := executeCmd(cmd, r.metadata.Name); err != nil { return nil, err } diff --git a/internal/plugin/runtime_subprocess_test.go b/internal/plugin/runtime_subprocess_test.go index 9d932816d..dab372027 100644 --- a/internal/plugin/runtime_subprocess_test.go +++ b/internal/plugin/runtime_subprocess_test.go @@ -16,49 +16,69 @@ limitations under the License. 
package plugin import ( + "fmt" "os" "path/filepath" "testing" - "helm.sh/helm/v4/pkg/cli" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v3" + + "helm.sh/helm/v4/internal/plugin/schema" ) -func TestSetupEnv(t *testing.T) { - name := "pequod" - base := filepath.Join("testdata/helmhome/helm/plugins", name) - - s := cli.New() - s.PluginsDirectory = "testdata/helmhome/helm/plugins" - - SetupPluginEnv(s, name, base) - for _, tt := range []struct { - name, expect string - }{ - {"HELM_PLUGIN_NAME", name}, - {"HELM_PLUGIN_DIR", base}, - } { - if got := os.Getenv(tt.name); got != tt.expect { - t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) - } +func mockSubprocessCLIPluginErrorExit(t *testing.T, pluginName string, exitCode uint8) *SubprocessPluginRuntime { + t.Helper() + + rc := RuntimeConfigSubprocess{ + PlatformCommand: []PlatformCommand{ + {Command: "sh", Args: []string{"-c", fmt.Sprintf("echo \"mock plugin $@\"; exit %d", exitCode)}}, + }, + } + + pluginDir := t.TempDir() + + md := Metadata{ + Name: pluginName, + Version: "v0.1.2", + Type: "cli/v1", + APIVersion: "v1", + Runtime: "subprocess", + Config: &ConfigCLI{ + Usage: "Mock plugin", + ShortHelp: "Mock plugin", + LongHelp: "Mock plugin for testing", + IgnoreFlags: false, + }, + RuntimeConfig: &rc, } -} -func TestSetupEnvWithSpace(t *testing.T) { - name := "sureshdsk" - base := filepath.Join("testdata/helm home/helm/plugins", name) - - s := cli.New() - s.PluginsDirectory = "testdata/helm home/helm/plugins" - - SetupPluginEnv(s, name, base) - for _, tt := range []struct { - name, expect string - }{ - {"HELM_PLUGIN_NAME", name}, - {"HELM_PLUGIN_DIR", base}, - } { - if got := os.Getenv(tt.name); got != tt.expect { - t.Errorf("Expected $%s=%q, got %q", tt.name, tt.expect, got) - } + data, err := yaml.Marshal(md) + require.NoError(t, err) + os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), data, 0o644) + + return &SubprocessPluginRuntime{ + metadata: md, + pluginDir: pluginDir, + RuntimeConfig: rc, } } + +func TestSubprocessPluginRuntime(t *testing.T) { + p := mockSubprocessCLIPluginErrorExit(t, "foo", 56) + + output, err := p.Invoke(t.Context(), &Input{ + Message: schema.InputMessageCLIV1{ + ExtraArgs: []string{"arg1", "arg2"}, + //Env: []string{"FOO=bar"}, + }, + }) + + require.Error(t, err) + ieerr, ok := err.(*InvokeExecError) + require.True(t, ok, "expected InvokeExecError, got %T", err) + assert.Equal(t, 56, ieerr.ExitCode) + + assert.Nil(t, output) +} diff --git a/internal/plugin/runtime_test.go b/internal/plugin/runtime_test.go index 8b72648b2..f8fe481c1 100644 --- a/internal/plugin/runtime_test.go +++ b/internal/plugin/runtime_test.go @@ -61,3 +61,40 @@ func TestParseEnv(t *testing.T) { }) } } + +func TestFormatEnv(t *testing.T) { + type testCase struct { + env map[string]string + expected []string + } + + testCases := map[string]testCase{ + "empty": { + env: map[string]string{}, + expected: []string{}, + }, + "single": { + env: map[string]string{"KEY": "value"}, + expected: []string{"KEY=value"}, + }, + "multiple": { + env: map[string]string{"KEY1": "value1", "KEY2": "value2"}, + expected: []string{"KEY1=value1", "KEY2=value2"}, + }, + "empty_key": { + env: map[string]string{"": "value1", "KEY2": "value2"}, + expected: []string{"=value1", "KEY2=value2"}, + }, + "empty_value": { + env: map[string]string{"KEY1": "value1", "KEY2": "", "KEY3": "value3"}, + expected: []string{"KEY1=value1", "KEY2=", "KEY3=value3"}, + }, + } + + for name, tc := range testCases { + 
t.Run(name, func(t *testing.T) { + result := formatEnv(tc.env) + assert.ElementsMatch(t, tc.expected, result) + }) + } +} diff --git a/internal/plugin/schema/postrenderer.go b/internal/plugin/schema/postrenderer.go index 0f0c09369..82fd3059f 100644 --- a/internal/plugin/schema/postrenderer.go +++ b/internal/plugin/schema/postrenderer.go @@ -18,16 +18,13 @@ package schema import ( "bytes" - - "helm.sh/helm/v4/pkg/cli" ) // InputMessagePostRendererV1 implements Input.Message type InputMessagePostRendererV1 struct { Manifests *bytes.Buffer `json:"manifests"` // from CLI --post-renderer-args - ExtraArgs []string `json:"extraArgs"` - Settings *cli.EnvSettings `json:"settings"` + ExtraArgs []string `json:"extraArgs"` } type OutputMessagePostRendererV1 struct { diff --git a/internal/plugin/subprocess_commands.go b/internal/plugin/subprocess_commands.go index d979f98e3..e21ec2bab 100644 --- a/internal/plugin/subprocess_commands.go +++ b/internal/plugin/subprocess_commands.go @@ -77,13 +77,15 @@ func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) { // returns the main command and an args array. // // The result is suitable to pass to exec.Command. -func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) { +func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string, env map[string]string) (string, []string, error) { cmdParts, args := getPlatformCommand(cmds) if len(cmdParts) == 0 || cmdParts[0] == "" { return "", nil, fmt.Errorf("no plugin command is applicable") } - main := os.ExpandEnv(cmdParts[0]) + main := os.Expand(cmdParts[0], func(key string) string { + return env[key] + }) baseArgs := []string{} if len(cmdParts) > 1 { for _, cmdPart := range cmdParts[1:] { diff --git a/internal/plugin/subprocess_commands_test.go b/internal/plugin/subprocess_commands_test.go index 16446cdec..c1eba7a55 100644 --- a/internal/plugin/subprocess_commands_test.go +++ b/internal/plugin/subprocess_commands_test.go @@ -34,7 +34,8 @@ func TestPrepareCommand(t *testing.T) { {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs}, } - cmd, args, err := PrepareCommands(platformCommand, true, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(platformCommand, true, []string{}, env) if err != nil { t.Fatal(err) } @@ -91,7 +92,9 @@ func TestPrepareCommandExtraArgs(t *testing.T) { if tc.ignoreFlags { testExtraArgs = []string{} } - cmd, args, err := PrepareCommands(platformCommand, true, testExtraArgs) + + env := map[string]string{} + cmd, args, err := PrepareCommands(platformCommand, true, testExtraArgs, env) if err != nil { t.Fatal(err) } @@ -112,7 +115,8 @@ func TestPrepareCommands(t *testing.T) { {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, } - cmd, args, err := PrepareCommands(cmds, true, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, true, []string{}, env) if err != nil { t.Fatal(err) } @@ -138,7 +142,8 @@ func TestPrepareCommandsExtraArgs(t *testing.T) { expectedArgs := append(cmdArgs, extraArgs...) 
- cmd, args, err := PrepareCommands(cmds, true, extraArgs) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, true, extraArgs, env) if err != nil { t.Fatal(err) } @@ -160,7 +165,8 @@ func TestPrepareCommandsNoArch(t *testing.T) { {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, } - cmd, args, err := PrepareCommands(cmds, true, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, true, []string{}, env) if err != nil { t.Fatal(err) } @@ -182,7 +188,8 @@ func TestPrepareCommandsNoOsNoArch(t *testing.T) { {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}}, } - cmd, args, err := PrepareCommands(cmds, true, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, true, []string{}, env) if err != nil { t.Fatal(err) } @@ -201,7 +208,8 @@ func TestPrepareCommandsNoMatch(t *testing.T) { {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}}, } - if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { + env := map[string]string{} + if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil { t.Fatalf("Expected error to be returned") } } @@ -209,7 +217,8 @@ func TestPrepareCommandsNoMatch(t *testing.T) { func TestPrepareCommandsNoCommands(t *testing.T) { cmds := []PlatformCommand{} - if _, _, err := PrepareCommands(cmds, true, []string{}); err == nil { + env := map[string]string{} + if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil { t.Fatalf("Expected error to be returned") } } @@ -224,7 +233,8 @@ func TestPrepareCommandsExpand(t *testing.T) { expectedArgs := []string{"-c", "echo \"test\""} - cmd, args, err := PrepareCommands(cmds, true, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, true, []string{}, env) if err != nil { t.Fatal(err) } @@ -244,7 +254,8 @@ func TestPrepareCommandsNoExpand(t *testing.T) { {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs}, } - cmd, args, err := PrepareCommands(cmds, false, []string{}) + env := map[string]string{} + cmd, args, err := PrepareCommands(cmds, false, []string{}, env) if err != nil { t.Fatal(err) } diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index 5057c1033..75cfdc3cf 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -46,11 +46,6 @@ const ( pluginDynamicCompletionExecutable = "plugin.complete" ) -type PluginError struct { - error - Code int -} - // loadCLIPlugins loads CLI plugins into the command list. // // This follows a different pattern than the other commands because it has @@ -101,8 +96,6 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { if err != nil { return err } - // Setup plugin environment - plugin.SetupPluginEnv(settings, plug.Metadata().Name, plug.Dir()) // For CLI plugin types runtime, set extra args and settings extraArgs := []string{} @@ -128,12 +121,10 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { Stderr: os.Stderr, } _, err = plug.Invoke(context.Background(), input) - // TODO do we want to keep execErr here? if execErr, ok := err.(*plugin.InvokeExecError); ok { - // TODO can we replace cmd.PluginError with plugin.Error? 
- return PluginError{ - error: execErr.Err, - Code: execErr.Code, + return CommandError{ + error: execErr.Err, + ExitCode: execErr.ExitCode, } } return err @@ -369,7 +360,6 @@ func pluginDynamicComp(plug plugin.Plugin, cmd *cobra.Command, args []string, to argv = append(argv, u...) argv = append(argv, toComplete) } - plugin.SetupPluginEnv(settings, plug.Metadata().Name, plug.Dir()) cobra.CompDebugln(fmt.Sprintf("calling %s with args %v", main, argv), settings.Debug) buf := new(bytes.Buffer) diff --git a/pkg/cmd/plugin.go b/pkg/cmd/plugin.go index 393e9672c..ba904ef5f 100644 --- a/pkg/cmd/plugin.go +++ b/pkg/cmd/plugin.go @@ -48,7 +48,6 @@ func newPluginCmd(out io.Writer) *cobra.Command { func runHook(p plugin.Plugin, event string) error { pluginHook, ok := p.(plugin.PluginHook) if ok { - plugin.SetupPluginEnv(settings, p.Metadata().Name, p.Dir()) return pluginHook.InvokeHook(event) } diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index 738a64740..f7a418569 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -17,6 +17,7 @@ package cmd import ( "bytes" + "fmt" "os" "runtime" "strings" @@ -93,14 +94,14 @@ func TestLoadCLIPlugins(t *testing.T) { ) loadCLIPlugins(&cmd, &out) - envs := strings.Join([]string{ - "fullenv", - "testdata/helmhome/helm/plugins/fullenv", - "testdata/helmhome/helm/plugins", - "testdata/helmhome/helm/repositories.yaml", - "testdata/helmhome/helm/repository", - os.Args[0], - }, "\n") + fullEnvOutput := strings.Join([]string{ + "HELM_PLUGIN_NAME=fullenv", + "HELM_PLUGIN_DIR=testdata/helmhome/helm/plugins/fullenv", + "HELM_PLUGINS=testdata/helmhome/helm/plugins", + "HELM_REPOSITORY_CONFIG=testdata/helmhome/helm/repositories.yaml", + "HELM_REPOSITORY_CACHE=testdata/helmhome/helm/repository", + fmt.Sprintf("HELM_BIN=%s", os.Args[0]), + }, "\n") + "\n" // Test that the YAML file was correctly converted to a command. 
tests := []struct { @@ -113,47 +114,50 @@ func TestLoadCLIPlugins(t *testing.T) { }{ {"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0}, {"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0}, - {"env", "env stuff", "show the env", "env\n", []string{}, 0}, + {"env", "env stuff", "show the env", "HELM_PLUGIN_NAME=env\n", []string{}, 0}, {"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2}, - {"fullenv", "show env vars", "show all env vars", envs + "\n", []string{}, 0}, + {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0}, } - plugins := cmd.Commands() + pluginCmds := cmd.Commands() - require.Len(t, plugins, len(tests), "Expected %d plugins, got %d", len(tests), len(plugins)) + require.Len(t, pluginCmds, len(tests), "Expected %d plugins, got %d", len(tests), len(pluginCmds)) - for i := range plugins { + for i := range pluginCmds { out.Reset() tt := tests[i] - pp := plugins[i] - if pp.Use != tt.use { - t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pp.Use) - } - if pp.Short != tt.short { - t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pp.Short) - } - if pp.Long != tt.long { - t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pp.Long) - } + pluginCmd := pluginCmds[i] + t.Run(fmt.Sprintf("%s-%d", pluginCmd.Name(), i), func(t *testing.T) { + out.Reset() + if pluginCmd.Use != tt.use { + t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pluginCmd.Use) + } + if pluginCmd.Short != tt.short { + t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pluginCmd.Short) + } + if pluginCmd.Long != tt.long { + t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pluginCmd.Long) + } - // Currently, plugins assume a Linux subsystem. Skip the execution - // tests until this is fixed - if runtime.GOOS != "windows" { - if err := pp.RunE(pp, tt.args); err != nil { - if tt.code > 0 { - perr, ok := err.(PluginError) - if !ok { - t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err) + // Currently, plugins assume a Linux subsystem. 
Skip the execution + // tests until this is fixed + if runtime.GOOS != "windows" { + if err := pluginCmd.RunE(pluginCmd, tt.args); err != nil { + if tt.code > 0 { + cerr, ok := err.(CommandError) + if !ok { + t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err) + } + if cerr.ExitCode != tt.code { + t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode) + } + } else { + t.Errorf("Error running %s: %+v", tt.use, err) } - if perr.Code != tt.code { - t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, perr.Code) - } - } else { - t.Errorf("Error running %s: %+v", tt.use, err) } + assert.Equal(t, tt.expect, out.String(), "expected output for %q", tt.use) } - assert.Equal(t, tt.expect, out.String(), "expected output for %s", tt.use) - } + }) } } @@ -214,12 +218,12 @@ func TestLoadPluginsWithSpace(t *testing.T) { if runtime.GOOS != "windows" { if err := pp.RunE(pp, tt.args); err != nil { if tt.code > 0 { - perr, ok := err.(PluginError) + cerr, ok := err.(CommandError) if !ok { t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err) } - if perr.Code != tt.code { - t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, perr.Code) + if cerr.ExitCode != tt.code { + t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode) } } else { t.Errorf("Error running %s: %+v", tt.use, err) diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 836df834d..2b2f7b750 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -460,3 +460,8 @@ func newRegistryClientWithTLS( } return registryClient, nil } + +type CommandError struct { + error + ExitCode int +} diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh new file mode 100755 index 000000000..9e823ac13 --- /dev/null +++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME} diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml index fa933af93..78a0a23fb 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml @@ -9,4 +9,4 @@ config: ignoreFlags: false runtimeConfig: platformCommand: - - command: "echo $HELM_PLUGIN_NAME" + - command: ${HELM_PLUGIN_DIR}/plugin-name.sh diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh index 2efad9b3c..cc0c64a6a 100755 --- a/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh +++ b/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh @@ -1,7 +1,7 @@ #!/bin/sh -echo $HELM_PLUGIN_NAME -echo $HELM_PLUGIN_DIR -echo $HELM_PLUGINS -echo $HELM_REPOSITORY_CONFIG -echo $HELM_REPOSITORY_CACHE -echo $HELM_BIN +echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME} +echo HELM_PLUGIN_DIR=${HELM_PLUGIN_DIR} +echo HELM_PLUGINS=${HELM_PLUGINS} +echo HELM_REPOSITORY_CONFIG=${HELM_REPOSITORY_CONFIG} +echo HELM_REPOSITORY_CACHE=${HELM_REPOSITORY_CACHE} +echo HELM_BIN=${HELM_BIN} diff --git a/pkg/postrenderer/postrenderer.go b/pkg/postrenderer/postrenderer.go index ed6699c32..55e6d3adf 100644 --- a/pkg/postrenderer/postrenderer.go +++ b/pkg/postrenderer/postrenderer.go @@ -65,7 +65,6 @@ func (r *postRendererPlugin) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer Message: schema.InputMessagePostRendererV1{ ExtraArgs: r.args, Manifests: renderedManifests, - Settings: r.settings, 
}, } output, err := r.plugin.Invoke(context.Background(), input) diff --git a/pkg/postrenderer/postrenderer_test.go b/pkg/postrenderer/postrenderer_test.go index 9addd481d..824a1d179 100644 --- a/pkg/postrenderer/postrenderer_test.go +++ b/pkg/postrenderer/postrenderer_test.go @@ -18,14 +18,12 @@ package postrenderer import ( "bytes" - "path/filepath" "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v4/internal/plugin" "helm.sh/helm/v4/pkg/cli" ) @@ -38,8 +36,6 @@ func TestNewPostRenderPluginRunWithNoOutput(t *testing.T) { s := cli.New() s.PluginsDirectory = "testdata/plugins" name := "postrenderer-v1" - base := filepath.Join(s.PluginsDirectory, name) - plugin.SetupPluginEnv(s, name, base) renderer, err := NewPostRendererPlugin(s, name, "") require.NoError(t, err) @@ -57,8 +53,6 @@ func TestNewPostRenderPluginWithOneArgsRun(t *testing.T) { s := cli.New() s.PluginsDirectory = "testdata/plugins" name := "postrenderer-v1" - base := filepath.Join(s.PluginsDirectory, name) - plugin.SetupPluginEnv(s, name, base) renderer, err := NewPostRendererPlugin(s, name, "ARG1") require.NoError(t, err) @@ -77,8 +71,6 @@ func TestNewPostRenderPluginWithTwoArgsRun(t *testing.T) { s := cli.New() s.PluginsDirectory = "testdata/plugins" name := "postrenderer-v1" - base := filepath.Join(s.PluginsDirectory, name) - plugin.SetupPluginEnv(s, name, base) renderer, err := NewPostRendererPlugin(s, name, "ARG1", "ARG2") require.NoError(t, err) From 6f957f4922ea065138f76b990ba5cc95bbcd774b Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Sun, 31 Aug 2025 08:48:15 -0400 Subject: [PATCH 79/88] Move the release util to the versioned directory The release util package is directly related to the v1 of releases and uses the v1 of releases. 
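For downstream consumers the change is limited to the import path; the package
name and its API are untouched by this move. A minimal before/after sketch of
an importing file (the releaseutil alias mirrors the one used in the diffs
below and is only illustrative):

    // before
    import releaseutil "helm.sh/helm/v4/pkg/release/util"

    // after
    import releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
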
Signed-off-by: Matt Farina --- pkg/action/action.go | 2 +- pkg/action/install.go | 2 +- pkg/action/list.go | 2 +- pkg/action/resource_policy.go | 2 +- pkg/action/uninstall.go | 2 +- pkg/action/upgrade.go | 2 +- pkg/cmd/history.go | 2 +- pkg/cmd/template.go | 2 +- pkg/release/{ => v1}/util/filter.go | 2 +- pkg/release/{ => v1}/util/filter_test.go | 2 +- pkg/release/{ => v1}/util/kind_sorter.go | 0 pkg/release/{ => v1}/util/kind_sorter_test.go | 0 pkg/release/{ => v1}/util/manifest.go | 0 pkg/release/{ => v1}/util/manifest_sorter.go | 0 pkg/release/{ => v1}/util/manifest_sorter_test.go | 0 pkg/release/{ => v1}/util/manifest_test.go | 2 +- pkg/release/{ => v1}/util/sorter.go | 2 +- pkg/release/{ => v1}/util/sorter_test.go | 2 +- pkg/storage/storage.go | 2 +- 19 files changed, 14 insertions(+), 14 deletions(-) rename pkg/release/{ => v1}/util/filter.go (97%) rename pkg/release/{ => v1}/util/filter_test.go (96%) rename pkg/release/{ => v1}/util/kind_sorter.go (100%) rename pkg/release/{ => v1}/util/kind_sorter_test.go (100%) rename pkg/release/{ => v1}/util/manifest.go (100%) rename pkg/release/{ => v1}/util/manifest_sorter.go (100%) rename pkg/release/{ => v1}/util/manifest_sorter_test.go (100%) rename pkg/release/{ => v1}/util/manifest_test.go (95%) rename pkg/release/{ => v1}/util/sorter.go (96%) rename pkg/release/{ => v1}/util/sorter_test.go (97%) diff --git a/pkg/action/action.go b/pkg/action/action.go index 7b8fa3c34..522226a1a 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -45,8 +45,8 @@ import ( "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" - releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" "helm.sh/helm/v4/pkg/storage" "helm.sh/helm/v4/pkg/storage/driver" "helm.sh/helm/v4/pkg/time" diff --git a/pkg/action/install.go b/pkg/action/install.go index 5ca499d64..484cdbf8c 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -50,8 +50,8 @@ import ( kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" - releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" "helm.sh/helm/v4/pkg/repo" "helm.sh/helm/v4/pkg/storage" "helm.sh/helm/v4/pkg/storage/driver" diff --git a/pkg/action/list.go b/pkg/action/list.go index 82500582f..c6d6f2037 100644 --- a/pkg/action/list.go +++ b/pkg/action/list.go @@ -22,8 +22,8 @@ import ( "k8s.io/apimachinery/pkg/labels" - releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" ) // ListStates represents zero or more status codes that a list item may have set diff --git a/pkg/action/resource_policy.go b/pkg/action/resource_policy.go index b72e94124..fcea98ad6 100644 --- a/pkg/action/resource_policy.go +++ b/pkg/action/resource_policy.go @@ -20,7 +20,7 @@ import ( "strings" "helm.sh/helm/v4/pkg/kube" - releaseutil "helm.sh/helm/v4/pkg/release/util" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" ) func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) { diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index 6aa87d331..866be5d54 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -27,8 +27,8 @@ import ( chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/kube" - releaseutil 
"helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" "helm.sh/helm/v4/pkg/storage/driver" helmtime "helm.sh/helm/v4/pkg/time" ) diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index f7fbd490f..c00a59079 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -33,8 +33,8 @@ import ( "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/postrenderer" "helm.sh/helm/v4/pkg/registry" - releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" "helm.sh/helm/v4/pkg/storage/driver" ) diff --git a/pkg/cmd/history.go b/pkg/cmd/history.go index ec2a1bc12..9f029268c 100644 --- a/pkg/cmd/history.go +++ b/pkg/cmd/history.go @@ -29,8 +29,8 @@ import ( chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" - releaseutil "helm.sh/helm/v4/pkg/release/util" release "helm.sh/helm/v4/pkg/release/v1" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" helmtime "helm.sh/helm/v4/pkg/time" ) diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index c93b5395b..aaf848c9e 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -38,7 +38,7 @@ import ( chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/cmd/require" - releaseutil "helm.sh/helm/v4/pkg/release/util" + releaseutil "helm.sh/helm/v4/pkg/release/v1/util" ) const templateDesc = ` diff --git a/pkg/release/util/filter.go b/pkg/release/v1/util/filter.go similarity index 97% rename from pkg/release/util/filter.go rename to pkg/release/v1/util/filter.go index f0a082cfd..f818a6196 100644 --- a/pkg/release/util/filter.go +++ b/pkg/release/v1/util/filter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util // import "helm.sh/helm/v4/pkg/release/util" +package util // import "helm.sh/helm/v4/pkg/release/v1/util" import rspb "helm.sh/helm/v4/pkg/release/v1" diff --git a/pkg/release/util/filter_test.go b/pkg/release/v1/util/filter_test.go similarity index 96% rename from pkg/release/util/filter_test.go rename to pkg/release/v1/util/filter_test.go index 5d2564619..c8b23d526 100644 --- a/pkg/release/util/filter_test.go +++ b/pkg/release/v1/util/filter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package util // import "helm.sh/helm/v4/pkg/release/util" +package util // import "helm.sh/helm/v4/pkg/release/v1/util" import ( "testing" diff --git a/pkg/release/util/kind_sorter.go b/pkg/release/v1/util/kind_sorter.go similarity index 100% rename from pkg/release/util/kind_sorter.go rename to pkg/release/v1/util/kind_sorter.go diff --git a/pkg/release/util/kind_sorter_test.go b/pkg/release/v1/util/kind_sorter_test.go similarity index 100% rename from pkg/release/util/kind_sorter_test.go rename to pkg/release/v1/util/kind_sorter_test.go diff --git a/pkg/release/util/manifest.go b/pkg/release/v1/util/manifest.go similarity index 100% rename from pkg/release/util/manifest.go rename to pkg/release/v1/util/manifest.go diff --git a/pkg/release/util/manifest_sorter.go b/pkg/release/v1/util/manifest_sorter.go similarity index 100% rename from pkg/release/util/manifest_sorter.go rename to pkg/release/v1/util/manifest_sorter.go diff --git a/pkg/release/util/manifest_sorter_test.go b/pkg/release/v1/util/manifest_sorter_test.go similarity index 100% rename from pkg/release/util/manifest_sorter_test.go rename to pkg/release/v1/util/manifest_sorter_test.go diff --git a/pkg/release/util/manifest_test.go b/pkg/release/v1/util/manifest_test.go similarity index 95% rename from pkg/release/util/manifest_test.go rename to pkg/release/v1/util/manifest_test.go index cfc19563d..754ac1367 100644 --- a/pkg/release/util/manifest_test.go +++ b/pkg/release/v1/util/manifest_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util // import "helm.sh/helm/v4/pkg/release/util" +package util // import "helm.sh/helm/v4/pkg/release/v1/util" import ( "reflect" diff --git a/pkg/release/util/sorter.go b/pkg/release/v1/util/sorter.go similarity index 96% rename from pkg/release/util/sorter.go rename to pkg/release/v1/util/sorter.go index 1b09d0f3b..3712a58ef 100644 --- a/pkg/release/util/sorter.go +++ b/pkg/release/v1/util/sorter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util // import "helm.sh/helm/v4/pkg/release/util" +package util // import "helm.sh/helm/v4/pkg/release/v1/util" import ( "sort" diff --git a/pkg/release/util/sorter_test.go b/pkg/release/v1/util/sorter_test.go similarity index 97% rename from pkg/release/util/sorter_test.go rename to pkg/release/v1/util/sorter_test.go index 7ca540441..4628a5192 100644 --- a/pkg/release/util/sorter_test.go +++ b/pkg/release/v1/util/sorter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util // import "helm.sh/helm/v4/pkg/release/util" +package util // import "helm.sh/helm/v4/pkg/release/v1/util" import ( "testing" diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index b43f7c0f2..f086309bb 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -22,8 +22,8 @@ import ( "log/slog" "strings" - relutil "helm.sh/helm/v4/pkg/release/util" rspb "helm.sh/helm/v4/pkg/release/v1" + relutil "helm.sh/helm/v4/pkg/release/v1/util" "helm.sh/helm/v4/pkg/storage/driver" ) From 52267ee74bf642ac3ea84f40ae6796ef9b391aaf Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Sun, 31 Aug 2025 09:04:48 -0400 Subject: [PATCH 80/88] Move repo package to versioned directory The repo package is internally versioned at v1. Repos were designed to be versioned. 
This change moves it to a versioned directory the same way other packages are now being handled. Signed-off-by: Matt Farina --- internal/resolver/resolver.go | 2 +- pkg/action/install.go | 2 +- pkg/action/pull.go | 2 +- pkg/cmd/dependency_build_test.go | 4 ++-- pkg/cmd/dependency_update_test.go | 4 ++-- pkg/cmd/flags.go | 2 +- pkg/cmd/install_test.go | 2 +- pkg/cmd/pull_test.go | 2 +- pkg/cmd/repo_add.go | 2 +- pkg/cmd/repo_add_test.go | 4 ++-- pkg/cmd/repo_index.go | 2 +- pkg/cmd/repo_index_test.go | 2 +- pkg/cmd/repo_list.go | 2 +- pkg/cmd/repo_remove.go | 2 +- pkg/cmd/repo_remove_test.go | 4 ++-- pkg/cmd/repo_update.go | 2 +- pkg/cmd/repo_update_test.go | 4 ++-- pkg/cmd/root.go | 2 +- pkg/cmd/search/search.go | 2 +- pkg/cmd/search/search_test.go | 2 +- pkg/cmd/search_repo.go | 2 +- pkg/cmd/show_test.go | 2 +- pkg/downloader/chart_downloader.go | 2 +- pkg/downloader/chart_downloader_test.go | 4 ++-- pkg/downloader/manager.go | 2 +- pkg/downloader/manager_test.go | 4 ++-- pkg/registry/utils_test.go | 2 +- pkg/repo/{ => v1}/chartrepo.go | 2 +- pkg/repo/{ => v1}/chartrepo_test.go | 0 pkg/repo/{ => v1}/doc.go | 0 pkg/repo/{ => v1}/error.go | 0 pkg/repo/{ => v1}/index.go | 0 pkg/repo/{ => v1}/index_test.go | 0 pkg/repo/{ => v1}/repo.go | 2 +- pkg/repo/{ => v1}/repo_test.go | 0 pkg/repo/{ => v1}/repotest/doc.go | 0 pkg/repo/{ => v1}/repotest/server.go | 2 +- pkg/repo/{ => v1}/repotest/server_test.go | 6 +++--- .../repotest/testdata/examplechart-0.1.0.tgz | Bin .../repotest/testdata/examplechart/.helmignore | 0 .../repotest/testdata/examplechart/Chart.yaml | 0 .../repotest/testdata/examplechart/values.yaml | 0 pkg/repo/{ => v1}/repotest/tlsconfig.go | 0 pkg/repo/{ => v1}/testdata/chartmuseum-index.yaml | 0 .../{ => v1}/testdata/local-index-annotations.yaml | 0 .../{ => v1}/testdata/local-index-unordered.yaml | 0 pkg/repo/{ => v1}/testdata/local-index.json | 0 pkg/repo/{ => v1}/testdata/local-index.yaml | 0 pkg/repo/{ => v1}/testdata/old-repositories.yaml | 0 pkg/repo/{ => v1}/testdata/repositories.yaml | 0 .../{ => v1}/testdata/repository/frobnitz-1.2.3.tgz | Bin .../{ => v1}/testdata/repository/sprocket-1.1.0.tgz | Bin .../{ => v1}/testdata/repository/sprocket-1.2.0.tgz | Bin .../testdata/repository/universe/zarthal-1.0.0.tgz | Bin pkg/repo/{ => v1}/testdata/server/index.yaml | 0 pkg/repo/{ => v1}/testdata/server/test.txt | 0 56 files changed, 40 insertions(+), 40 deletions(-) rename pkg/repo/{ => v1}/chartrepo.go (99%) rename pkg/repo/{ => v1}/chartrepo_test.go (100%) rename pkg/repo/{ => v1}/doc.go (100%) rename pkg/repo/{ => v1}/error.go (100%) rename pkg/repo/{ => v1}/index.go (100%) rename pkg/repo/{ => v1}/index_test.go (100%) rename pkg/repo/{ => v1}/repo.go (98%) rename pkg/repo/{ => v1}/repo_test.go (100%) rename pkg/repo/{ => v1}/repotest/doc.go (100%) rename pkg/repo/{ => v1}/repotest/server.go (99%) rename pkg/repo/{ => v1}/repotest/server_test.go (96%) rename pkg/repo/{ => v1}/repotest/testdata/examplechart-0.1.0.tgz (100%) rename pkg/repo/{ => v1}/repotest/testdata/examplechart/.helmignore (100%) rename pkg/repo/{ => v1}/repotest/testdata/examplechart/Chart.yaml (100%) rename pkg/repo/{ => v1}/repotest/testdata/examplechart/values.yaml (100%) rename pkg/repo/{ => v1}/repotest/tlsconfig.go (100%) rename pkg/repo/{ => v1}/testdata/chartmuseum-index.yaml (100%) rename pkg/repo/{ => v1}/testdata/local-index-annotations.yaml (100%) rename pkg/repo/{ => v1}/testdata/local-index-unordered.yaml (100%) rename pkg/repo/{ => v1}/testdata/local-index.json (100%) rename pkg/repo/{ => 
v1}/testdata/local-index.yaml (100%) rename pkg/repo/{ => v1}/testdata/old-repositories.yaml (100%) rename pkg/repo/{ => v1}/testdata/repositories.yaml (100%) rename pkg/repo/{ => v1}/testdata/repository/frobnitz-1.2.3.tgz (100%) rename pkg/repo/{ => v1}/testdata/repository/sprocket-1.1.0.tgz (100%) rename pkg/repo/{ => v1}/testdata/repository/sprocket-1.2.0.tgz (100%) rename pkg/repo/{ => v1}/testdata/repository/universe/zarthal-1.0.0.tgz (100%) rename pkg/repo/{ => v1}/testdata/server/index.yaml (100%) rename pkg/repo/{ => v1}/testdata/server/test.txt (100%) diff --git a/internal/resolver/resolver.go b/internal/resolver/resolver.go index 13dcd2ce9..3efe94f10 100644 --- a/internal/resolver/resolver.go +++ b/internal/resolver/resolver.go @@ -33,7 +33,7 @@ import ( "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/provenance" "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // Resolver resolves dependencies from semantic version ranges to a particular version. diff --git a/pkg/action/install.go b/pkg/action/install.go index 484cdbf8c..b2330d551 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -52,7 +52,7 @@ import ( "helm.sh/helm/v4/pkg/registry" release "helm.sh/helm/v4/pkg/release/v1" releaseutil "helm.sh/helm/v4/pkg/release/v1/util" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" "helm.sh/helm/v4/pkg/storage" "helm.sh/helm/v4/pkg/storage/driver" ) diff --git a/pkg/action/pull.go b/pkg/action/pull.go index c1f77e44c..be71d0ed0 100644 --- a/pkg/action/pull.go +++ b/pkg/action/pull.go @@ -27,7 +27,7 @@ import ( "helm.sh/helm/v4/pkg/downloader" "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // Pull is the action for checking a given release's information. 
diff --git a/pkg/cmd/dependency_build_test.go b/pkg/cmd/dependency_build_test.go index a4a89b7a9..a3473301d 100644 --- a/pkg/cmd/dependency_build_test.go +++ b/pkg/cmd/dependency_build_test.go @@ -24,8 +24,8 @@ import ( chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/provenance" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestDependencyBuildCmd(t *testing.T) { diff --git a/pkg/cmd/dependency_update_test.go b/pkg/cmd/dependency_update_test.go index f1b39c4b7..3eaa51df1 100644 --- a/pkg/cmd/dependency_update_test.go +++ b/pkg/cmd/dependency_update_test.go @@ -29,8 +29,8 @@ import ( chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/provenance" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestDependencyUpdateCmd(t *testing.T) { diff --git a/pkg/cmd/flags.go b/pkg/cmd/flags.go index 98881c795..b20772ef9 100644 --- a/pkg/cmd/flags.go +++ b/pkg/cmd/flags.go @@ -37,7 +37,7 @@ import ( "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/kube" "helm.sh/helm/v4/pkg/postrenderer" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) const ( diff --git a/pkg/cmd/install_test.go b/pkg/cmd/install_test.go index 9cd244e84..f0f12e4f7 100644 --- a/pkg/cmd/install_test.go +++ b/pkg/cmd/install_test.go @@ -23,7 +23,7 @@ import ( "path/filepath" "testing" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestInstall(t *testing.T) { diff --git a/pkg/cmd/pull_test.go b/pkg/cmd/pull_test.go index c3156c394..c24bf33b7 100644 --- a/pkg/cmd/pull_test.go +++ b/pkg/cmd/pull_test.go @@ -24,7 +24,7 @@ import ( "path/filepath" "testing" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestPullCmd(t *testing.T) { diff --git a/pkg/cmd/repo_add.go b/pkg/cmd/repo_add.go index 187234486..00e698daf 100644 --- a/pkg/cmd/repo_add.go +++ b/pkg/cmd/repo_add.go @@ -34,7 +34,7 @@ import ( "helm.sh/helm/v4/pkg/cmd/require" "helm.sh/helm/v4/pkg/getter" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // Repositories that have been permanently deleted and no longer work diff --git a/pkg/cmd/repo_add_test.go b/pkg/cmd/repo_add_test.go index aa6c4eaad..6d3696f52 100644 --- a/pkg/cmd/repo_add_test.go +++ b/pkg/cmd/repo_add_test.go @@ -31,8 +31,8 @@ import ( "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/helmpath/xdg" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestRepoAddCmd(t *testing.T) { diff --git a/pkg/cmd/repo_index.go b/pkg/cmd/repo_index.go index c17fd9391..ece0ce811 100644 --- a/pkg/cmd/repo_index.go +++ b/pkg/cmd/repo_index.go @@ -27,7 +27,7 @@ import ( "github.com/spf13/cobra" "helm.sh/helm/v4/pkg/cmd/require" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) const repoIndexDesc = ` diff --git a/pkg/cmd/repo_index_test.go b/pkg/cmd/repo_index_test.go index c865c8a5d..c8959f21e 100644 --- a/pkg/cmd/repo_index_test.go +++ b/pkg/cmd/repo_index_test.go @@ -24,7 +24,7 @@ import ( "path/filepath" "testing" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) func TestRepoIndexCmd(t *testing.T) { diff --git a/pkg/cmd/repo_list.go b/pkg/cmd/repo_list.go index 70f57992e..10b4442a0 100644 --- a/pkg/cmd/repo_list.go +++ b/pkg/cmd/repo_list.go @@ 
-25,7 +25,7 @@ import ( "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) func newRepoListCmd(out io.Writer) *cobra.Command { diff --git a/pkg/cmd/repo_remove.go b/pkg/cmd/repo_remove.go index d0a3aa205..330e69d3a 100644 --- a/pkg/cmd/repo_remove.go +++ b/pkg/cmd/repo_remove.go @@ -28,7 +28,7 @@ import ( "helm.sh/helm/v4/pkg/cmd/require" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) type repoRemoveOptions struct { diff --git a/pkg/cmd/repo_remove_test.go b/pkg/cmd/repo_remove_test.go index bd8757812..fce15bb73 100644 --- a/pkg/cmd/repo_remove_test.go +++ b/pkg/cmd/repo_remove_test.go @@ -25,8 +25,8 @@ import ( "testing" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestRepoRemove(t *testing.T) { diff --git a/pkg/cmd/repo_update.go b/pkg/cmd/repo_update.go index 54318bf29..f2e7c0e0f 100644 --- a/pkg/cmd/repo_update.go +++ b/pkg/cmd/repo_update.go @@ -28,7 +28,7 @@ import ( "helm.sh/helm/v4/pkg/cmd/require" "helm.sh/helm/v4/pkg/getter" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) const updateDesc = ` diff --git a/pkg/cmd/repo_update_test.go b/pkg/cmd/repo_update_test.go index b0deff1ae..7aa4d414f 100644 --- a/pkg/cmd/repo_update_test.go +++ b/pkg/cmd/repo_update_test.go @@ -26,8 +26,8 @@ import ( "helm.sh/helm/v4/internal/test/ensure" "helm.sh/helm/v4/pkg/getter" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestUpdateCmd(t *testing.T) { diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 836df834d..2c3a2f944 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -40,7 +40,7 @@ import ( kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/registry" release "helm.sh/helm/v4/pkg/release/v1" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" "helm.sh/helm/v4/pkg/storage/driver" ) diff --git a/pkg/cmd/search/search.go b/pkg/cmd/search/search.go index f9e229154..1c7bb1d06 100644 --- a/pkg/cmd/search/search.go +++ b/pkg/cmd/search/search.go @@ -31,7 +31,7 @@ import ( "github.com/Masterminds/semver/v3" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // Result is a search result. 
diff --git a/pkg/cmd/search/search_test.go b/pkg/cmd/search/search_test.go index 7a4ba786b..a24eb1f64 100644 --- a/pkg/cmd/search/search_test.go +++ b/pkg/cmd/search/search_test.go @@ -21,7 +21,7 @@ import ( "testing" chart "helm.sh/helm/v4/pkg/chart/v2" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) func TestSortScore(t *testing.T) { diff --git a/pkg/cmd/search_repo.go b/pkg/cmd/search_repo.go index dffa0d1c4..35608e22e 100644 --- a/pkg/cmd/search_repo.go +++ b/pkg/cmd/search_repo.go @@ -34,7 +34,7 @@ import ( "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/search" "helm.sh/helm/v4/pkg/helmpath" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) const searchRepoDesc = ` diff --git a/pkg/cmd/show_test.go b/pkg/cmd/show_test.go index 5ccb4bcad..ff3671dbc 100644 --- a/pkg/cmd/show_test.go +++ b/pkg/cmd/show_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestShowPreReleaseChart(t *testing.T) { diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index a24cad3fd..00c8c56e8 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -36,7 +36,7 @@ import ( "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/provenance" "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // VerificationStrategy describes a strategy for determining whether to verify a chart. diff --git a/pkg/downloader/chart_downloader_test.go b/pkg/downloader/chart_downloader_test.go index 649448fef..4349ecef9 100644 --- a/pkg/downloader/chart_downloader_test.go +++ b/pkg/downloader/chart_downloader_test.go @@ -28,8 +28,8 @@ import ( "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) const ( diff --git a/pkg/downloader/manager.go b/pkg/downloader/manager.go index 8b77a77c0..d41b8fdb4 100644 --- a/pkg/downloader/manager.go +++ b/pkg/downloader/manager.go @@ -42,7 +42,7 @@ import ( "helm.sh/helm/v4/pkg/getter" "helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // ErrRepoNotFound indicates that chart repositories can't be found in local repo cache. 
diff --git a/pkg/downloader/manager_test.go b/pkg/downloader/manager_test.go index b7121a4ce..9e27f183f 100644 --- a/pkg/downloader/manager_test.go +++ b/pkg/downloader/manager_test.go @@ -32,8 +32,8 @@ import ( "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/getter" - "helm.sh/helm/v4/pkg/repo" - "helm.sh/helm/v4/pkg/repo/repotest" + "helm.sh/helm/v4/pkg/repo/v1" + "helm.sh/helm/v4/pkg/repo/v1/repotest" ) func TestVersionEquals(t *testing.T) { diff --git a/pkg/registry/utils_test.go b/pkg/registry/utils_test.go index b46317fc6..de2f9024f 100644 --- a/pkg/registry/utils_test.go +++ b/pkg/registry/utils_test.go @@ -231,7 +231,7 @@ func testPush(suite *TestSuite) { suite.NotNil(err, "error pushing non-chart bytes") // Load a test chart - chartData, err := os.ReadFile("../repo/repotest/testdata/examplechart-0.1.0.tgz") + chartData, err := os.ReadFile("../repo/v1/repotest/testdata/examplechart-0.1.0.tgz") suite.Nil(err, "no error loading test chart") meta, err := extractChartMeta(chartData) suite.Nil(err, "no error extracting chart meta") diff --git a/pkg/repo/chartrepo.go b/pkg/repo/v1/chartrepo.go similarity index 99% rename from pkg/repo/chartrepo.go rename to pkg/repo/v1/chartrepo.go index c54197d60..95c04ccef 100644 --- a/pkg/repo/chartrepo.go +++ b/pkg/repo/v1/chartrepo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package repo // import "helm.sh/helm/v4/pkg/repo" +package repo // import "helm.sh/helm/v4/pkg/repo/v1" import ( "crypto/rand" diff --git a/pkg/repo/chartrepo_test.go b/pkg/repo/v1/chartrepo_test.go similarity index 100% rename from pkg/repo/chartrepo_test.go rename to pkg/repo/v1/chartrepo_test.go diff --git a/pkg/repo/doc.go b/pkg/repo/v1/doc.go similarity index 100% rename from pkg/repo/doc.go rename to pkg/repo/v1/doc.go diff --git a/pkg/repo/error.go b/pkg/repo/v1/error.go similarity index 100% rename from pkg/repo/error.go rename to pkg/repo/v1/error.go diff --git a/pkg/repo/index.go b/pkg/repo/v1/index.go similarity index 100% rename from pkg/repo/index.go rename to pkg/repo/v1/index.go diff --git a/pkg/repo/index_test.go b/pkg/repo/v1/index_test.go similarity index 100% rename from pkg/repo/index_test.go rename to pkg/repo/v1/index_test.go diff --git a/pkg/repo/repo.go b/pkg/repo/v1/repo.go similarity index 98% rename from pkg/repo/repo.go rename to pkg/repo/v1/repo.go index 48c0e0193..38d2b0ca1 100644 --- a/pkg/repo/repo.go +++ b/pkg/repo/v1/repo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package repo // import "helm.sh/helm/v4/pkg/repo" +package repo // import "helm.sh/helm/v4/pkg/repo/v1" import ( "fmt" diff --git a/pkg/repo/repo_test.go b/pkg/repo/v1/repo_test.go similarity index 100% rename from pkg/repo/repo_test.go rename to pkg/repo/v1/repo_test.go diff --git a/pkg/repo/repotest/doc.go b/pkg/repo/v1/repotest/doc.go similarity index 100% rename from pkg/repo/repotest/doc.go rename to pkg/repo/v1/repotest/doc.go diff --git a/pkg/repo/repotest/server.go b/pkg/repo/v1/repotest/server.go similarity index 99% rename from pkg/repo/repotest/server.go rename to pkg/repo/v1/repotest/server.go index 8f9f82281..12b96de5a 100644 --- a/pkg/repo/repotest/server.go +++ b/pkg/repo/v1/repotest/server.go @@ -37,7 +37,7 @@ import ( "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" ociRegistry "helm.sh/helm/v4/pkg/registry" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) func BasicAuthMiddleware(t *testing.T) http.HandlerFunc { diff --git a/pkg/repo/repotest/server_test.go b/pkg/repo/v1/repotest/server_test.go similarity index 96% rename from pkg/repo/repotest/server_test.go rename to pkg/repo/v1/repotest/server_test.go index 4d62ef8ed..f0e374fc0 100644 --- a/pkg/repo/repotest/server_test.go +++ b/pkg/repo/v1/repotest/server_test.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/yaml" "helm.sh/helm/v4/internal/test/ensure" - "helm.sh/helm/v4/pkg/repo" + "helm.sh/helm/v4/pkg/repo/v1" ) // Young'n, in these here parts, we test our tests. @@ -113,7 +113,7 @@ func TestNewTempServer(t *testing.T) { "tls": { options: []ServerOption{ WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"), - WithTLSConfig(MakeTestTLSConfig(t, "../../../testdata")), + WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")), }, }, } @@ -212,7 +212,7 @@ func TestNewTempServer_TLS(t *testing.T) { srv := NewTempServer( t, WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"), - WithTLSConfig(MakeTestTLSConfig(t, "../../../testdata")), + WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")), ) defer srv.Stop() diff --git a/pkg/repo/repotest/testdata/examplechart-0.1.0.tgz b/pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz similarity index 100% rename from pkg/repo/repotest/testdata/examplechart-0.1.0.tgz rename to pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz diff --git a/pkg/repo/repotest/testdata/examplechart/.helmignore b/pkg/repo/v1/repotest/testdata/examplechart/.helmignore similarity index 100% rename from pkg/repo/repotest/testdata/examplechart/.helmignore rename to pkg/repo/v1/repotest/testdata/examplechart/.helmignore diff --git a/pkg/repo/repotest/testdata/examplechart/Chart.yaml b/pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml similarity index 100% rename from pkg/repo/repotest/testdata/examplechart/Chart.yaml rename to pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml diff --git a/pkg/repo/repotest/testdata/examplechart/values.yaml b/pkg/repo/v1/repotest/testdata/examplechart/values.yaml similarity index 100% rename from pkg/repo/repotest/testdata/examplechart/values.yaml rename to pkg/repo/v1/repotest/testdata/examplechart/values.yaml diff --git a/pkg/repo/repotest/tlsconfig.go b/pkg/repo/v1/repotest/tlsconfig.go similarity index 100% rename from pkg/repo/repotest/tlsconfig.go rename to pkg/repo/v1/repotest/tlsconfig.go diff --git a/pkg/repo/testdata/chartmuseum-index.yaml b/pkg/repo/v1/testdata/chartmuseum-index.yaml similarity index 100% rename from pkg/repo/testdata/chartmuseum-index.yaml rename to 
pkg/repo/v1/testdata/chartmuseum-index.yaml diff --git a/pkg/repo/testdata/local-index-annotations.yaml b/pkg/repo/v1/testdata/local-index-annotations.yaml similarity index 100% rename from pkg/repo/testdata/local-index-annotations.yaml rename to pkg/repo/v1/testdata/local-index-annotations.yaml diff --git a/pkg/repo/testdata/local-index-unordered.yaml b/pkg/repo/v1/testdata/local-index-unordered.yaml similarity index 100% rename from pkg/repo/testdata/local-index-unordered.yaml rename to pkg/repo/v1/testdata/local-index-unordered.yaml diff --git a/pkg/repo/testdata/local-index.json b/pkg/repo/v1/testdata/local-index.json similarity index 100% rename from pkg/repo/testdata/local-index.json rename to pkg/repo/v1/testdata/local-index.json diff --git a/pkg/repo/testdata/local-index.yaml b/pkg/repo/v1/testdata/local-index.yaml similarity index 100% rename from pkg/repo/testdata/local-index.yaml rename to pkg/repo/v1/testdata/local-index.yaml diff --git a/pkg/repo/testdata/old-repositories.yaml b/pkg/repo/v1/testdata/old-repositories.yaml similarity index 100% rename from pkg/repo/testdata/old-repositories.yaml rename to pkg/repo/v1/testdata/old-repositories.yaml diff --git a/pkg/repo/testdata/repositories.yaml b/pkg/repo/v1/testdata/repositories.yaml similarity index 100% rename from pkg/repo/testdata/repositories.yaml rename to pkg/repo/v1/testdata/repositories.yaml diff --git a/pkg/repo/testdata/repository/frobnitz-1.2.3.tgz b/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz similarity index 100% rename from pkg/repo/testdata/repository/frobnitz-1.2.3.tgz rename to pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz diff --git a/pkg/repo/testdata/repository/sprocket-1.1.0.tgz b/pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz similarity index 100% rename from pkg/repo/testdata/repository/sprocket-1.1.0.tgz rename to pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz diff --git a/pkg/repo/testdata/repository/sprocket-1.2.0.tgz b/pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz similarity index 100% rename from pkg/repo/testdata/repository/sprocket-1.2.0.tgz rename to pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz diff --git a/pkg/repo/testdata/repository/universe/zarthal-1.0.0.tgz b/pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz similarity index 100% rename from pkg/repo/testdata/repository/universe/zarthal-1.0.0.tgz rename to pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz diff --git a/pkg/repo/testdata/server/index.yaml b/pkg/repo/v1/testdata/server/index.yaml similarity index 100% rename from pkg/repo/testdata/server/index.yaml rename to pkg/repo/v1/testdata/server/index.yaml diff --git a/pkg/repo/testdata/server/test.txt b/pkg/repo/v1/testdata/server/test.txt similarity index 100% rename from pkg/repo/testdata/server/test.txt rename to pkg/repo/v1/testdata/server/test.txt From 9dcc49cbd5e37cc916a23a4f375f7f4214dfd515 Mon Sep 17 00:00:00 2001 From: Matt Farina Date: Mon, 1 Sep 2025 17:46:14 -0400 Subject: [PATCH 81/88] Move lint pkg to be part of each chart version Linting is specific to the chart versions. A v2 and v3 chart will lint differently. To accomplish this, packages like engine need to be able to handle different chart versions. This was accomplished by some changes: 1. The introduction of a Charter interface for charts 2. The ChartAccessor which is able to accept a chart and then provide access to its data via an interface. There is an interface, factory, and implementation for each version of chart. 3. 
Common packages were moved to a common and util packages. Due to some package loops, there are 2 packages which may get some consolidation in the future. The new interfaces provide the foundation to move the actions and cmd packages to be able to handle multiple apiVersions of charts. Signed-off-by: Matt Farina --- .golangci.yml | 6 +- Makefile | 13 +- internal/chart/v3/chart.go | 14 +- internal/chart/v3/chart_test.go | 14 +- internal/chart/v3/lint/lint.go | 66 ++ internal/chart/v3/lint/lint_test.go | 246 ++++++ internal/chart/v3/lint/rules/chartfile.go | 225 ++++++ .../chart/v3/lint/rules/chartfile_test.go | 276 +++++++ internal/chart/v3/lint/rules/crds.go | 113 +++ internal/chart/v3/lint/rules/crds_test.go | 36 + internal/chart/v3/lint/rules/dependencies.go | 101 +++ .../chart/v3/lint/rules/dependencies_test.go | 157 ++++ .../chart/v3}/lint/rules/deprecations.go | 8 +- .../chart/v3/lint/rules/deprecations_test.go | 41 + internal/chart/v3/lint/rules/template.go | 348 +++++++++ internal/chart/v3/lint/rules/template_test.go | 441 +++++++++++ .../lint/rules/testdata/albatross/Chart.yaml | 5 + .../testdata/albatross/templates/_helpers.tpl | 0 .../testdata/albatross/templates/fail.yaml | 0 .../testdata/albatross/templates/svc.yaml | 0 .../lint/rules/testdata/albatross/values.yaml | 0 .../testdata/anotherbadchartfile/Chart.yaml | 15 + .../rules/testdata/badchartfile/Chart.yaml | 0 .../rules/testdata/badchartfile/values.yaml | 0 .../rules/testdata/badchartname/Chart.yaml | 5 + .../rules/testdata/badchartname/values.yaml | 0 .../lint/rules/testdata/badcrdfile/Chart.yaml | 6 + .../badcrdfile/crds/bad-apiversion.yaml | 0 .../testdata/badcrdfile/crds/bad-crd.yaml | 0 .../testdata/badcrdfile/templates/.gitkeep | 0 .../rules/testdata/badcrdfile/values.yaml | 0 .../rules/testdata/badvaluesfile/Chart.yaml | 6 + .../templates/badvaluesfile.yaml | 0 .../rules/testdata/badvaluesfile/values.yaml | 0 .../v3/lint/rules/testdata/goodone/Chart.yaml | 5 + .../rules/testdata/goodone/crds/test-crd.yaml | 0 .../testdata/goodone/templates/goodone.yaml | 0 .../lint/rules/testdata/goodone/values.yaml | 0 .../testdata/invalidchartfile/Chart.yaml | 0 .../testdata/invalidchartfile/values.yaml | 0 .../rules/testdata/invalidcrdsdir/Chart.yaml | 6 + .../lint/rules/testdata/invalidcrdsdir/crds | 0 .../rules/testdata/invalidcrdsdir/values.yaml | 0 .../testdata/malformed-template/.helmignore | 0 .../testdata/malformed-template/Chart.yaml | 25 + .../malformed-template/templates/bad.yaml | 0 .../testdata/malformed-template/values.yaml | 0 .../testdata/multi-template-fail/Chart.yaml | 21 + .../templates/multi-fail.yaml | 0 .../v3/lint/rules/testdata/v3-fail/Chart.yaml | 21 + .../testdata/v3-fail/templates/_helpers.tpl | 0 .../v3-fail/templates/deployment.yaml | 0 .../testdata/v3-fail/templates/ingress.yaml | 0 .../testdata/v3-fail/templates/service.yaml | 0 .../lint/rules/testdata/v3-fail/values.yaml | 0 .../rules/testdata/withsubchart/Chart.yaml | 16 + .../withsubchart/charts/subchart/Chart.yaml | 6 + .../charts/subchart/templates/subchart.yaml | 0 .../withsubchart/charts/subchart/values.yaml | 0 .../withsubchart/templates/mainchart.yaml | 0 .../rules/testdata/withsubchart/values.yaml | 0 internal/chart/v3/lint/rules/values.go | 79 ++ .../chart/v3}/lint/rules/values_test.go | 0 .../errors_test.go => lint/support/doc.go} | 26 +- .../chart/v3}/lint/support/message.go | 0 .../chart/v3}/lint/support/message_test.go | 0 internal/chart/v3/loader/load.go | 9 +- internal/chart/v3/loader/load_test.go | 5 +- 
internal/chart/v3/util/capabilities.go | 122 --- internal/chart/v3/util/capabilities_test.go | 84 -- internal/chart/v3/util/coalesce.go | 308 -------- internal/chart/v3/util/coalesce_test.go | 723 ------------------ internal/chart/v3/util/create.go | 5 +- internal/chart/v3/util/dependencies.go | 38 +- internal/chart/v3/util/dependencies_test.go | 11 +- internal/chart/v3/util/errors.go | 43 -- internal/chart/v3/util/jsonschema.go | 113 --- internal/chart/v3/util/jsonschema_test.go | 247 ------ internal/chart/v3/util/save.go | 5 +- internal/chart/v3/util/save_test.go | 11 +- internal/chart/v3/util/values.go | 220 ------ internal/chart/v3/util/values_test.go | 293 ------- pkg/action/action.go | 21 +- pkg/action/action_test.go | 20 +- pkg/action/get_values.go | 6 +- pkg/action/hooks_test.go | 9 +- pkg/action/install.go | 12 +- pkg/action/install_test.go | 7 +- pkg/action/lint.go | 9 +- pkg/action/show.go | 3 +- pkg/action/show_test.go | 9 +- pkg/action/upgrade.go | 12 +- pkg/chart/common.go | 219 ++++++ pkg/chart/{v2/util => common}/capabilities.go | 2 +- .../{v2/util => common}/capabilities_test.go | 2 +- pkg/chart/{v2/util => common}/errors.go | 2 +- pkg/chart/{v2/util => common}/errors_test.go | 2 +- .../chart/v3 => pkg/chart/common}/file.go | 2 +- .../util => common}/testdata/coleridge.yaml | 0 pkg/chart/{v2 => common}/util/coalesce.go | 80 +- .../{v2 => common}/util/coalesce_test.go | 18 +- pkg/chart/{v2 => common}/util/jsonschema.go | 21 +- .../{v2 => common}/util/jsonschema_test.go | 7 +- .../testdata/test-values-invalid.schema.json | 0 .../util/testdata/test-values-negative.yaml | 0 .../util/testdata/test-values.schema.json | 0 .../util/testdata/test-values.yaml | 0 pkg/chart/common/util/values.go | 70 ++ pkg/chart/common/util/values_test.go | 111 +++ pkg/chart/{v2/util => common}/values.go | 47 +- pkg/chart/{v2/util => common}/values_test.go | 90 +-- pkg/chart/{v2/file.go => interfaces.go} | 28 +- pkg/chart/v2/chart.go | 14 +- pkg/chart/v2/chart_test.go | 14 +- pkg/{ => chart/v2}/lint/lint.go | 12 +- pkg/{ => chart/v2}/lint/lint_test.go | 2 +- pkg/{ => chart/v2}/lint/rules/chartfile.go | 4 +- .../v2}/lint/rules/chartfile_test.go | 2 +- pkg/{ => chart/v2}/lint/rules/crds.go | 2 +- pkg/{ => chart/v2}/lint/rules/crds_test.go | 2 +- pkg/{ => chart/v2}/lint/rules/dependencies.go | 4 +- .../v2}/lint/rules/dependencies_test.go | 2 +- pkg/chart/v2/lint/rules/deprecations.go | 106 +++ .../v2}/lint/rules/deprecations_test.go | 2 +- pkg/{ => chart/v2}/lint/rules/template.go | 16 +- .../v2}/lint/rules/template_test.go | 9 +- .../lint/rules/testdata/albatross/Chart.yaml | 0 .../testdata/albatross/templates/_helpers.tpl | 16 + .../testdata/albatross/templates/fail.yaml | 1 + .../testdata/albatross/templates/svc.yaml | 19 + .../lint/rules/testdata/albatross/values.yaml | 1 + .../testdata/anotherbadchartfile/Chart.yaml | 0 .../rules/testdata/badchartfile/Chart.yaml | 11 + .../rules/testdata/badchartfile/values.yaml | 1 + .../rules/testdata/badchartname/Chart.yaml | 0 .../rules/testdata/badchartname/values.yaml | 1 + .../lint/rules/testdata/badcrdfile/Chart.yaml | 0 .../badcrdfile/crds/bad-apiversion.yaml | 2 + .../testdata/badcrdfile/crds/bad-crd.yaml | 2 + .../testdata/badcrdfile/templates/.gitkeep | 0 .../rules/testdata/badcrdfile/values.yaml | 1 + .../rules/testdata/badvaluesfile/Chart.yaml | 0 .../templates/badvaluesfile.yaml | 2 + .../rules/testdata/badvaluesfile/values.yaml | 2 + .../lint/rules/testdata/goodone/Chart.yaml | 0 .../rules/testdata/goodone/crds/test-crd.yaml | 19 + 
.../testdata/goodone/templates/goodone.yaml | 2 + .../lint/rules/testdata/goodone/values.yaml | 1 + .../testdata/invalidchartfile/Chart.yaml | 6 + .../testdata/invalidchartfile/values.yaml | 0 .../rules/testdata/invalidcrdsdir/Chart.yaml | 0 .../lint/rules/testdata/invalidcrdsdir/crds | 0 .../rules/testdata/invalidcrdsdir/values.yaml | 1 + .../testdata/malformed-template/.helmignore | 23 + .../testdata/malformed-template/Chart.yaml | 0 .../malformed-template/templates/bad.yaml | 1 + .../testdata/malformed-template/values.yaml | 82 ++ .../testdata/multi-template-fail/Chart.yaml | 0 .../templates/multi-fail.yaml | 13 + .../lint/rules/testdata/v3-fail/Chart.yaml | 0 .../testdata/v3-fail/templates/_helpers.tpl | 63 ++ .../v3-fail/templates/deployment.yaml | 56 ++ .../testdata/v3-fail/templates/ingress.yaml | 62 ++ .../testdata/v3-fail/templates/service.yaml | 17 + .../lint/rules/testdata/v3-fail/values.yaml | 66 ++ .../rules/testdata/withsubchart/Chart.yaml | 0 .../withsubchart/charts/subchart/Chart.yaml | 0 .../charts/subchart/templates/subchart.yaml | 2 + .../withsubchart/charts/subchart/values.yaml | 2 + .../withsubchart/templates/mainchart.yaml | 2 + .../rules/testdata/withsubchart/values.yaml | 0 pkg/{ => chart/v2}/lint/rules/values.go | 13 +- pkg/chart/v2/lint/rules/values_test.go | 169 ++++ pkg/{ => chart/v2}/lint/support/doc.go | 2 +- pkg/chart/v2/lint/support/message.go | 76 ++ pkg/chart/v2/lint/support/message_test.go | 79 ++ pkg/chart/v2/loader/load.go | 13 +- pkg/chart/v2/loader/load_test.go | 5 +- pkg/chart/v2/util/create.go | 5 +- pkg/chart/v2/util/dependencies.go | 38 +- pkg/chart/v2/util/dependencies_test.go | 11 +- pkg/chart/v2/util/save.go | 5 +- pkg/chart/v2/util/save_test.go | 11 +- pkg/cli/values/options_test.go | 2 +- pkg/cmd/helpers_test.go | 4 +- pkg/cmd/lint.go | 6 +- pkg/cmd/status.go | 4 +- pkg/cmd/template.go | 6 +- pkg/cmd/upgrade_test.go | 5 +- pkg/engine/engine.go | 61 +- pkg/engine/engine_test.go | 203 ++--- pkg/engine/files.go | 4 +- pkg/engine/lookup_func.go | 2 +- pkg/release/v1/mock.go | 3 +- pkg/release/v1/util/manifest_sorter.go | 4 +- 195 files changed, 4090 insertions(+), 2702 deletions(-) create mode 100644 internal/chart/v3/lint/lint.go create mode 100644 internal/chart/v3/lint/lint_test.go create mode 100644 internal/chart/v3/lint/rules/chartfile.go create mode 100644 internal/chart/v3/lint/rules/chartfile_test.go create mode 100644 internal/chart/v3/lint/rules/crds.go create mode 100644 internal/chart/v3/lint/rules/crds_test.go create mode 100644 internal/chart/v3/lint/rules/dependencies.go create mode 100644 internal/chart/v3/lint/rules/dependencies_test.go rename {pkg => internal/chart/v3}/lint/rules/deprecations.go (95%) create mode 100644 internal/chart/v3/lint/rules/deprecations_test.go create mode 100644 internal/chart/v3/lint/rules/template.go create mode 100644 internal/chart/v3/lint/rules/template_test.go create mode 100644 internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/albatross/templates/_helpers.tpl (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/albatross/templates/fail.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/albatross/templates/svc.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/albatross/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/badchartfile/Chart.yaml (100%) rename {pkg => 
internal/chart/v3}/lint/rules/testdata/badchartfile/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/badchartname/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/badcrdfile/templates/.gitkeep (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/badcrdfile/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/badvaluesfile/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/goodone/crds/test-crd.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/goodone/templates/goodone.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/goodone/values.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/invalidchartfile/Chart.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/invalidchartfile/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/invalidcrdsdir/crds (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/invalidcrdsdir/values.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/malformed-template/.helmignore (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/malformed-template/templates/bad.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/malformed-template/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/v3-fail/templates/_helpers.tpl (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/v3-fail/templates/deployment.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/v3-fail/templates/ingress.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/v3-fail/templates/service.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/v3-fail/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml create mode 100644 internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml rename {pkg => internal/chart/v3}/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/withsubchart/charts/subchart/values.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/withsubchart/templates/mainchart.yaml (100%) rename {pkg => internal/chart/v3}/lint/rules/testdata/withsubchart/values.yaml (100%) create mode 100644 internal/chart/v3/lint/rules/values.go rename {pkg => internal/chart/v3}/lint/rules/values_test.go (100%) rename 
internal/chart/v3/{util/errors_test.go => lint/support/doc.go} (67%) rename {pkg => internal/chart/v3}/lint/support/message.go (100%) rename {pkg => internal/chart/v3}/lint/support/message_test.go (100%) delete mode 100644 internal/chart/v3/util/capabilities.go delete mode 100644 internal/chart/v3/util/capabilities_test.go delete mode 100644 internal/chart/v3/util/coalesce.go delete mode 100644 internal/chart/v3/util/coalesce_test.go delete mode 100644 internal/chart/v3/util/errors.go delete mode 100644 internal/chart/v3/util/jsonschema.go delete mode 100644 internal/chart/v3/util/jsonschema_test.go delete mode 100644 internal/chart/v3/util/values.go delete mode 100644 internal/chart/v3/util/values_test.go create mode 100644 pkg/chart/common.go rename pkg/chart/{v2/util => common}/capabilities.go (99%) rename pkg/chart/{v2/util => common}/capabilities_test.go (99%) rename pkg/chart/{v2/util => common}/errors.go (98%) rename pkg/chart/{v2/util => common}/errors_test.go (98%) rename {internal/chart/v3 => pkg/chart/common}/file.go (98%) rename pkg/chart/{v2/util => common}/testdata/coleridge.yaml (100%) rename pkg/chart/{v2 => common}/util/coalesce.go (81%) rename pkg/chart/{v2 => common}/util/coalesce_test.go (97%) rename pkg/chart/{v2 => common}/util/jsonschema.go (89%) rename pkg/chart/{v2 => common}/util/jsonschema_test.go (96%) rename pkg/chart/{v2 => common}/util/testdata/test-values-invalid.schema.json (100%) rename pkg/chart/{v2 => common}/util/testdata/test-values-negative.yaml (100%) rename pkg/chart/{v2 => common}/util/testdata/test-values.schema.json (100%) rename pkg/chart/{v2 => common}/util/testdata/test-values.yaml (100%) create mode 100644 pkg/chart/common/util/values.go create mode 100644 pkg/chart/common/util/values_test.go rename pkg/chart/{v2/util => common}/values.go (74%) rename pkg/chart/{v2/util => common}/values_test.go (66%) rename pkg/chart/{v2/file.go => interfaces.go} (60%) rename pkg/{ => chart/v2}/lint/lint.go (83%) rename pkg/{ => chart/v2}/lint/lint_test.go (99%) rename pkg/{ => chart/v2}/lint/rules/chartfile.go (98%) rename pkg/{ => chart/v2}/lint/rules/chartfile_test.go (99%) rename pkg/{ => chart/v2}/lint/rules/crds.go (98%) rename pkg/{ => chart/v2}/lint/rules/crds_test.go (95%) rename pkg/{ => chart/v2}/lint/rules/dependencies.go (96%) rename pkg/{ => chart/v2}/lint/rules/dependencies_test.go (98%) create mode 100644 pkg/chart/v2/lint/rules/deprecations.go rename pkg/{ => chart/v2}/lint/rules/deprecations_test.go (94%) rename pkg/{ => chart/v2}/lint/rules/template.go (95%) rename pkg/{ => chart/v2}/lint/rules/template_test.go (98%) rename pkg/{ => chart/v2}/lint/rules/testdata/albatross/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl create mode 100644 pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/albatross/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/anotherbadchartfile/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/badchartname/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/badcrdfile/Chart.yaml (100%) create mode 100644 
pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep create mode 100644 pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/badvaluesfile/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/goodone/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/goodone/values.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/invalidcrdsdir/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds create mode 100644 pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore rename pkg/{ => chart/v2}/lint/rules/testdata/malformed-template/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/multi-template-fail/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/v3-fail/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl create mode 100644 pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml rename pkg/{ => chart/v2}/lint/rules/testdata/withsubchart/Chart.yaml (100%) rename pkg/{ => chart/v2}/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml (100%) create mode 100644 pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml create mode 100644 pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml rename pkg/{ => chart/v2}/lint/rules/values.go (84%) create mode 100644 pkg/chart/v2/lint/rules/values_test.go rename pkg/{ => chart/v2}/lint/support/doc.go (91%) create mode 100644 pkg/chart/v2/lint/support/message.go create mode 100644 pkg/chart/v2/lint/support/message_test.go diff --git a/.golangci.yml b/.golangci.yml index a9b13c35f..3df31b997 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,6 +33,7 @@ linters: - usetesting exclusions: + generated: lax presets: @@ -41,7 +42,10 @@ linters: - legacy - std-error-handling - rules: [] + rules: + - linters: + - revive + text: 'var-naming: avoid meaningless package names' warn-unused: true diff --git a/Makefile b/Makefile index 5e424bf05..5e1bfc6c2 100644 --- a/Makefile +++ b/Makefile @@ -63,10 +63,12 
@@ K8S_MODULES_VER=$(subst ., ,$(subst v,,$(shell go list -f '{{.Version}}' -m k8s. K8S_MODULES_MAJOR_VER=$(shell echo $$(($(firstword $(K8S_MODULES_VER)) + 1))) K8S_MODULES_MINOR_VER=$(word 2,$(K8S_MODULES_VER)) -LDFLAGS += -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER) -LDFLAGS += -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER) -LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/util.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER) -LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/util.k8sVersionMinor=$(K8S_MODULES_MINOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/chart/v2/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/internal/v3/lint/rules.k8sVersionMinor=$(K8S_MODULES_MINOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common/util.k8sVersionMajor=$(K8S_MODULES_MAJOR_VER) +LDFLAGS += -X helm.sh/helm/v4/pkg/chart/common/util.k8sVersionMinor=$(K8S_MODULES_MINOR_VER) .PHONY: all all: build @@ -112,7 +114,8 @@ test-unit: # based on older versions, this is run separately. When run without the ldflags in the unit test (above) or coverage # test, it still passes with a false-positive result as the resources shouldn’t be deprecated in the older Kubernetes # version if it only starts failing with the latest. - go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./pkg/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)' + go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./pkg/chart/v2/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)' + go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./internal/chart/v3/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)' .PHONY: test-coverage diff --git a/internal/chart/v3/chart.go b/internal/chart/v3/chart.go index 4d59fa5ec..2edc6c339 100644 --- a/internal/chart/v3/chart.go +++ b/internal/chart/v3/chart.go @@ -19,6 +19,8 @@ import ( "path/filepath" "regexp" "strings" + + "helm.sh/helm/v4/pkg/chart/common" ) // APIVersionV3 is the API version number for version 3. @@ -34,20 +36,20 @@ type Chart struct { // // This should not be used except in special cases like `helm show values`, // where we want to display the raw values, comments and all. - Raw []*File `json:"-"` + Raw []*common.File `json:"-"` // Metadata is the contents of the Chartfile. Metadata *Metadata `json:"metadata"` // Lock is the contents of Chart.lock. Lock *Lock `json:"lock"` // Templates for this chart. - Templates []*File `json:"templates"` + Templates []*common.File `json:"templates"` // Values are default config for this chart. Values map[string]interface{} `json:"values"` // Schema is an optional JSON schema for imposing structure on Values Schema []byte `json:"schema"` // Files are miscellaneous files in a chart archive, // e.g. README, LICENSE, etc. - Files []*File `json:"files"` + Files []*common.File `json:"files"` parent *Chart dependencies []*Chart @@ -59,7 +61,7 @@ type CRD struct { // Filename is the File obj Name including (sub-)chart.ChartFullPath Filename string // File is the File obj for the crd - File *File + File *common.File } // SetDependencies replaces the chart dependencies. @@ -134,8 +136,8 @@ func (ch *Chart) AppVersion() string { // CRDs returns a list of File objects in the 'crds/' directory of a Helm chart. 
// Deprecated: use CRDObjects() -func (ch *Chart) CRDs() []*File { - files := []*File{} +func (ch *Chart) CRDs() []*common.File { + files := []*common.File{} // Find all resources in the crds/ directory for _, f := range ch.Files { if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { diff --git a/internal/chart/v3/chart_test.go b/internal/chart/v3/chart_test.go index f93b3356b..b1820ac0a 100644 --- a/internal/chart/v3/chart_test.go +++ b/internal/chart/v3/chart_test.go @@ -20,11 +20,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/pkg/chart/common" ) func TestCRDs(t *testing.T) { chrt := Chart{ - Files: []*File{ + Files: []*common.File{ { Name: "crds/foo.yaml", Data: []byte("hello"), @@ -57,7 +59,7 @@ func TestCRDs(t *testing.T) { func TestSaveChartNoRawData(t *testing.T) { chrt := Chart{ - Raw: []*File{ + Raw: []*common.File{ { Name: "fhqwhgads.yaml", Data: []byte("Everybody to the Limit"), @@ -76,7 +78,7 @@ func TestSaveChartNoRawData(t *testing.T) { t.Fatal(err) } - is.Equal([]*File(nil), res.Raw) + is.Equal([]*common.File(nil), res.Raw) } func TestMetadata(t *testing.T) { @@ -162,7 +164,7 @@ func TestChartFullPath(t *testing.T) { func TestCRDObjects(t *testing.T) { chrt := Chart{ - Files: []*File{ + Files: []*common.File{ { Name: "crds/foo.yaml", Data: []byte("hello"), @@ -190,7 +192,7 @@ func TestCRDObjects(t *testing.T) { { Name: "crds/foo.yaml", Filename: "crds/foo.yaml", - File: &File{ + File: &common.File{ Name: "crds/foo.yaml", Data: []byte("hello"), }, @@ -198,7 +200,7 @@ func TestCRDObjects(t *testing.T) { { Name: "crds/foo/bar/baz.yaml", Filename: "crds/foo/bar/baz.yaml", - File: &File{ + File: &common.File{ Name: "crds/foo/bar/baz.yaml", Data: []byte("hello"), }, diff --git a/internal/chart/v3/lint/lint.go b/internal/chart/v3/lint/lint.go new file mode 100644 index 000000000..231bb6803 --- /dev/null +++ b/internal/chart/v3/lint/lint.go @@ -0,0 +1,66 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lint // import "helm.sh/helm/v4/internal/chart/v3/lint" + +import ( + "path/filepath" + + "helm.sh/helm/v4/internal/chart/v3/lint/rules" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + "helm.sh/helm/v4/pkg/chart/common" +) + +type linterOptions struct { + KubeVersion *common.KubeVersion + SkipSchemaValidation bool +} + +type LinterOption func(lo *linterOptions) + +func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption { + return func(lo *linterOptions) { + lo.KubeVersion = kubeVersion + } +} + +func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption { + return func(lo *linterOptions) { + lo.SkipSchemaValidation = skipSchemaValidation + } +} + +func RunAll(baseDir string, values map[string]interface{}, namespace string, options ...LinterOption) support.Linter { + + chartDir, _ := filepath.Abs(baseDir) + + lo := linterOptions{} + for _, option := range options { + option(&lo) + } + + result := support.Linter{ + ChartDir: chartDir, + } + + rules.Chartfile(&result) + rules.ValuesWithOverrides(&result, values) + rules.TemplatesWithSkipSchemaValidation(&result, values, namespace, lo.KubeVersion, lo.SkipSchemaValidation) + rules.Dependencies(&result) + rules.Crds(&result) + + return result +} diff --git a/internal/chart/v3/lint/lint_test.go b/internal/chart/v3/lint/lint_test.go new file mode 100644 index 000000000..af44cc58d --- /dev/null +++ b/internal/chart/v3/lint/lint_test.go @@ -0,0 +1,246 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lint + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/internal/chart/v3/lint/support" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" +) + +var values map[string]interface{} + +const namespace = "testNamespace" + +const badChartDir = "rules/testdata/badchartfile" +const badValuesFileDir = "rules/testdata/badvaluesfile" +const badYamlFileDir = "rules/testdata/albatross" +const badCrdFileDir = "rules/testdata/badcrdfile" +const goodChartDir = "rules/testdata/goodone" +const subChartValuesDir = "rules/testdata/withsubchart" +const malformedTemplate = "rules/testdata/malformed-template" +const invalidChartFileDir = "rules/testdata/invalidchartfile" + +func TestBadChartV3(t *testing.T) { + m := RunAll(badChartDir, values, namespace).Messages + if len(m) != 8 { + t.Errorf("Number of errors %v", len(m)) + t.Errorf("All didn't fail with expected errors, got %#v", m) + } + // There should be one INFO, one WARNING, and 2 ERROR messages, check for them + var i, w, e, e2, e3, e4, e5, e6 bool + for _, msg := range m { + if msg.Severity == support.InfoSev { + if strings.Contains(msg.Err.Error(), "icon is recommended") { + i = true + } + } + if msg.Severity == support.WarningSev { + if strings.Contains(msg.Err.Error(), "does not exist") { + w = true + } + } + if msg.Severity == support.ErrorSev { + if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVer") { + e = true + } + if strings.Contains(msg.Err.Error(), "name is required") { + e2 = true + } + + if strings.Contains(msg.Err.Error(), "apiVersion is required. The value must be \"v3\"") { + e3 = true + } + + if strings.Contains(msg.Err.Error(), "chart type is not valid in apiVersion") { + e4 = true + } + + if strings.Contains(msg.Err.Error(), "dependencies are not valid in the Chart file with apiVersion") { + e5 = true + } + // This comes from the dependency check, which loads dependency info from the Chart.yaml + if strings.Contains(msg.Err.Error(), "unable to load chart") { + e6 = true + } + } + } + if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w { + t.Errorf("Didn't find all the expected errors, got %#v", m) + } +} + +func TestInvalidYaml(t *testing.T) { + m := RunAll(badYamlFileDir, values, namespace).Messages + if len(m) != 1 { + t.Fatalf("All didn't fail with expected errors, got %#v", m) + } + if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") { + t.Errorf("All didn't have the error for deliberateSyntaxError") + } +} + +func TestInvalidChartYamlV3(t *testing.T) { + m := RunAll(invalidChartFileDir, values, namespace).Messages + t.Log(m) + if len(m) != 3 { + t.Fatalf("All didn't fail with expected errors, got %#v", m) + } + if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") { + t.Errorf("All didn't have the error for duplicate YAML keys") + } +} + +func TestBadValuesV3(t *testing.T) { + m := RunAll(badValuesFileDir, values, namespace).Messages + if len(m) < 1 { + t.Fatalf("All didn't fail with expected errors, got %#v", m) + } + if !strings.Contains(m[0].Err.Error(), "unable to parse YAML") { + t.Errorf("All didn't have the error for invalid key format: %s", m[0].Err) + } +} + +func TestBadCrdFileV3(t *testing.T) { + m := RunAll(badCrdFileDir, values, namespace).Messages + assert.Lenf(t, m, 2, "All didn't fail with expected errors, got %#v", m) + assert.ErrorContains(t, m[0].Err, "apiVersion is not in 'apiextensions.k8s.io'") + assert.ErrorContains(t, m[1].Err, "object kind is not 
'CustomResourceDefinition'") +} + +func TestGoodChart(t *testing.T) { + m := RunAll(goodChartDir, values, namespace).Messages + if len(m) != 0 { + t.Error("All returned linter messages when it shouldn't have") + for i, msg := range m { + t.Logf("Message %d: %s", i, msg) + } + } +} + +// TestHelmCreateChart tests that a `helm create` always passes a `helm lint` test. +// +// See https://github.com/helm/helm/issues/7923 +func TestHelmCreateChart(t *testing.T) { + dir := t.TempDir() + + createdChart, err := chartutil.Create("testhelmcreatepasseslint", dir) + if err != nil { + t.Error(err) + // Fatal is bad because of the defer. + return + } + + // Note: we test with strict=true here, even though others have + // strict = false. + m := RunAll(createdChart, values, namespace, WithSkipSchemaValidation(true)).Messages + if ll := len(m); ll != 1 { + t.Errorf("All should have had exactly 1 error. Got %d", ll) + for i, msg := range m { + t.Logf("Message %d: %s", i, msg.Error()) + } + } else if msg := m[0].Err.Error(); !strings.Contains(msg, "icon is recommended") { + t.Errorf("Unexpected lint error: %s", msg) + } +} + +// TestHelmCreateChart_CheckDeprecatedWarnings checks if any default template created by `helm create` throws +// deprecated warnings in the linter check against the current Kubernetes version (provided using ldflags). +// +// See https://github.com/helm/helm/issues/11495 +// +// Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent +// of the `--set` flag. +// +// Note: This test requires the following ldflags to be set per the current Kubernetes version to avoid false-positive +// results. +// 1. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMajor= +// 2. -X helm.sh/helm/v4/pkg/lint/rules.k8sVersionMinor= +// or directly use '$(LDFLAGS)' in Makefile. +// +// When run without ldflags, the test passes giving a false-positive result. This is because the variables +// `k8sVersionMajor` and `k8sVersionMinor` by default are set to an older version of Kubernetes, with which, there +// might not be the deprecation warning. +func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) { + createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir()) + if err != nil { + t.Error(err) + return + } + + // Add values to enable hpa, and ingress which are disabled by default. + // This is the equivalent of: + // helm lint checkdeprecatedwarnings --set 'autoscaling.enabled=true,ingress.enabled=true' + updatedValues := map[string]interface{}{ + "autoscaling": map[string]interface{}{ + "enabled": true, + }, + "ingress": map[string]interface{}{ + "enabled": true, + }, + } + + linterRunDetails := RunAll(createdChart, updatedValues, namespace, WithSkipSchemaValidation(true)) + for _, msg := range linterRunDetails.Messages { + if strings.HasPrefix(msg.Error(), "[WARNING]") && + strings.Contains(msg.Error(), "deprecated") { + // When there is a deprecation warning for an object created + // by `helm create` for the current Kubernetes version, fail. 
+ t.Errorf("Unexpected deprecation warning for %q: %s", msg.Path, msg.Error()) + } + } +} + +// lint ignores import-values +// See https://github.com/helm/helm/issues/9658 +func TestSubChartValuesChart(t *testing.T) { + m := RunAll(subChartValuesDir, values, namespace).Messages + if len(m) != 0 { + t.Error("All returned linter messages when it shouldn't have") + for i, msg := range m { + t.Logf("Message %d: %s", i, msg) + } + } +} + +// lint stuck with malformed template object +// See https://github.com/helm/helm/issues/11391 +func TestMalformedTemplate(t *testing.T) { + c := time.After(3 * time.Second) + ch := make(chan int, 1) + var m []support.Message + go func() { + m = RunAll(malformedTemplate, values, namespace).Messages + ch <- 1 + }() + select { + case <-c: + t.Fatalf("lint malformed template timeout") + case <-ch: + if len(m) != 1 { + t.Fatalf("All didn't fail with expected errors, got %#v", m) + } + if !strings.Contains(m[0].Err.Error(), "invalid character '{'") { + t.Errorf("All didn't have the error for invalid character '{'") + } + } +} diff --git a/internal/chart/v3/lint/rules/chartfile.go b/internal/chart/v3/lint/rules/chartfile.go new file mode 100644 index 000000000..e72a0d3b2 --- /dev/null +++ b/internal/chart/v3/lint/rules/chartfile.go @@ -0,0 +1,225 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules" + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/Masterminds/semver/v3" + "github.com/asaskevich/govalidator" + "sigs.k8s.io/yaml" + + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" +) + +// Chartfile runs a set of linter rules related to Chart.yaml file +func Chartfile(linter *support.Linter) { + chartFileName := "Chart.yaml" + chartPath := filepath.Join(linter.ChartDir, chartFileName) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath)) + + chartFile, err := chartutil.LoadChartfile(chartPath) + validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err)) + + // Guard clause. Following linter rules require a parsable ChartFile + if !validChartFile { + return + } + + _, err = chartutil.StrictLoadChartfile(chartPath) + linter.RunLinterRule(support.WarningSev, chartFileName, validateChartYamlStrictFormat(err)) + + // type check for Chart.yaml . 
ignoring error as any parse + // errors would already be caught in the above load function + chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile)) + + // Chart metadata + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile)) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile)) + linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) +} + +func validateChartVersionType(data map[string]interface{}) error { + return isStringValue(data, "version") +} + +func validateChartAppVersionType(data map[string]interface{}) error { + return isStringValue(data, "appVersion") +} + +func isStringValue(data map[string]interface{}, key string) error { + value, ok := data[key] + if !ok { + return nil + } + valueType := fmt.Sprintf("%T", value) + if valueType != "string" { + return fmt.Errorf("%s should be of type string but it's of type %s", key, valueType) + } + return nil +} + +func validateChartYamlNotDirectory(chartPath string) error { + fi, err := os.Stat(chartPath) + + if err == nil && fi.IsDir() { + return errors.New("should be a file, not a directory") + } + return nil +} + +func validateChartYamlFormat(chartFileError error) error { + if chartFileError != nil { + return fmt.Errorf("unable to parse YAML\n\t%w", chartFileError) + } + return nil +} + +func validateChartYamlStrictFormat(chartFileError error) error { + if chartFileError != nil { + return fmt.Errorf("failed to strictly parse chart metadata file\n\t%w", chartFileError) + } + return nil +} + +func validateChartName(cf *chart.Metadata) error { + if cf.Name == "" { + return errors.New("name is required") + } + name := filepath.Base(cf.Name) + if name != cf.Name { + return fmt.Errorf("chart name %q is invalid", cf.Name) + } + return nil +} + +func validateChartAPIVersion(cf *chart.Metadata) error { + if cf.APIVersion == "" { + return errors.New("apiVersion is required. The value must be \"v3\"") + } + + if cf.APIVersion != chart.APIVersionV3 { + return fmt.Errorf("apiVersion '%s' is not valid. 
The value must be \"v3\"", cf.APIVersion) + } + + return nil +} + +func validateChartVersion(cf *chart.Metadata) error { + if cf.Version == "" { + return errors.New("version is required") + } + + version, err := semver.NewVersion(cf.Version) + if err != nil { + return fmt.Errorf("version '%s' is not a valid SemVer", cf.Version) + } + + c, err := semver.NewConstraint(">0.0.0-0") + if err != nil { + return err + } + valid, msg := c.Validate(version) + + if !valid && len(msg) > 0 { + return fmt.Errorf("version %v", msg[0]) + } + + return nil +} + +func validateChartMaintainer(cf *chart.Metadata) error { + for _, maintainer := range cf.Maintainers { + if maintainer == nil { + return errors.New("a maintainer entry is empty") + } + if maintainer.Name == "" { + return errors.New("each maintainer requires a name") + } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { + return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name) + } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) { + return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name) + } + } + return nil +} + +func validateChartSources(cf *chart.Metadata) error { + for _, source := range cf.Sources { + if source == "" || !govalidator.IsRequestURL(source) { + return fmt.Errorf("invalid source URL '%s'", source) + } + } + return nil +} + +func validateChartIconPresence(cf *chart.Metadata) error { + if cf.Icon == "" { + return errors.New("icon is recommended") + } + return nil +} + +func validateChartIconURL(cf *chart.Metadata) error { + if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) { + return fmt.Errorf("invalid icon URL '%s'", cf.Icon) + } + return nil +} + +func validateChartDependencies(cf *chart.Metadata) error { + if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV3 { + return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3) + } + return nil +} + +func validateChartType(cf *chart.Metadata) error { + if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV3 { + return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3) + } + return nil +} + +// loadChartFileForTypeCheck loads the Chart.yaml +// in a generic form of a map[string]interface{}, so that the type +// of the values can be checked +func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + y := make(map[string]interface{}) + err = yaml.Unmarshal(b, &y) + return y, err +} diff --git a/internal/chart/v3/lint/rules/chartfile_test.go b/internal/chart/v3/lint/rules/chartfile_test.go new file mode 100644 index 000000000..070cc244d --- /dev/null +++ b/internal/chart/v3/lint/rules/chartfile_test.go @@ -0,0 +1,276 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" + + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" +) + +const ( + badChartNameDir = "testdata/badchartname" + badChartDir = "testdata/badchartfile" + anotherBadChartDir = "testdata/anotherbadchartfile" +) + +var ( + badChartNamePath = filepath.Join(badChartNameDir, "Chart.yaml") + badChartFilePath = filepath.Join(badChartDir, "Chart.yaml") + nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml") +) + +var badChart, _ = chartutil.LoadChartfile(badChartFilePath) +var badChartName, _ = chartutil.LoadChartfile(badChartNamePath) + +// Validation functions Test +func TestValidateChartYamlNotDirectory(t *testing.T) { + _ = os.Mkdir(nonExistingChartFilePath, os.ModePerm) + defer os.Remove(nonExistingChartFilePath) + + err := validateChartYamlNotDirectory(nonExistingChartFilePath) + if err == nil { + t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error") + } +} + +func TestValidateChartYamlFormat(t *testing.T) { + err := validateChartYamlFormat(errors.New("Read error")) + if err == nil { + t.Errorf("validateChartYamlFormat to return a linter error, got no error") + } + + err = validateChartYamlFormat(nil) + if err != nil { + t.Errorf("validateChartYamlFormat to return no error, got a linter error") + } +} + +func TestValidateChartName(t *testing.T) { + err := validateChartName(badChart) + if err == nil { + t.Errorf("validateChartName to return a linter error, got no error") + } + + err = validateChartName(badChartName) + if err == nil { + t.Error("expected validateChartName to return a linter error for an invalid name, got no error") + } +} + +func TestValidateChartVersion(t *testing.T) { + var failTest = []struct { + Version string + ErrorMsg string + }{ + {"", "version is required"}, + {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"}, + {"waps", "'waps' is not a valid SemVer"}, + {"-3", "'-3' is not a valid SemVer"}, + } + + var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"} + + for _, test := range failTest { + badChart.Version = test.Version + err := validateChartVersion(badChart) + if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) { + t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg) + } + } + + for _, version := range successTest { + badChart.Version = version + err := validateChartVersion(badChart) + if err != nil { + t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version) + } + } +} + +func TestValidateChartMaintainer(t *testing.T) { + var failTest = []struct { + Name string + Email string + ErrorMsg string + }{ + {"", "", "each maintainer requires a name"}, + {"", "test@test.com", "each maintainer requires a name"}, + {"John Snow", "wrongFormatEmail.com", "invalid email"}, + } + + var successTest = []struct { + Name string + Email string + }{ + {"John Snow", ""}, + {"John Snow", "john@winterfell.com"}, + } + + for _, test := range failTest { + badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}} + err := validateChartMaintainer(badChart) + if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) { + t.Errorf("validateChartMaintainer(%s, %s) to return \"%s\", got no error", test.Name, test.Email, test.ErrorMsg) + } + } + + for _, test := range successTest { + badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, 
Email: test.Email}} + err := validateChartMaintainer(badChart) + if err != nil { + t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error()) + } + } + + // Testing for an empty maintainer + badChart.Maintainers = []*chart.Maintainer{nil} + err := validateChartMaintainer(badChart) + if err == nil { + t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected") + } + if err.Error() != "a maintainer entry is empty" { + t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error()) + } +} + +func TestValidateChartSources(t *testing.T) { + var failTest = []string{"", "RiverRun", "john@winterfell", "riverrun.io"} + var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish"} + for _, test := range failTest { + badChart.Sources = []string{test} + err := validateChartSources(badChart) + if err == nil || !strings.Contains(err.Error(), "invalid source URL") { + t.Errorf("validateChartSources(%s) to return \"invalid source URL\", got no error", test) + } + } + + for _, test := range successTest { + badChart.Sources = []string{test} + err := validateChartSources(badChart) + if err != nil { + t.Errorf("validateChartSources(%s) to return no error, got %s", test, err.Error()) + } + } +} + +func TestValidateChartIconPresence(t *testing.T) { + t.Run("Icon absent", func(t *testing.T) { + testChart := &chart.Metadata{ + Icon: "", + } + + err := validateChartIconPresence(testChart) + + if err == nil { + t.Errorf("validateChartIconPresence to return a linter error, got no error") + } else if !strings.Contains(err.Error(), "icon is recommended") { + t.Errorf("expected %q, got %q", "icon is recommended", err.Error()) + } + }) + t.Run("Icon present", func(t *testing.T) { + testChart := &chart.Metadata{ + Icon: "http://example.org/icon.png", + } + + err := validateChartIconPresence(testChart) + + if err != nil { + t.Errorf("Unexpected error: %q", err.Error()) + } + }) +} + +func TestValidateChartIconURL(t *testing.T) { + var failTest = []string{"RiverRun", "john@winterfell", "riverrun.io"} + var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish.png"} + for _, test := range failTest { + badChart.Icon = test + err := validateChartIconURL(badChart) + if err == nil || !strings.Contains(err.Error(), "invalid icon URL") { + t.Errorf("validateChartIconURL(%s) to return \"invalid icon URL\", got no error", test) + } + } + + for _, test := range successTest { + badChart.Icon = test + err := validateChartSources(badChart) + if err != nil { + t.Errorf("validateChartIconURL(%s) to return no error, got %s", test, err.Error()) + } + } +} + +func TestV3Chartfile(t *testing.T) { + t.Run("Chart.yaml basic validity issues", func(t *testing.T) { + linter := support.Linter{ChartDir: badChartDir} + Chartfile(&linter) + msgs := linter.Messages + expectedNumberOfErrorMessages := 6 + + if len(msgs) != expectedNumberOfErrorMessages { + t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs)) + return + } + + if !strings.Contains(msgs[0].Err.Error(), "name is required") { + t.Errorf("Unexpected message 0: %s", msgs[0].Err) + } + + if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. 
The value must be \"v3\"") { + t.Errorf("Unexpected message 1: %s", msgs[1].Err) + } + + if !strings.Contains(msgs[2].Err.Error(), "version '0.0.0.0' is not a valid SemVer") { + t.Errorf("Unexpected message 2: %s", msgs[2].Err) + } + + if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") { + t.Errorf("Unexpected message 3: %s", msgs[3].Err) + } + }) + + t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) { + linter := support.Linter{ChartDir: anotherBadChartDir} + Chartfile(&linter) + msgs := linter.Messages + expectedNumberOfErrorMessages := 3 + + if len(msgs) != expectedNumberOfErrorMessages { + t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs)) + return + } + + if !strings.Contains(msgs[0].Err.Error(), "version should be of type string") { + t.Errorf("Unexpected message 0: %s", msgs[0].Err) + } + + if !strings.Contains(msgs[1].Err.Error(), "version '7.2445e+06' is not a valid SemVer") { + t.Errorf("Unexpected message 1: %s", msgs[1].Err) + } + + if !strings.Contains(msgs[2].Err.Error(), "appVersion should be of type string") { + t.Errorf("Unexpected message 2: %s", msgs[2].Err) + } + }) +} diff --git a/internal/chart/v3/lint/rules/crds.go b/internal/chart/v3/lint/rules/crds.go new file mode 100644 index 000000000..6bafb52eb --- /dev/null +++ b/internal/chart/v3/lint/rules/crds.go @@ -0,0 +1,113 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/util/yaml" + + "helm.sh/helm/v4/internal/chart/v3/lint/support" + "helm.sh/helm/v4/internal/chart/v3/loader" +) + +// Crds lints the CRDs in the Linter. +func Crds(linter *support.Linter) { + fpath := "crds/" + crdsPath := filepath.Join(linter.ChartDir, fpath) + + // crds directory is optional + if _, err := os.Stat(crdsPath); errors.Is(err, fs.ErrNotExist) { + return + } + + crdsDirValid := linter.RunLinterRule(support.ErrorSev, fpath, validateCrdsDir(crdsPath)) + if !crdsDirValid { + return + } + + // Load chart and parse CRDs + chart, err := loader.Load(linter.ChartDir) + + chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err) + + if !chartLoaded { + return + } + + /* Iterate over all the CRDs to check: + 1. It is a YAML file and not a template + 2. The API version is apiextensions.k8s.io + 3. The kind is CustomResourceDefinition + */ + for _, crd := range chart.CRDObjects() { + fileName := crd.Name + fpath = fileName + + decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(crd.File.Data), 4096) + for { + var yamlStruct *k8sYamlStruct + + err := decoder.Decode(&yamlStruct) + if err == io.EOF { + break + } + + // If YAML parsing fails here, it will always fail in the next block as well, so we should return here. + // This also confirms the YAML is not a template, since templates can't be decoded into a K8sYamlStruct. 
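+			// As an illustration (a hypothetical document, not taken from the chart testdata),
+			// a minimal body that satisfies both CRD rules below would be:
+			//
+			//	apiVersion: apiextensions.k8s.io/v1
+			//	kind: CustomResourceDefinition
+			//	metadata:
+			//	  name: crontabs.example.com
+			//
+			// Only the apiVersion and kind of each decoded document are inspected here; the
+			// rest of the CRD spec is not validated by this rule.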
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) { + return + } + + linter.RunLinterRule(support.ErrorSev, fpath, validateCrdAPIVersion(yamlStruct)) + linter.RunLinterRule(support.ErrorSev, fpath, validateCrdKind(yamlStruct)) + } + } +} + +// Validation functions +func validateCrdsDir(crdsPath string) error { + fi, err := os.Stat(crdsPath) + if err != nil { + return err + } + if !fi.IsDir() { + return errors.New("not a directory") + } + return nil +} + +func validateCrdAPIVersion(obj *k8sYamlStruct) error { + if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") { + return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'") + } + return nil +} + +func validateCrdKind(obj *k8sYamlStruct) error { + if obj.Kind != "CustomResourceDefinition" { + return fmt.Errorf("object kind is not 'CustomResourceDefinition'") + } + return nil +} diff --git a/internal/chart/v3/lint/rules/crds_test.go b/internal/chart/v3/lint/rules/crds_test.go new file mode 100644 index 000000000..d93e3d978 --- /dev/null +++ b/internal/chart/v3/lint/rules/crds_test.go @@ -0,0 +1,36 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/internal/chart/v3/lint/support" +) + +const invalidCrdsDir = "./testdata/invalidcrdsdir" + +func TestInvalidCrdsDir(t *testing.T) { + linter := support.Linter{ChartDir: invalidCrdsDir} + Crds(&linter) + res := linter.Messages + + assert.Len(t, res, 1) + assert.ErrorContains(t, res[0].Err, "not a directory") +} diff --git a/internal/chart/v3/lint/rules/dependencies.go b/internal/chart/v3/lint/rules/dependencies.go new file mode 100644 index 000000000..f45153728 --- /dev/null +++ b/internal/chart/v3/lint/rules/dependencies.go @@ -0,0 +1,101 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules" + +import ( + "fmt" + "strings" + + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + "helm.sh/helm/v4/internal/chart/v3/loader" +) + +// Dependencies runs lints against a chart's dependencies +// +// See https://github.com/helm/helm/issues/7910 +func Dependencies(linter *support.Linter) { + c, err := loader.LoadDir(linter.ChartDir) + if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) { + return + } + + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c)) + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c)) + linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c)) +} + +func validateChartFormat(chartError error) error { + if chartError != nil { + return fmt.Errorf("unable to load chart\n\t%w", chartError) + } + return nil +} + +func validateDependencyInChartsDir(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Dependencies() { + dependencies[dep.Metadata.Name] = struct{}{} + } + for _, dep := range c.Metadata.Dependencies { + if _, ok := dependencies[dep.Name]; !ok { + missing = append(missing, dep.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependencyInMetadata(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Metadata.Dependencies { + dependencies[dep.Name] = struct{}{} + } + for _, dep := range c.Dependencies() { + if _, ok := dependencies[dep.Metadata.Name]; !ok { + missing = append(missing, dep.Metadata.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependenciesUnique(c *chart.Chart) (err error) { + dependencies := map[string]*chart.Dependency{} + shadowing := []string{} + + for _, dep := range c.Metadata.Dependencies { + key := dep.Name + if dep.Alias != "" { + key = dep.Alias + } + if dependencies[key] != nil { + shadowing = append(shadowing, key) + } + dependencies[key] = dep + } + if len(shadowing) > 0 { + err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ",")) + } + return err +} diff --git a/internal/chart/v3/lint/rules/dependencies_test.go b/internal/chart/v3/lint/rules/dependencies_test.go new file mode 100644 index 000000000..b80e4b8a9 --- /dev/null +++ b/internal/chart/v3/lint/rules/dependencies_test.go @@ -0,0 +1,157 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package rules + +import ( + "path/filepath" + "testing" + + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" +) + +func chartWithBadDependencies() chart.Chart { + badChartDeps := chart.Chart{ + Metadata: &chart.Metadata{ + Name: "badchart", + Version: "0.1.0", + APIVersion: "v2", + Dependencies: []*chart.Dependency{ + { + Name: "sub2", + }, + { + Name: "sub3", + }, + }, + }, + } + + badChartDeps.SetDependencies( + &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "sub1", + Version: "0.1.0", + APIVersion: "v2", + }, + }, + &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "sub2", + Version: "0.1.0", + APIVersion: "v2", + }, + }, + ) + return badChartDeps +} + +func TestValidateDependencyInChartsDir(t *testing.T) { + c := chartWithBadDependencies() + + if err := validateDependencyInChartsDir(&c); err == nil { + t.Error("chart should have been flagged for missing deps in chart directory") + } +} + +func TestValidateDependencyInMetadata(t *testing.T) { + c := chartWithBadDependencies() + + if err := validateDependencyInMetadata(&c); err == nil { + t.Errorf("chart should have been flagged for missing deps in chart metadata") + } +} + +func TestValidateDependenciesUnique(t *testing.T) { + tests := []struct { + chart chart.Chart + }{ + {chart.Chart{ + Metadata: &chart.Metadata{ + Name: "badchart", + Version: "0.1.0", + APIVersion: "v2", + Dependencies: []*chart.Dependency{ + { + Name: "foo", + }, + { + Name: "foo", + }, + }, + }, + }}, + {chart.Chart{ + Metadata: &chart.Metadata{ + Name: "badchart", + Version: "0.1.0", + APIVersion: "v2", + Dependencies: []*chart.Dependency{ + { + Name: "foo", + Alias: "bar", + }, + { + Name: "bar", + }, + }, + }, + }}, + {chart.Chart{ + Metadata: &chart.Metadata{ + Name: "badchart", + Version: "0.1.0", + APIVersion: "v2", + Dependencies: []*chart.Dependency{ + { + Name: "foo", + Alias: "baz", + }, + { + Name: "bar", + Alias: "baz", + }, + }, + }, + }}, + } + + for _, tt := range tests { + if err := validateDependenciesUnique(&tt.chart); err == nil { + t.Errorf("chart should have been flagged for dependency shadowing") + } + } +} + +func TestDependencies(t *testing.T) { + tmp := t.TempDir() + + c := chartWithBadDependencies() + err := chartutil.SaveDir(&c, tmp) + if err != nil { + t.Fatal(err) + } + linter := support.Linter{ChartDir: filepath.Join(tmp, c.Metadata.Name)} + + Dependencies(&linter) + if l := len(linter.Messages); l != 2 { + t.Errorf("expected 2 linter errors for bad chart dependencies. Got %d.", l) + for i, msg := range linter.Messages { + t.Logf("Message: %d, Error: %#v", i, msg) + } + } +} diff --git a/pkg/lint/rules/deprecations.go b/internal/chart/v3/lint/rules/deprecations.go similarity index 95% rename from pkg/lint/rules/deprecations.go rename to internal/chart/v3/lint/rules/deprecations.go index c6d635a5e..6f86bdbbd 100644 --- a/pkg/lint/rules/deprecations.go +++ b/internal/chart/v3/lint/rules/deprecations.go @@ -14,18 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rules // import "helm.sh/helm/v4/pkg/lint/rules" +package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules" import ( "fmt" "strconv" + "helm.sh/helm/v4/pkg/chart/common" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/deprecation" kscheme "k8s.io/client-go/kubernetes/scheme" - - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" ) var ( @@ -47,7 +47,7 @@ func (e deprecatedAPIError) Error() string { return msg } -func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *chartutil.KubeVersion) error { +func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error { // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation if resource.APIVersion == "" { return nil diff --git a/internal/chart/v3/lint/rules/deprecations_test.go b/internal/chart/v3/lint/rules/deprecations_test.go new file mode 100644 index 000000000..35e541e5c --- /dev/null +++ b/internal/chart/v3/lint/rules/deprecations_test.go @@ -0,0 +1,41 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules" + +import "testing" + +func TestValidateNoDeprecations(t *testing.T) { + deprecated := &k8sYamlStruct{ + APIVersion: "extensions/v1beta1", + Kind: "Deployment", + } + err := validateNoDeprecations(deprecated, nil) + if err == nil { + t.Fatal("Expected deprecated extension to be flagged") + } + depErr := err.(deprecatedAPIError) + if depErr.Message == "" { + t.Fatalf("Expected error message to be non-blank: %v", err) + } + + if err := validateNoDeprecations(&k8sYamlStruct{ + APIVersion: "v1", + Kind: "Pod", + }, nil); err != nil { + t.Errorf("Expected a v1 Pod to not be deprecated") + } +} diff --git a/internal/chart/v3/lint/rules/template.go b/internal/chart/v3/lint/rules/template.go new file mode 100644 index 000000000..d4c62839f --- /dev/null +++ b/internal/chart/v3/lint/rules/template.go @@ -0,0 +1,348 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "slices" + "strings" + + "k8s.io/apimachinery/pkg/api/validation" + apipath "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + + "helm.sh/helm/v4/internal/chart/v3/lint/support" + "helm.sh/helm/v4/internal/chart/v3/loader" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" + "helm.sh/helm/v4/pkg/engine" +) + +// Templates lints the templates in the Linter. +func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) { + TemplatesWithKubeVersion(linter, values, namespace, nil) +} + +// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version. +func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) { + TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false) +} + +// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not. +func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) { + fpath := "templates/" + templatesPath := filepath.Join(linter.ChartDir, fpath) + + // Templates directory is optional for now + templatesDirExists := linter.RunLinterRule(support.WarningSev, fpath, templatesDirExists(templatesPath)) + if !templatesDirExists { + return + } + + validTemplatesDir := linter.RunLinterRule(support.ErrorSev, fpath, validateTemplatesDir(templatesPath)) + if !validTemplatesDir { + return + } + + // Load chart and parse templates + chart, err := loader.Load(linter.ChartDir) + + chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err) + + if !chartLoaded { + return + } + + options := common.ReleaseOptions{ + Name: "test-release", + Namespace: namespace, + } + + caps := common.DefaultCapabilities.Copy() + if kubeVersion != nil { + caps.KubeVersion = *kubeVersion + } + + // lint ignores import-values + // See https://github.com/helm/helm/issues/9658 + if err := chartutil.ProcessDependencies(chart, values); err != nil { + return + } + + cvals, err := util.CoalesceValues(chart, values) + if err != nil { + return + } + + valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation) + if err != nil { + linter.RunLinterRule(support.ErrorSev, fpath, err) + return + } + var e engine.Engine + e.LintMode = true + renderedContentMap, err := e.Render(chart, valuesToRender) + + renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err) + + if !renderOk { + return + } + + /* Iterate over all the templates to check: + - It is a .yaml file + - All the values in the template file is defined + - {{}} include | quote + - Generated content is a valid Yaml file + - Metadata.Namespace is not set + */ + for _, template := range chart.Templates { + fileName := template.Name + fpath = fileName + + linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName)) + + // We only apply the following lint rules to yaml files + if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" { + continue + } + + // NOTE: disabled for now, Refs 
https://github.com/helm/helm/issues/1463 + // Check that all the templates have a matching value + // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate)) + + // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037 + // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate))) + + renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)] + if strings.TrimSpace(renderedContent) != "" { + linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent)) + + decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096) + + // Lint all resources if the file contains multiple documents separated by --- + for { + // Even though k8sYamlStruct only defines a few fields, an error in any other + // key will be raised as well + var yamlStruct *k8sYamlStruct + + err := decoder.Decode(&yamlStruct) + if err == io.EOF { + break + } + + // If YAML linting fails here, it will always fail in the next block as well, so we should return here. + // fix https://github.com/helm/helm/issues/11391 + if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) { + return + } + if yamlStruct != nil { + // NOTE: set to warnings to allow users to support out-of-date kubernetes + // Refs https://github.com/helm/helm/issues/8596 + linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct)) + linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion)) + + linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent)) + linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent)) + } + } + } + } +} + +// validateTopIndentLevel checks that the content does not start with an indent level > 0. +// +// This error can occur when a template accidentally inserts space. It can cause +// unpredictable errors depending on whether the text is normalized before being passed +// into the YAML parser. So we trap it here. +// +// See https://github.com/helm/helm/issues/8467 +func validateTopIndentLevel(content string) error { + // Read lines until we get to a non-empty one + scanner := bufio.NewScanner(bytes.NewBufferString(content)) + for scanner.Scan() { + line := scanner.Text() + // If line is empty, skip + if strings.TrimSpace(line) == "" { + continue + } + // If it starts with one or more spaces, this is an error + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line) + } + // Any other condition passes. + return nil + } + return scanner.Err() +} + +// Validation functions +func templatesDirExists(templatesPath string) error { + _, err := os.Stat(templatesPath) + if errors.Is(err, os.ErrNotExist) { + return errors.New("directory does not exist") + } + return nil +} + +func validateTemplatesDir(templatesPath string) error { + fi, err := os.Stat(templatesPath) + if err != nil { + return err + } + if !fi.IsDir() { + return errors.New("not a directory") + } + return nil +} + +func validateAllowedExtension(fileName string) error { + ext := filepath.Ext(fileName) + validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"} + + if slices.Contains(validExtensions, ext) { + return nil + } + + return fmt.Errorf("file extension '%s' not valid. 
Valid extensions are .yaml, .yml, .tpl, or .txt", ext) +} + +func validateYamlContent(err error) error { + if err != nil { + return fmt.Errorf("unable to parse YAML: %w", err) + } + return nil +} + +// validateMetadataName uses the correct validation function for the object +// Kind, or if not set, defaults to the standard definition of a subdomain in +// DNS (RFC 1123), used by most resources. +func validateMetadataName(obj *k8sYamlStruct) error { + fn := validateMetadataNameFunc(obj) + allErrs := field.ErrorList{} + for _, msg := range fn(obj.Metadata.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg)) + } + if len(allErrs) > 0 { + return fmt.Errorf("object name does not conform to Kubernetes naming requirements: %q: %w", obj.Metadata.Name, allErrs.ToAggregate()) + } + return nil +} + +// validateMetadataNameFunc will return a name validation function for the +// object kind, if defined below. +// +// Rules should match those set in the various api validations: +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274 +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39 +// ... +// +// Implementing here to avoid importing k/k. +// +// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object +// kinds that don't have special requirements, so is the most likely to work if +// new kinds are added. +func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc { + switch strings.ToLower(obj.Kind) { + case "pod", "node", "secret", "endpoints", "resourcequota", // core + "controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps + "autoscaler", // autoscaler + "cronjob", "job", // batch + "lease", // coordination + "endpointslice", // discovery + "networkpolicy", "ingress", // networking + "podsecuritypolicy", // policy + "priorityclass", // scheduling + "podpreset", // settings + "storageclass", "volumeattachment", "csinode": // storage + return validation.NameIsDNSSubdomain + case "service": + return validation.NameIsDNS1035Label + case "namespace": + return validation.ValidateNamespaceName + case "serviceaccount": + return validation.ValidateServiceAccountName + case "certificatesigningrequest": + // No validation. + // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140 + return func(_ string, _ bool) []string { return nil } + case "role", "clusterrole", "rolebinding", "clusterrolebinding": + // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34 + return func(name string, _ bool) []string { + return apipath.IsValidPathSegmentName(name) + } + default: + return validation.NameIsDNSSubdomain + } +} + +// validateMatchSelector ensures that template specs have a selector declared. 
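+//
+// For example, a rendered Deployment like the following (a hypothetical manifest, not
+// taken from the chart testdata) would be rejected, because the manifest contains
+// neither matchLabels nor matchExpressions:
+//
+//	apiVersion: apps/v1
+//	kind: Deployment
+//	metadata:
+//	  name: web
+//	spec:
+//	  replicas: 1
+//	  template:
+//	    metadata:
+//	      labels:
+//	        app: web
+//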
+// See https://github.com/helm/helm/issues/1990 +func validateMatchSelector(yamlStruct *k8sYamlStruct, manifest string) error { + switch yamlStruct.Kind { + case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet": + // verify that matchLabels or matchExpressions is present + if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") { + return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name) + } + } + return nil +} + +func validateListAnnotations(yamlStruct *k8sYamlStruct, manifest string) error { + if yamlStruct.Kind == "List" { + m := struct { + Items []struct { + Metadata struct { + Annotations map[string]string + } + } + }{} + + if err := yaml.Unmarshal([]byte(manifest), &m); err != nil { + return validateYamlContent(err) + } + + for _, i := range m.Items { + if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok { + return errors.New("annotation 'helm.sh/resource-policy' within List objects are ignored") + } + } + } + return nil +} + +// k8sYamlStruct stubs a Kubernetes YAML file. +type k8sYamlStruct struct { + APIVersion string `json:"apiVersion"` + Kind string + Metadata k8sYamlMetadata +} + +type k8sYamlMetadata struct { + Namespace string + Name string +} diff --git a/internal/chart/v3/lint/rules/template_test.go b/internal/chart/v3/lint/rules/template_test.go new file mode 100644 index 000000000..40bcfa26b --- /dev/null +++ b/internal/chart/v3/lint/rules/template_test.go @@ -0,0 +1,441 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/internal/chart/v3/lint/support" + chartutil "helm.sh/helm/v4/internal/chart/v3/util" + "helm.sh/helm/v4/pkg/chart/common" +) + +const templateTestBasedir = "./testdata/albatross" + +func TestValidateAllowedExtension(t *testing.T) { + var failTest = []string{"/foo", "/test.toml"} + for _, test := range failTest { + err := validateAllowedExtension(test) + if err == nil || !strings.Contains(err.Error(), "Valid extensions are .yaml, .yml, .tpl, or .txt") { + t.Errorf("validateAllowedExtension('%s') to return \"Valid extensions are .yaml, .yml, .tpl, or .txt\", got no error", test) + } + } + var successTest = []string{"/foo.yaml", "foo.yaml", "foo.tpl", "/foo/bar/baz.yaml", "NOTES.txt"} + for _, test := range successTest { + err := validateAllowedExtension(test) + if err != nil { + t.Errorf("validateAllowedExtension('%s') to return no error but got \"%s\"", test, err.Error()) + } + } +} + +var values = map[string]interface{}{"nameOverride": "", "httpPort": 80} + +const namespace = "testNamespace" +const strict = false + +func TestTemplateParsing(t *testing.T) { + linter := support.Linter{ChartDir: templateTestBasedir} + Templates(&linter, values, namespace, strict) + res := linter.Messages + + if len(res) != 1 { + t.Fatalf("Expected one error, got %d, %v", len(res), res) + } + + if !strings.Contains(res[0].Err.Error(), "deliberateSyntaxError") { + t.Errorf("Unexpected error: %s", res[0]) + } +} + +var wrongTemplatePath = filepath.Join(templateTestBasedir, "templates", "fail.yaml") +var ignoredTemplatePath = filepath.Join(templateTestBasedir, "fail.yaml.ignored") + +// Test a template with all the existing features: +// namespaces, partial templates +func TestTemplateIntegrationHappyPath(t *testing.T) { + // Rename file so it gets ignored by the linter + os.Rename(wrongTemplatePath, ignoredTemplatePath) + defer os.Rename(ignoredTemplatePath, wrongTemplatePath) + + linter := support.Linter{ChartDir: templateTestBasedir} + Templates(&linter, values, namespace, strict) + res := linter.Messages + + if len(res) != 0 { + t.Fatalf("Expected no error, got %d, %v", len(res), res) + } +} + +func TestMultiTemplateFail(t *testing.T) { + linter := support.Linter{ChartDir: "./testdata/multi-template-fail"} + Templates(&linter, values, namespace, strict) + res := linter.Messages + + if len(res) != 1 { + t.Fatalf("Expected 1 error, got %d, %v", len(res), res) + } + + if !strings.Contains(res[0].Err.Error(), "object name does not conform to Kubernetes naming requirements") { + t.Errorf("Unexpected error: %s", res[0].Err) + } +} + +func TestValidateMetadataName(t *testing.T) { + tests := []struct { + obj *k8sYamlStruct + wantErr bool + }{ + // Most kinds use IsDNS1123Subdomain. 
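+		// A DNS-1123 subdomain may contain only lowercase alphanumerics, '-' and '.',
+		// must start and end with an alphanumeric character, and is at most 253 characters long.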
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^&#$%*@^*@&#^"}}, true}, + {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true}, + {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false}, + {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true}, + {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true}, + + // Service uses IsDNS1035Label. + {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true}, + {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true}, + + // Namespace uses IsDNS1123Label. + {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true}, + {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false}, + + // CertificateSigningRequest has no validation. + {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false}, + {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^&#$%*@^*@&#^"}}, false}, + + // RBAC uses path validation. 
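+		// (path segment names may contain ':' and '.', but '/' and '%' are rejected).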
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false}, + {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false}, + {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true}, + {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true}, + {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true}, + {&k8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false}, + {&k8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false}, + + // Unknown Kind + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^&#$%*@^*@&#^"}}, true}, + {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true}, + + // No kind + {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false}, + {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%s/%s", tt.obj.Kind, tt.obj.Metadata.Name), func(t *testing.T) { + if err := validateMetadataName(tt.obj); (err != nil) != tt.wantErr { + t.Errorf("validateMetadataName() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestDeprecatedAPIFails(t *testing.T) { + mychart := chart.Chart{ + Metadata: &chart.Metadata{ + APIVersion: "v2", + Name: "failapi", + Version: "0.1.0", + Icon: "satisfy-the-linting-gods.gif", + }, + Templates: []*common.File{ + { + Name: "templates/baddeployment.yaml", + Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"), + }, + { + Name: "templates/goodsecret.yaml", + Data: []byte("apiVersion: v1\nkind: Secret\nmetadata:\n name: goodsecret"), + }, + }, + } + tmpdir := t.TempDir() + + if err := chartutil.SaveDir(&mychart, tmpdir); err != nil { + t.Fatal(err) + } + + linter := support.Linter{ChartDir: 
filepath.Join(tmpdir, mychart.Name())} + Templates(&linter, values, namespace, strict) + if l := len(linter.Messages); l != 1 { + for i, msg := range linter.Messages { + t.Logf("Message %d: %s", i, msg) + } + t.Fatalf("Expected 1 lint error, got %d", l) + } + + err := linter.Messages[0].Err.(deprecatedAPIError) + if err.Deprecated != "apps/v1beta1 Deployment" { + t.Errorf("Surprised to learn that %q is deprecated", err.Deprecated) + } +} + +const manifest = `apiVersion: v1 +kind: ConfigMap +metadata: + name: foo +data: + myval1: {{default "val" .Values.mymap.key1 }} + myval2: {{default "val" .Values.mymap.key2 }} +` + +// TestStrictTemplateParsingMapError is a regression test. +// +// The template engine should not produce an error when a map in values.yaml does +// not contain all possible keys. +// +// See https://github.com/helm/helm/issues/7483 +func TestStrictTemplateParsingMapError(t *testing.T) { + + ch := chart.Chart{ + Metadata: &chart.Metadata{ + Name: "regression7483", + APIVersion: "v2", + Version: "0.1.0", + }, + Values: map[string]interface{}{ + "mymap": map[string]string{ + "key1": "val1", + }, + }, + Templates: []*common.File{ + { + Name: "templates/configmap.yaml", + Data: []byte(manifest), + }, + }, + } + dir := t.TempDir() + if err := chartutil.SaveDir(&ch, dir); err != nil { + t.Fatal(err) + } + linter := &support.Linter{ + ChartDir: filepath.Join(dir, ch.Metadata.Name), + } + Templates(linter, ch.Values, namespace, strict) + if len(linter.Messages) != 0 { + t.Errorf("expected zero messages, got %d", len(linter.Messages)) + for i, msg := range linter.Messages { + t.Logf("Message %d: %q", i, msg) + } + } +} + +func TestValidateMatchSelector(t *testing.T) { + md := &k8sYamlStruct{ + APIVersion: "apps/v1", + Kind: "Deployment", + Metadata: k8sYamlMetadata{ + Name: "mydeployment", + }, + } + manifest := ` + apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ` + if err := validateMatchSelector(md, manifest); err != nil { + t.Error(err) + } + manifest = ` + apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchExpressions: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ` + if err := validateMatchSelector(md, manifest); err != nil { + t.Error(err) + } + manifest = ` + apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ` + if err := validateMatchSelector(md, manifest); err == nil { + t.Error("expected Deployment with no selector to fail") + } +} + +func TestValidateTopIndentLevel(t *testing.T) { + for doc, shouldFail := range map[string]bool{ + // Should not fail + "\n\n\n\t\n \t\n": false, + "apiVersion:foo\n bar:baz": false, + "\n\n\napiVersion:foo\n\n\n": false, + // Should fail + " apiVersion:foo": true, + "\n\n apiVersion:foo\n\n": true, + } { + if err := validateTopIndentLevel(doc); (err == nil) == shouldFail { + t.Errorf("Expected %t for %q", shouldFail, doc) + } + } + +} + +// TestEmptyWithCommentsManifests checks the lint is not failing against empty manifests that contains only comments +// See 
https://github.com/helm/helm/issues/8621 +func TestEmptyWithCommentsManifests(t *testing.T) { + mychart := chart.Chart{ + Metadata: &chart.Metadata{ + APIVersion: "v2", + Name: "emptymanifests", + Version: "0.1.0", + Icon: "satisfy-the-linting-gods.gif", + }, + Templates: []*common.File{ + { + Name: "templates/empty-with-comments.yaml", + Data: []byte("#@formatter:off\n"), + }, + }, + } + tmpdir := t.TempDir() + + if err := chartutil.SaveDir(&mychart, tmpdir); err != nil { + t.Fatal(err) + } + + linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())} + Templates(&linter, values, namespace, strict) + if l := len(linter.Messages); l > 0 { + for i, msg := range linter.Messages { + t.Logf("Message %d: %s", i, msg) + } + t.Fatalf("Expected 0 lint errors, got %d", l) + } +} +func TestValidateListAnnotations(t *testing.T) { + md := &k8sYamlStruct{ + APIVersion: "v1", + Kind: "List", + Metadata: k8sYamlMetadata{ + Name: "list", + }, + } + manifest := ` +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: ConfigMap + metadata: + annotations: + helm.sh/resource-policy: keep +` + + if err := validateListAnnotations(md, manifest); err == nil { + t.Fatal("expected list with nested keep annotations to fail") + } + + manifest = ` +apiVersion: v1 +kind: List +metadata: + annotations: + helm.sh/resource-policy: keep +items: + - apiVersion: v1 + kind: ConfigMap +` + + if err := validateListAnnotations(md, manifest); err != nil { + t.Fatalf("List objects keep annotations should pass. got: %s", err) + } +} diff --git a/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml b/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml new file mode 100644 index 000000000..5e1ed515c --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v3 +name: albatross +description: testing chart +version: 199.44.12345-Alpha.1+cafe009 +icon: http://riverrun.io diff --git a/pkg/lint/rules/testdata/albatross/templates/_helpers.tpl b/internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl similarity index 100% rename from pkg/lint/rules/testdata/albatross/templates/_helpers.tpl rename to internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl diff --git a/pkg/lint/rules/testdata/albatross/templates/fail.yaml b/internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml similarity index 100% rename from pkg/lint/rules/testdata/albatross/templates/fail.yaml rename to internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml diff --git a/pkg/lint/rules/testdata/albatross/templates/svc.yaml b/internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml similarity index 100% rename from pkg/lint/rules/testdata/albatross/templates/svc.yaml rename to internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml diff --git a/pkg/lint/rules/testdata/albatross/values.yaml b/internal/chart/v3/lint/rules/testdata/albatross/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/albatross/values.yaml rename to internal/chart/v3/lint/rules/testdata/albatross/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml new file mode 100644 index 000000000..8a598473b --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml @@ -0,0 +1,15 @@ +name: "some-chart" +apiVersion: v3 +description: A Helm chart for Kubernetes +version: 72445e2 +home: "" +type: 
application +appVersion: 72225e2 +icon: "https://some-url.com/icon.jpeg" +dependencies: + - name: mariadb + version: 5.x.x + repository: https://charts.helm.sh/stable/ + condition: mariadb.enabled + tags: + - database diff --git a/pkg/lint/rules/testdata/badchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/badchartfile/Chart.yaml rename to internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml diff --git a/pkg/lint/rules/testdata/badchartfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/badchartfile/values.yaml rename to internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml new file mode 100644 index 000000000..41f452354 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v3 +description: A Helm chart for Kubernetes +version: 0.1.0 +name: "../badchartname" +type: application diff --git a/pkg/lint/rules/testdata/badchartname/values.yaml b/internal/chart/v3/lint/rules/testdata/badchartname/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/badchartname/values.yaml rename to internal/chart/v3/lint/rules/testdata/badchartname/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml new file mode 100644 index 000000000..3bf007393 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v3 +description: A Helm chart for Kubernetes +version: 0.1.0 +name: badcrdfile +type: application +icon: http://riverrun.io diff --git a/pkg/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml similarity index 100% rename from pkg/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml rename to internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml diff --git a/pkg/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml similarity index 100% rename from pkg/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml rename to internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml diff --git a/pkg/lint/rules/testdata/badcrdfile/templates/.gitkeep b/internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep similarity index 100% rename from pkg/lint/rules/testdata/badcrdfile/templates/.gitkeep rename to internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep diff --git a/pkg/lint/rules/testdata/badcrdfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/badcrdfile/values.yaml rename to internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml new file mode 100644 index 000000000..aace27e21 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v3 +name: badvaluesfile +description: A Helm chart for Kubernetes +version: 0.0.1 +home: "" +icon: http://riverrun.io diff --git 
a/pkg/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml similarity index 100% rename from pkg/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml rename to internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml diff --git a/pkg/lint/rules/testdata/badvaluesfile/values.yaml b/internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/badvaluesfile/values.yaml rename to internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml b/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml new file mode 100644 index 000000000..bf8f5e309 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v3 +name: goodone +description: good testing chart +version: 199.44.12345-Alpha.1+cafe009 +icon: http://riverrun.io diff --git a/pkg/lint/rules/testdata/goodone/crds/test-crd.yaml b/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml similarity index 100% rename from pkg/lint/rules/testdata/goodone/crds/test-crd.yaml rename to internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml diff --git a/pkg/lint/rules/testdata/goodone/templates/goodone.yaml b/internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml similarity index 100% rename from pkg/lint/rules/testdata/goodone/templates/goodone.yaml rename to internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml diff --git a/pkg/lint/rules/testdata/goodone/values.yaml b/internal/chart/v3/lint/rules/testdata/goodone/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/goodone/values.yaml rename to internal/chart/v3/lint/rules/testdata/goodone/values.yaml diff --git a/pkg/lint/rules/testdata/invalidchartfile/Chart.yaml b/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/invalidchartfile/Chart.yaml rename to internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml diff --git a/pkg/lint/rules/testdata/invalidchartfile/values.yaml b/internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/invalidchartfile/values.yaml rename to internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml new file mode 100644 index 000000000..0f6d1ee98 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v3 +description: A Helm chart for Kubernetes +version: 0.1.0 +name: invalidcrdsdir +type: application +icon: http://riverrun.io diff --git a/pkg/lint/rules/testdata/invalidcrdsdir/crds b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds similarity index 100% rename from pkg/lint/rules/testdata/invalidcrdsdir/crds rename to internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds diff --git a/pkg/lint/rules/testdata/invalidcrdsdir/values.yaml b/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/invalidcrdsdir/values.yaml rename to internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml diff --git a/pkg/lint/rules/testdata/malformed-template/.helmignore 
b/internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore similarity index 100% rename from pkg/lint/rules/testdata/malformed-template/.helmignore rename to internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore diff --git a/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml new file mode 100644 index 000000000..d46b98cb5 --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v3 +name: test +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" +icon: https://riverrun.io \ No newline at end of file diff --git a/pkg/lint/rules/testdata/malformed-template/templates/bad.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml similarity index 100% rename from pkg/lint/rules/testdata/malformed-template/templates/bad.yaml rename to internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml diff --git a/pkg/lint/rules/testdata/malformed-template/values.yaml b/internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/malformed-template/values.yaml rename to internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml b/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml new file mode 100644 index 000000000..bfb580bea --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v3 +name: multi-template-fail +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application and it is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/pkg/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml similarity index 100% rename from pkg/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml rename to internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml diff --git a/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml new file mode 100644 index 000000000..2a29c33fa --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v3 +name: v3-fail +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application and it is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/pkg/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/templates/_helpers.tpl rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl diff --git a/pkg/lint/rules/testdata/v3-fail/templates/deployment.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/templates/deployment.yaml rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml diff --git a/pkg/lint/rules/testdata/v3-fail/templates/ingress.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/templates/ingress.yaml rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml diff --git a/pkg/lint/rules/testdata/v3-fail/templates/service.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/templates/service.yaml rename to internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml diff --git a/pkg/lint/rules/testdata/v3-fail/values.yaml b/internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/values.yaml rename to internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml diff --git a/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml new file mode 100644 index 000000000..fa15eabaf --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml @@ -0,0 +1,16 
@@ +apiVersion: v3 +name: withsubchart +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: "1.16.0" +icon: http://riverrun.io + +dependencies: + - name: subchart + version: 0.1.16 + repository: "file://../subchart" + import-values: + - child: subchart + parent: subchart + diff --git a/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml new file mode 100644 index 000000000..35b13e70d --- /dev/null +++ b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v3 +name: subchart +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: "1.16.0" diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml rename to internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/values.yaml rename to internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml diff --git a/pkg/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/templates/mainchart.yaml rename to internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml diff --git a/pkg/lint/rules/testdata/withsubchart/values.yaml b/internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/values.yaml rename to internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml diff --git a/internal/chart/v3/lint/rules/values.go b/internal/chart/v3/lint/rules/values.go new file mode 100644 index 000000000..adf2e2c52 --- /dev/null +++ b/internal/chart/v3/lint/rules/values.go @@ -0,0 +1,79 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "fmt" + "os" + "path/filepath" + + "helm.sh/helm/v4/internal/chart/v3/lint/support" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" +) + +// ValuesWithOverrides tests the values.yaml file. +// +// If a schema is present in the chart, values are tested against that. Otherwise, +// they are only tested for well-formedness. +// +// If additional values are supplied, they are coalesced into the values in values.yaml. 
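+//
+// Any findings are appended to the linter's Messages. A minimal sketch of how a
+// caller might drive this rule (the chart path below is hypothetical):
+//
+//	linter := &support.Linter{ChartDir: "./testdata/mychart"}
+//	ValuesWithOverrides(linter, map[string]interface{}{"httpPort": 8080})
+//	for _, msg := range linter.Messages {
+//		fmt.Println(msg)
+//	}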
+func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}) { + file := "values.yaml" + vf := filepath.Join(linter.ChartDir, file) + fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf)) + + if !fileExists { + return + } + + linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides)) +} + +func validateValuesFileExistence(valuesPath string) error { + _, err := os.Stat(valuesPath) + if err != nil { + return fmt.Errorf("file does not exist") + } + return nil +} + +func validateValuesFile(valuesPath string, overrides map[string]interface{}) error { + values, err := common.ReadValuesFile(valuesPath) + if err != nil { + return fmt.Errorf("unable to parse YAML: %w", err) + } + + // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top + // level values against the top-level expectations. Subchart values are not linted. + // We could change that. For now, though, we retain that strategy, and thus can + // coalesce tables (like reuse-values does) instead of doing the full chart + // CoalesceValues + coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues = util.CoalesceTables(coalescedValues, values) + + ext := filepath.Ext(valuesPath) + schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json" + schema, err := os.ReadFile(schemaPath) + if len(schema) == 0 { + return nil + } + if err != nil { + return err + } + return util.ValidateAgainstSingleSchema(coalescedValues, schema) +} diff --git a/pkg/lint/rules/values_test.go b/internal/chart/v3/lint/rules/values_test.go similarity index 100% rename from pkg/lint/rules/values_test.go rename to internal/chart/v3/lint/rules/values_test.go diff --git a/internal/chart/v3/util/errors_test.go b/internal/chart/v3/lint/support/doc.go similarity index 67% rename from internal/chart/v3/util/errors_test.go rename to internal/chart/v3/lint/support/doc.go index b8ae86384..2d54a9b7d 100644 --- a/internal/chart/v3/util/errors_test.go +++ b/internal/chart/v3/lint/support/doc.go @@ -14,24 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util - -import ( - "testing" -) - -func TestErrorNoTableDoesNotPanic(t *testing.T) { - x := "empty" - - y := ErrNoTable{x} - - t.Logf("error is: %s", y) -} - -func TestErrorNoValueDoesNotPanic(t *testing.T) { - x := "empty" - - y := ErrNoValue{x} +/* +Package support contains tools for linting charts. - t.Logf("error is: %s", y) -} +Linting is the process of testing charts for errors or warnings regarding +formatting, compilation, or standards compliance. 
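+
+Rules record their findings on a Linter as Messages; each Message carries a
+severity (info, warning, or error), the path it refers to, and the underlying
+error.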
+*/ +package support // import "helm.sh/helm/v4/internal/chart/v3/lint/support" diff --git a/pkg/lint/support/message.go b/internal/chart/v3/lint/support/message.go similarity index 100% rename from pkg/lint/support/message.go rename to internal/chart/v3/lint/support/message.go diff --git a/pkg/lint/support/message_test.go b/internal/chart/v3/lint/support/message_test.go similarity index 100% rename from pkg/lint/support/message_test.go rename to internal/chart/v3/lint/support/message_test.go diff --git a/internal/chart/v3/loader/load.go b/internal/chart/v3/loader/load.go index 30bafdad4..2959fc71d 100644 --- a/internal/chart/v3/loader/load.go +++ b/internal/chart/v3/loader/load.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/yaml" chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/chart/common" ) // ChartLoader loads a chart. @@ -79,7 +80,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { // do not rely on assumed ordering of files in the chart and crash // if Chart.yaml was not coming early enough to initialize metadata for _, f := range files { - c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data}) + c.Raw = append(c.Raw, &common.File{Name: f.Name, Data: f.Data}) if f.Name == "Chart.yaml" { if c.Metadata == nil { c.Metadata = new(chart.Metadata) @@ -115,10 +116,10 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { c.Schema = f.Data case strings.HasPrefix(f.Name, "templates/"): - c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data}) + c.Templates = append(c.Templates, &common.File{Name: f.Name, Data: f.Data}) case strings.HasPrefix(f.Name, "charts/"): if filepath.Ext(f.Name) == ".prov" { - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) continue } @@ -126,7 +127,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { cname := strings.SplitN(fname, "/", 2)[0] subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data}) default: - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) } } diff --git a/internal/chart/v3/loader/load_test.go b/internal/chart/v3/loader/load_test.go index e770923ff..1d8ca836a 100644 --- a/internal/chart/v3/loader/load_test.go +++ b/internal/chart/v3/loader/load_test.go @@ -31,6 +31,7 @@ import ( "time" chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/chart/common" ) func TestLoadDir(t *testing.T) { @@ -491,7 +492,7 @@ foo: } } -func TestMergeValues(t *testing.T) { +func TestMergeValuesV3(t *testing.T) { nestedMap := map[string]interface{}{ "foo": "bar", "baz": map[string]string{ @@ -701,7 +702,7 @@ func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) { } } -func verifyBomStripped(t *testing.T, files []*chart.File) { +func verifyBomStripped(t *testing.T, files []*common.File) { t.Helper() for _, file := range files { if bytes.HasPrefix(file.Data, utf8bom) { diff --git a/internal/chart/v3/util/capabilities.go b/internal/chart/v3/util/capabilities.go deleted file mode 100644 index 23b6d46fa..000000000 --- a/internal/chart/v3/util/capabilities.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "slices" - "strconv" - - "github.com/Masterminds/semver/v3" - "k8s.io/client-go/kubernetes/scheme" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - - helmversion "helm.sh/helm/v4/internal/version" -) - -var ( - // The Kubernetes version can be set by LDFLAGS. In order to do that the value - // must be a string. - k8sVersionMajor = "1" - k8sVersionMinor = "20" - - // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). - DefaultVersionSet = allKnownVersions() - - // DefaultCapabilities is the default set of capabilities. - DefaultCapabilities = &Capabilities{ - KubeVersion: KubeVersion{ - Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor), - Major: k8sVersionMajor, - Minor: k8sVersionMinor, - }, - APIVersions: DefaultVersionSet, - HelmVersion: helmversion.Get(), - } -) - -// Capabilities describes the capabilities of the Kubernetes cluster. -type Capabilities struct { - // KubeVersion is the Kubernetes version. - KubeVersion KubeVersion - // APIVersions are supported Kubernetes API versions. - APIVersions VersionSet - // HelmVersion is the build information for this helm version - HelmVersion helmversion.BuildInfo -} - -func (capabilities *Capabilities) Copy() *Capabilities { - return &Capabilities{ - KubeVersion: capabilities.KubeVersion, - APIVersions: capabilities.APIVersions, - HelmVersion: capabilities.HelmVersion, - } -} - -// KubeVersion is the Kubernetes version. -type KubeVersion struct { - Version string // Kubernetes version - Major string // Kubernetes major version - Minor string // Kubernetes minor version -} - -// String implements fmt.Stringer -func (kv *KubeVersion) String() string { return kv.Version } - -// GitVersion returns the Kubernetes version string. -// -// Deprecated: use KubeVersion.Version. -func (kv *KubeVersion) GitVersion() string { return kv.Version } - -// ParseKubeVersion parses kubernetes version from string -func ParseKubeVersion(version string) (*KubeVersion, error) { - sv, err := semver.NewVersion(version) - if err != nil { - return nil, err - } - return &KubeVersion{ - Version: "v" + sv.String(), - Major: strconv.FormatUint(sv.Major(), 10), - Minor: strconv.FormatUint(sv.Minor(), 10), - }, nil -} - -// VersionSet is a set of Kubernetes API versions. -type VersionSet []string - -// Has returns true if the version string is in the set. -// -// vs.Has("apps/v1") -func (v VersionSet) Has(apiVersion string) bool { - return slices.Contains(v, apiVersion) -} - -func allKnownVersions() VersionSet { - // We should register the built in extension APIs as well so CRDs are - // supported in the default version set. 
This has caused problems with `helm - // template` in the past, so let's be safe - apiextensionsv1beta1.AddToScheme(scheme.Scheme) - apiextensionsv1.AddToScheme(scheme.Scheme) - - groups := scheme.Scheme.PrioritizedVersionsAllGroups() - vs := make(VersionSet, 0, len(groups)) - for _, gv := range groups { - vs = append(vs, gv.String()) - } - return vs -} diff --git a/internal/chart/v3/util/capabilities_test.go b/internal/chart/v3/util/capabilities_test.go deleted file mode 100644 index aa9be9db8..000000000 --- a/internal/chart/v3/util/capabilities_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "testing" -) - -func TestVersionSet(t *testing.T) { - vs := VersionSet{"v1", "apps/v1"} - if d := len(vs); d != 2 { - t.Errorf("Expected 2 versions, got %d", d) - } - - if !vs.Has("apps/v1") { - t.Error("Expected to find apps/v1") - } - - if vs.Has("Spanish/inquisition") { - t.Error("No one expects the Spanish/inquisition") - } -} - -func TestDefaultVersionSet(t *testing.T) { - if !DefaultVersionSet.Has("v1") { - t.Error("Expected core v1 version set") - } -} - -func TestDefaultCapabilities(t *testing.T) { - kv := DefaultCapabilities.KubeVersion - if kv.String() != "v1.20.0" { - t.Errorf("Expected default KubeVersion.String() to be v1.20.0, got %q", kv.String()) - } - if kv.Version != "v1.20.0" { - t.Errorf("Expected default KubeVersion.Version to be v1.20.0, got %q", kv.Version) - } - if kv.GitVersion() != "v1.20.0" { - t.Errorf("Expected default KubeVersion.GitVersion() to be v1.20.0, got %q", kv.Version) - } - if kv.Major != "1" { - t.Errorf("Expected default KubeVersion.Major to be 1, got %q", kv.Major) - } - if kv.Minor != "20" { - t.Errorf("Expected default KubeVersion.Minor to be 20, got %q", kv.Minor) - } -} - -func TestDefaultCapabilitiesHelmVersion(t *testing.T) { - hv := DefaultCapabilities.HelmVersion - - if hv.Version != "v4.0" { - t.Errorf("Expected default HelmVersion to be v4.0, got %q", hv.Version) - } -} - -func TestParseKubeVersion(t *testing.T) { - kv, err := ParseKubeVersion("v1.16.0") - if err != nil { - t.Errorf("Expected v1.16.0 to parse successfully") - } - if kv.Version != "v1.16.0" { - t.Errorf("Expected parsed KubeVersion.Version to be v1.16.0, got %q", kv.String()) - } - if kv.Major != "1" { - t.Errorf("Expected parsed KubeVersion.Major to be 1, got %q", kv.Major) - } - if kv.Minor != "16" { - t.Errorf("Expected parsed KubeVersion.Minor to be 16, got %q", kv.Minor) - } -} diff --git a/internal/chart/v3/util/coalesce.go b/internal/chart/v3/util/coalesce.go deleted file mode 100644 index caea2e119..000000000 --- a/internal/chart/v3/util/coalesce.go +++ /dev/null @@ -1,308 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "log" - "maps" - - "github.com/mitchellh/copystructure" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -func concatPrefix(a, b string) string { - if a == "" { - return b - } - return fmt.Sprintf("%s.%s", a, b) -} - -// CoalesceValues coalesces all of the values in a chart (and its subcharts). -// -// Values are coalesced together using the following rules: -// -// - Values in a higher level chart always override values in a lower-level -// dependency chart -// - Scalar values and arrays are replaced, maps are merged -// - A chart has access to all of the variables for it, as well as all of -// the values destined for its dependencies. -func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { - valsCopy, err := copyValues(vals) - if err != nil { - return vals, err - } - return coalesce(log.Printf, chrt, valsCopy, "", false) -} - -// MergeValues is used to merge the values in a chart and its subcharts. This -// is different from Coalescing as nil/null values are preserved. -// -// Values are coalesced together using the following rules: -// -// - Values in a higher level chart always override values in a lower-level -// dependency chart -// - Scalar values and arrays are replaced, maps are merged -// - A chart has access to all of the variables for it, as well as all of -// the values destined for its dependencies. -// -// Retaining Nils is useful when processes early in a Helm action or business -// logic need to retain them for when Coalescing will happen again later in the -// business logic. -func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { - valsCopy, err := copyValues(vals) - if err != nil { - return vals, err - } - return coalesce(log.Printf, chrt, valsCopy, "", true) -} - -func copyValues(vals map[string]interface{}) (Values, error) { - v, err := copystructure.Copy(vals) - if err != nil { - return vals, err - } - - valsCopy := v.(map[string]interface{}) - // if we have an empty map, make sure it is initialized - if valsCopy == nil { - valsCopy = make(map[string]interface{}) - } - - return valsCopy, nil -} - -type printFn func(format string, v ...interface{}) - -// coalesce coalesces the dest values and the chart values, giving priority to the dest values. -// -// This is a helper function for CoalesceValues and MergeValues. -// -// Note, the merge argument specifies whether this is being used by MergeValues -// or CoalesceValues. Coalescing removes null values and their keys in some -// situations while merging keeps the null values. -func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { - coalesceValues(printf, ch, dest, prefix, merge) - return coalesceDeps(printf, ch, dest, prefix, merge) -} - -// coalesceDeps coalesces the dependencies of the given chart. 
-func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { - for _, subchart := range chrt.Dependencies() { - if c, ok := dest[subchart.Name()]; !ok { - // If dest doesn't already have the key, create it. - dest[subchart.Name()] = make(map[string]interface{}) - } else if !istable(c) { - return dest, fmt.Errorf("type mismatch on %s: %t", subchart.Name(), c) - } - if dv, ok := dest[subchart.Name()]; ok { - dvmap := dv.(map[string]interface{}) - subPrefix := concatPrefix(prefix, chrt.Metadata.Name) - // Get globals out of dest and merge them into dvmap. - coalesceGlobals(printf, dvmap, dest, subPrefix, merge) - // Now coalesce the rest of the values. - var err error - dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge) - if err != nil { - return dest, err - } - } - } - return dest, nil -} - -// coalesceGlobals copies the globals out of src and merges them into dest. -// -// For convenience, returns dest. -func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) { - var dg, sg map[string]interface{} - - if destglob, ok := dest[GlobalKey]; !ok { - dg = make(map[string]interface{}) - } else if dg, ok = destglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because destination %s is not a table.", GlobalKey) - return - } - - if srcglob, ok := src[GlobalKey]; !ok { - sg = make(map[string]interface{}) - } else if sg, ok = srcglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because source %s is not a table.", GlobalKey) - return - } - - // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This - // reverses that decision. It may somehow be possible to introduce a loop - // here, but I haven't found a way. So for the time being, let's allow - // tables in globals. - for key, val := range sg { - if istable(val) { - vv := copyMap(val.(map[string]interface{})) - if destv, ok := dg[key]; !ok { - // Here there is no merge. We're just adding. - dg[key] = vv - } else { - if destvmap, ok := destv.(map[string]interface{}); !ok { - printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key) - } else { - // Basically, we reverse order of coalesce here to merge - // top-down. - subPrefix := concatPrefix(prefix, key) - // In this location coalesceTablesFullKey should always have - // merge set to true. The output of coalesceGlobals is run - // through coalesce where any nils will be removed. - coalesceTablesFullKey(printf, vv, destvmap, subPrefix, true) - dg[key] = vv - } - } - } else if dv, ok := dg[key]; ok && istable(dv) { - // It's not clear if this condition can actually ever trigger. - printf("key %s is table. Skipping", key) - } else { - // TODO: Do we need to do any additional checking on the value? - dg[key] = val - } - } - dest[GlobalKey] = dg -} - -func copyMap(src map[string]interface{}) map[string]interface{} { - m := make(map[string]interface{}, len(src)) - maps.Copy(m, src) - return m -} - -// coalesceValues builds up a values map for a particular chart. -// -// Values in v will override the values in the chart. -func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string, merge bool) { - subPrefix := concatPrefix(prefix, c.Metadata.Name) - - // Using c.Values directly when coalescing a table can cause problems where - // the original c.Values is altered. Creating a deep copy stops the problem. 
- // This section is fault-tolerant as there is no ability to return an error. - valuesCopy, err := copystructure.Copy(c.Values) - var vc map[string]interface{} - var ok bool - if err != nil { - // If there is an error something is wrong with copying c.Values it - // means there is a problem in the deep copying package or something - // wrong with c.Values. In this case we will use c.Values and report - // an error. - printf("warning: unable to copy values, err: %s", err) - vc = c.Values - } else { - vc, ok = valuesCopy.(map[string]interface{}) - if !ok { - // c.Values has a map[string]interface{} structure. If the copy of - // it cannot be treated as map[string]interface{} there is something - // strangely wrong. Log it and use c.Values - printf("warning: unable to convert values copy to values type") - vc = c.Values - } - } - - for key, val := range vc { - if value, ok := v[key]; ok { - if value == nil && !merge { - // When the YAML value is null and we are coalescing instead of - // merging, we remove the value's key. - // This allows Helm's various sources of values (value files or --set) to - // remove incompatible keys from any previous chart, file, or set values. - delete(v, key) - } else if dest, ok := value.(map[string]interface{}); ok { - // if v[key] is a table, merge nv's val table into v[key]. - src, ok := val.(map[string]interface{}) - if !ok { - // If the original value is nil, there is nothing to coalesce, so we don't print - // the warning - if val != nil { - printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) - } - } else { - // If the key is a child chart, coalesce tables with Merge set to true - merge := childChartMergeTrue(c, key, merge) - - // Because v has higher precedence than nv, dest values override src - // values. - coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) - } - } - } else { - // If the key is not in v, copy it from nv. - v[key] = val - } - } -} - -func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool { - for _, subchart := range chrt.Dependencies() { - if subchart.Name() == key { - return true - } - } - return merge -} - -// CoalesceTables merges a source map into a destination map. -// -// dest is considered authoritative. -func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { - return coalesceTablesFullKey(log.Printf, dst, src, "", false) -} - -func MergeTables(dst, src map[string]interface{}) map[string]interface{} { - return coalesceTablesFullKey(log.Printf, dst, src, "", true) -} - -// coalesceTablesFullKey merges a source map into a destination map. -// -// dest is considered authoritative. -func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string, merge bool) map[string]interface{} { - // When --reuse-values is set but there are no modifications yet, return new values - if src == nil { - return dst - } - if dst == nil { - return src - } - for key, val := range dst { - if val == nil { - src[key] = nil - } - } - // Because dest has higher precedence than src, dest values override src - // values. 
- for key, val := range src { - fullkey := concatPrefix(prefix, key) - if dv, ok := dst[key]; ok && !merge && dv == nil { - delete(dst, key) - } else if !ok { - dst[key] = val - } else if istable(val) { - if istable(dv) { - coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge) - } else { - printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) - } - } else if istable(dv) && val != nil { - printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val) - } - } - return dst -} diff --git a/internal/chart/v3/util/coalesce_test.go b/internal/chart/v3/util/coalesce_test.go deleted file mode 100644 index 4770b601d..000000000 --- a/internal/chart/v3/util/coalesce_test.go +++ /dev/null @@ -1,723 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "encoding/json" - "fmt" - "maps" - "testing" - - "github.com/stretchr/testify/assert" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -// ref: http://www.yaml.org/spec/1.2/spec.html#id2803362 -var testCoalesceValuesYaml = []byte(` -top: yup -bottom: null -right: Null -left: NULL -front: ~ -back: "" -nested: - boat: null - -global: - name: Ishmael - subject: Queequeg - nested: - boat: true - -pequod: - boat: null - global: - name: Stinky - harpooner: Tashtego - nested: - boat: false - sail: true - foo2: null - ahab: - scope: whale - boat: null - nested: - foo: true - boat: null - object: null -`) - -func withDeps(c *chart.Chart, deps ...*chart.Chart) *chart.Chart { - c.AddDependency(deps...) 
- return c -} - -func TestCoalesceValues(t *testing.T) { - is := assert.New(t) - - c := withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "moby"}, - Values: map[string]interface{}{ - "back": "exists", - "bottom": "exists", - "front": "exists", - "left": "exists", - "name": "moby", - "nested": map[string]interface{}{"boat": true}, - "override": "bad", - "right": "exists", - "scope": "moby", - "top": "nope", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l0": "moby"}, - }, - "pequod": map[string]interface{}{ - "boat": "maybe", - "ahab": map[string]interface{}{ - "boat": "maybe", - "nested": map[string]interface{}{"boat": "maybe"}, - }, - }, - }, - }, - withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "pequod"}, - Values: map[string]interface{}{ - "name": "pequod", - "scope": "pequod", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "pequod"}, - }, - "boat": false, - "ahab": map[string]interface{}{ - "boat": false, - "nested": map[string]interface{}{"boat": false}, - }, - }, - }, - &chart.Chart{ - Metadata: &chart.Metadata{Name: "ahab"}, - Values: map[string]interface{}{ - "global": map[string]interface{}{ - "nested": map[string]interface{}{"foo": "bar", "foo2": "bar2"}, - "nested2": map[string]interface{}{"l2": "ahab"}, - }, - "scope": "ahab", - "name": "ahab", - "boat": true, - "nested": map[string]interface{}{"foo": false, "boat": true}, - "object": map[string]interface{}{"foo": "bar"}, - }, - }, - ), - &chart.Chart{ - Metadata: &chart.Metadata{Name: "spouter"}, - Values: map[string]interface{}{ - "scope": "spouter", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "spouter"}, - }, - }, - }, - ) - - vals, err := ReadValues(testCoalesceValuesYaml) - if err != nil { - t.Fatal(err) - } - - // taking a copy of the values before passing it - // to CoalesceValues as argument, so that we can - // use it for asserting later - valsCopy := make(Values, len(vals)) - maps.Copy(valsCopy, vals) - - v, err := CoalesceValues(c, vals) - if err != nil { - t.Fatal(err) - } - j, _ := json.MarshalIndent(v, "", " ") - t.Logf("Coalesced Values: %s", string(j)) - - tests := []struct { - tpl string - expect string - }{ - {"{{.top}}", "yup"}, - {"{{.back}}", ""}, - {"{{.name}}", "moby"}, - {"{{.global.name}}", "Ishmael"}, - {"{{.global.subject}}", "Queequeg"}, - {"{{.global.harpooner}}", ""}, - {"{{.pequod.name}}", "pequod"}, - {"{{.pequod.ahab.name}}", "ahab"}, - {"{{.pequod.ahab.scope}}", "whale"}, - {"{{.pequod.ahab.nested.foo}}", "true"}, - {"{{.pequod.ahab.global.name}}", "Ishmael"}, - {"{{.pequod.ahab.global.nested.foo}}", "bar"}, - {"{{.pequod.ahab.global.nested.foo2}}", ""}, - {"{{.pequod.ahab.global.subject}}", "Queequeg"}, - {"{{.pequod.ahab.global.harpooner}}", "Tashtego"}, - {"{{.pequod.global.name}}", "Ishmael"}, - {"{{.pequod.global.nested.foo}}", ""}, - {"{{.pequod.global.subject}}", "Queequeg"}, - {"{{.spouter.global.name}}", "Ishmael"}, - {"{{.spouter.global.harpooner}}", ""}, - - {"{{.global.nested.boat}}", "true"}, - {"{{.pequod.global.nested.boat}}", "true"}, - {"{{.spouter.global.nested.boat}}", "true"}, - {"{{.pequod.global.nested.sail}}", "true"}, - {"{{.spouter.global.nested.sail}}", ""}, - - {"{{.global.nested2.l0}}", "moby"}, - {"{{.global.nested2.l1}}", ""}, - {"{{.global.nested2.l2}}", ""}, - {"{{.pequod.global.nested2.l0}}", "moby"}, - {"{{.pequod.global.nested2.l1}}", "pequod"}, - {"{{.pequod.global.nested2.l2}}", ""}, - {"{{.pequod.ahab.global.nested2.l0}}", "moby"}, - 
{"{{.pequod.ahab.global.nested2.l1}}", "pequod"}, - {"{{.pequod.ahab.global.nested2.l2}}", "ahab"}, - {"{{.spouter.global.nested2.l0}}", "moby"}, - {"{{.spouter.global.nested2.l1}}", "spouter"}, - {"{{.spouter.global.nested2.l2}}", ""}, - } - - for _, tt := range tests { - if o, err := ttpl(tt.tpl, v); err != nil || o != tt.expect { - t.Errorf("Expected %q to expand to %q, got %q", tt.tpl, tt.expect, o) - } - } - - nullKeys := []string{"bottom", "right", "left", "front"} - for _, nullKey := range nullKeys { - if _, ok := v[nullKey]; ok { - t.Errorf("Expected key %q to be removed, still present", nullKey) - } - } - - if _, ok := v["nested"].(map[string]interface{})["boat"]; ok { - t.Error("Expected nested boat key to be removed, still present") - } - - subchart := v["pequod"].(map[string]interface{}) - if _, ok := subchart["boat"]; ok { - t.Error("Expected subchart boat key to be removed, still present") - } - - subsubchart := subchart["ahab"].(map[string]interface{}) - if _, ok := subsubchart["boat"]; ok { - t.Error("Expected sub-subchart ahab boat key to be removed, still present") - } - - if _, ok := subsubchart["nested"].(map[string]interface{})["boat"]; ok { - t.Error("Expected sub-subchart nested boat key to be removed, still present") - } - - if _, ok := subsubchart["object"]; ok { - t.Error("Expected sub-subchart object map to be removed, still present") - } - - // CoalesceValues should not mutate the passed arguments - is.Equal(valsCopy, vals) -} - -func TestMergeValues(t *testing.T) { - is := assert.New(t) - - c := withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "moby"}, - Values: map[string]interface{}{ - "back": "exists", - "bottom": "exists", - "front": "exists", - "left": "exists", - "name": "moby", - "nested": map[string]interface{}{"boat": true}, - "override": "bad", - "right": "exists", - "scope": "moby", - "top": "nope", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l0": "moby"}, - }, - }, - }, - withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "pequod"}, - Values: map[string]interface{}{ - "name": "pequod", - "scope": "pequod", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "pequod"}, - }, - }, - }, - &chart.Chart{ - Metadata: &chart.Metadata{Name: "ahab"}, - Values: map[string]interface{}{ - "global": map[string]interface{}{ - "nested": map[string]interface{}{"foo": "bar"}, - "nested2": map[string]interface{}{"l2": "ahab"}, - }, - "scope": "ahab", - "name": "ahab", - "boat": true, - "nested": map[string]interface{}{"foo": false, "bar": true}, - }, - }, - ), - &chart.Chart{ - Metadata: &chart.Metadata{Name: "spouter"}, - Values: map[string]interface{}{ - "scope": "spouter", - "global": map[string]interface{}{ - "nested2": map[string]interface{}{"l1": "spouter"}, - }, - }, - }, - ) - - vals, err := ReadValues(testCoalesceValuesYaml) - if err != nil { - t.Fatal(err) - } - - // taking a copy of the values before passing it - // to MergeValues as argument, so that we can - // use it for asserting later - valsCopy := make(Values, len(vals)) - maps.Copy(valsCopy, vals) - - v, err := MergeValues(c, vals) - if err != nil { - t.Fatal(err) - } - j, _ := json.MarshalIndent(v, "", " ") - t.Logf("Coalesced Values: %s", string(j)) - - tests := []struct { - tpl string - expect string - }{ - {"{{.top}}", "yup"}, - {"{{.back}}", ""}, - {"{{.name}}", "moby"}, - {"{{.global.name}}", "Ishmael"}, - {"{{.global.subject}}", "Queequeg"}, - {"{{.global.harpooner}}", ""}, - {"{{.pequod.name}}", "pequod"}, - 
{"{{.pequod.ahab.name}}", "ahab"}, - {"{{.pequod.ahab.scope}}", "whale"}, - {"{{.pequod.ahab.nested.foo}}", "true"}, - {"{{.pequod.ahab.global.name}}", "Ishmael"}, - {"{{.pequod.ahab.global.nested.foo}}", "bar"}, - {"{{.pequod.ahab.global.subject}}", "Queequeg"}, - {"{{.pequod.ahab.global.harpooner}}", "Tashtego"}, - {"{{.pequod.global.name}}", "Ishmael"}, - {"{{.pequod.global.nested.foo}}", ""}, - {"{{.pequod.global.subject}}", "Queequeg"}, - {"{{.spouter.global.name}}", "Ishmael"}, - {"{{.spouter.global.harpooner}}", ""}, - - {"{{.global.nested.boat}}", "true"}, - {"{{.pequod.global.nested.boat}}", "true"}, - {"{{.spouter.global.nested.boat}}", "true"}, - {"{{.pequod.global.nested.sail}}", "true"}, - {"{{.spouter.global.nested.sail}}", ""}, - - {"{{.global.nested2.l0}}", "moby"}, - {"{{.global.nested2.l1}}", ""}, - {"{{.global.nested2.l2}}", ""}, - {"{{.pequod.global.nested2.l0}}", "moby"}, - {"{{.pequod.global.nested2.l1}}", "pequod"}, - {"{{.pequod.global.nested2.l2}}", ""}, - {"{{.pequod.ahab.global.nested2.l0}}", "moby"}, - {"{{.pequod.ahab.global.nested2.l1}}", "pequod"}, - {"{{.pequod.ahab.global.nested2.l2}}", "ahab"}, - {"{{.spouter.global.nested2.l0}}", "moby"}, - {"{{.spouter.global.nested2.l1}}", "spouter"}, - {"{{.spouter.global.nested2.l2}}", ""}, - } - - for _, tt := range tests { - if o, err := ttpl(tt.tpl, v); err != nil || o != tt.expect { - t.Errorf("Expected %q to expand to %q, got %q", tt.tpl, tt.expect, o) - } - } - - // nullKeys is different from coalescing. Here the null/nil values are not - // removed. - nullKeys := []string{"bottom", "right", "left", "front"} - for _, nullKey := range nullKeys { - if vv, ok := v[nullKey]; !ok { - t.Errorf("Expected key %q to be present but it was removed", nullKey) - } else if vv != nil { - t.Errorf("Expected key %q to be null but it has a value of %v", nullKey, vv) - } - } - - if _, ok := v["nested"].(map[string]interface{})["boat"]; !ok { - t.Error("Expected nested boat key to be present but it was removed") - } - - subchart := v["pequod"].(map[string]interface{})["ahab"].(map[string]interface{}) - if _, ok := subchart["boat"]; !ok { - t.Error("Expected subchart boat key to be present but it was removed") - } - - if _, ok := subchart["nested"].(map[string]interface{})["bar"]; !ok { - t.Error("Expected subchart nested bar key to be present but it was removed") - } - - // CoalesceValues should not mutate the passed arguments - is.Equal(valsCopy, vals) -} - -func TestCoalesceTables(t *testing.T) { - dst := map[string]interface{}{ - "name": "Ishmael", - "address": map[string]interface{}{ - "street": "123 Spouter Inn Ct.", - "city": "Nantucket", - "country": nil, - }, - "details": map[string]interface{}{ - "friends": []string{"Tashtego"}, - }, - "boat": "pequod", - "hole": nil, - } - src := map[string]interface{}{ - "occupation": "whaler", - "address": map[string]interface{}{ - "state": "MA", - "street": "234 Spouter Inn Ct.", - "country": "US", - }, - "details": "empty", - "boat": map[string]interface{}{ - "mast": true, - }, - "hole": "black", - } - - // What we expect is that anything in dst overrides anything in src, but that - // otherwise the values are coalesced. - CoalesceTables(dst, src) - - if dst["name"] != "Ishmael" { - t.Errorf("Unexpected name: %s", dst["name"]) - } - if dst["occupation"] != "whaler" { - t.Errorf("Unexpected occupation: %s", dst["occupation"]) - } - - addr, ok := dst["address"].(map[string]interface{}) - if !ok { - t.Fatal("Address went away.") - } - - if addr["street"].(string) != "123 Spouter Inn Ct." 
{ - t.Errorf("Unexpected address: %v", addr["street"]) - } - - if addr["city"].(string) != "Nantucket" { - t.Errorf("Unexpected city: %v", addr["city"]) - } - - if addr["state"].(string) != "MA" { - t.Errorf("Unexpected state: %v", addr["state"]) - } - - if _, ok = addr["country"]; ok { - t.Error("The country is not left out.") - } - - if det, ok := dst["details"].(map[string]interface{}); !ok { - t.Fatalf("Details is the wrong type: %v", dst["details"]) - } else if _, ok := det["friends"]; !ok { - t.Error("Could not find your friends. Maybe you don't have any. :-(") - } - - if dst["boat"].(string) != "pequod" { - t.Errorf("Expected boat string, got %v", dst["boat"]) - } - - if _, ok = dst["hole"]; ok { - t.Error("The hole still exists.") - } - - dst2 := map[string]interface{}{ - "name": "Ishmael", - "address": map[string]interface{}{ - "street": "123 Spouter Inn Ct.", - "city": "Nantucket", - "country": "US", - }, - "details": map[string]interface{}{ - "friends": []string{"Tashtego"}, - }, - "boat": "pequod", - "hole": "black", - } - - // What we expect is that anything in dst should have all values set, - // this happens when the --reuse-values flag is set but the chart has no modifications yet - CoalesceTables(dst2, nil) - - if dst2["name"] != "Ishmael" { - t.Errorf("Unexpected name: %s", dst2["name"]) - } - - addr2, ok := dst2["address"].(map[string]interface{}) - if !ok { - t.Fatal("Address went away.") - } - - if addr2["street"].(string) != "123 Spouter Inn Ct." { - t.Errorf("Unexpected address: %v", addr2["street"]) - } - - if addr2["city"].(string) != "Nantucket" { - t.Errorf("Unexpected city: %v", addr2["city"]) - } - - if addr2["country"].(string) != "US" { - t.Errorf("Unexpected Country: %v", addr2["country"]) - } - - if det2, ok := dst2["details"].(map[string]interface{}); !ok { - t.Fatalf("Details is the wrong type: %v", dst2["details"]) - } else if _, ok := det2["friends"]; !ok { - t.Error("Could not find your friends. Maybe you don't have any. :-(") - } - - if dst2["boat"].(string) != "pequod" { - t.Errorf("Expected boat string, got %v", dst2["boat"]) - } - - if dst2["hole"].(string) != "black" { - t.Errorf("Expected hole string, got %v", dst2["boat"]) - } -} - -func TestMergeTables(t *testing.T) { - dst := map[string]interface{}{ - "name": "Ishmael", - "address": map[string]interface{}{ - "street": "123 Spouter Inn Ct.", - "city": "Nantucket", - "country": nil, - }, - "details": map[string]interface{}{ - "friends": []string{"Tashtego"}, - }, - "boat": "pequod", - "hole": nil, - } - src := map[string]interface{}{ - "occupation": "whaler", - "address": map[string]interface{}{ - "state": "MA", - "street": "234 Spouter Inn Ct.", - "country": "US", - }, - "details": "empty", - "boat": map[string]interface{}{ - "mast": true, - }, - "hole": "black", - } - - // What we expect is that anything in dst overrides anything in src, but that - // otherwise the values are coalesced. - MergeTables(dst, src) - - if dst["name"] != "Ishmael" { - t.Errorf("Unexpected name: %s", dst["name"]) - } - if dst["occupation"] != "whaler" { - t.Errorf("Unexpected occupation: %s", dst["occupation"]) - } - - addr, ok := dst["address"].(map[string]interface{}) - if !ok { - t.Fatal("Address went away.") - } - - if addr["street"].(string) != "123 Spouter Inn Ct." 
{ - t.Errorf("Unexpected address: %v", addr["street"]) - } - - if addr["city"].(string) != "Nantucket" { - t.Errorf("Unexpected city: %v", addr["city"]) - } - - if addr["state"].(string) != "MA" { - t.Errorf("Unexpected state: %v", addr["state"]) - } - - // This is one test that is different from CoalesceTables. Because country - // is a nil value and it's not removed it's still present. - if _, ok = addr["country"]; !ok { - t.Error("The country is left out.") - } - - if det, ok := dst["details"].(map[string]interface{}); !ok { - t.Fatalf("Details is the wrong type: %v", dst["details"]) - } else if _, ok := det["friends"]; !ok { - t.Error("Could not find your friends. Maybe you don't have any. :-(") - } - - if dst["boat"].(string) != "pequod" { - t.Errorf("Expected boat string, got %v", dst["boat"]) - } - - // This is one test that is different from CoalesceTables. Because hole - // is a nil value and it's not removed it's still present. - if _, ok = dst["hole"]; !ok { - t.Error("The hole no longer exists.") - } - - dst2 := map[string]interface{}{ - "name": "Ishmael", - "address": map[string]interface{}{ - "street": "123 Spouter Inn Ct.", - "city": "Nantucket", - "country": "US", - }, - "details": map[string]interface{}{ - "friends": []string{"Tashtego"}, - }, - "boat": "pequod", - "hole": "black", - "nilval": nil, - } - - // What we expect is that anything in dst should have all values set, - // this happens when the --reuse-values flag is set but the chart has no modifications yet - MergeTables(dst2, nil) - - if dst2["name"] != "Ishmael" { - t.Errorf("Unexpected name: %s", dst2["name"]) - } - - addr2, ok := dst2["address"].(map[string]interface{}) - if !ok { - t.Fatal("Address went away.") - } - - if addr2["street"].(string) != "123 Spouter Inn Ct." { - t.Errorf("Unexpected address: %v", addr2["street"]) - } - - if addr2["city"].(string) != "Nantucket" { - t.Errorf("Unexpected city: %v", addr2["city"]) - } - - if addr2["country"].(string) != "US" { - t.Errorf("Unexpected Country: %v", addr2["country"]) - } - - if det2, ok := dst2["details"].(map[string]interface{}); !ok { - t.Fatalf("Details is the wrong type: %v", dst2["details"]) - } else if _, ok := det2["friends"]; !ok { - t.Error("Could not find your friends. Maybe you don't have any. 
:-(") - } - - if dst2["boat"].(string) != "pequod" { - t.Errorf("Expected boat string, got %v", dst2["boat"]) - } - - if dst2["hole"].(string) != "black" { - t.Errorf("Expected hole string, got %v", dst2["boat"]) - } - - if dst2["nilval"] != nil { - t.Error("Expected nilvalue to have nil value but it does not") - } -} - -func TestCoalesceValuesWarnings(t *testing.T) { - - c := withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "level1"}, - Values: map[string]interface{}{ - "name": "moby", - }, - }, - withDeps(&chart.Chart{ - Metadata: &chart.Metadata{Name: "level2"}, - Values: map[string]interface{}{ - "name": "pequod", - }, - }, - &chart.Chart{ - Metadata: &chart.Metadata{Name: "level3"}, - Values: map[string]interface{}{ - "name": "ahab", - "boat": true, - "spear": map[string]interface{}{ - "tip": true, - "sail": map[string]interface{}{ - "cotton": true, - }, - }, - }, - }, - ), - ) - - vals := map[string]interface{}{ - "level2": map[string]interface{}{ - "level3": map[string]interface{}{ - "boat": map[string]interface{}{"mast": true}, - "spear": map[string]interface{}{ - "tip": map[string]interface{}{ - "sharp": true, - }, - "sail": true, - }, - }, - }, - } - - warnings := make([]string, 0) - printf := func(format string, v ...interface{}) { - t.Logf(format, v...) - warnings = append(warnings, fmt.Sprintf(format, v...)) - } - - _, err := coalesce(printf, c, vals, "", false) - if err != nil { - t.Fatal(err) - } - - t.Logf("vals: %v", vals) - assert.Contains(t, warnings, "warning: skipped value for level1.level2.level3.boat: Not a table.") - assert.Contains(t, warnings, "warning: destination for level1.level2.level3.spear.tip is a table. Ignoring non-table value (true)") - assert.Contains(t, warnings, "warning: cannot overwrite table with non table for level1.level2.level3.spear.sail (map[cotton:true])") - -} - -func TestConcatPrefix(t *testing.T) { - assert.Equal(t, "b", concatPrefix("", "b")) - assert.Equal(t, "a.b", concatPrefix("a", "b")) -} diff --git a/internal/chart/v3/util/create.go b/internal/chart/v3/util/create.go index 6a28f99d4..9f742e646 100644 --- a/internal/chart/v3/util/create.go +++ b/internal/chart/v3/util/create.go @@ -28,6 +28,7 @@ import ( chart "helm.sh/helm/v4/internal/chart/v3" "helm.sh/helm/v4/internal/chart/v3/loader" + "helm.sh/helm/v4/pkg/chart/common" ) // chartName is a regular expression for testing the supplied name of a chart. @@ -655,11 +656,11 @@ func CreateFrom(chartfile *chart.Metadata, dest, src string) error { schart.Metadata = chartfile - var updatedTemplates []*chart.File + var updatedTemplates []*common.File for _, template := range schart.Templates { newData := transform(string(template.Data), schart.Name()) - updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData}) + updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, Data: newData}) } schart.Templates = updatedTemplates diff --git a/internal/chart/v3/util/dependencies.go b/internal/chart/v3/util/dependencies.go index 129c46372..489772115 100644 --- a/internal/chart/v3/util/dependencies.go +++ b/internal/chart/v3/util/dependencies.go @@ -23,10 +23,12 @@ import ( "github.com/mitchellh/copystructure" chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" ) // ProcessDependencies checks through this chart's dependencies, processing accordingly. 
-func ProcessDependencies(c *chart.Chart, v Values) error { +func ProcessDependencies(c *chart.Chart, v common.Values) error { if err := processDependencyEnabled(c, v, ""); err != nil { return err } @@ -34,7 +36,7 @@ func ProcessDependencies(c *chart.Chart, v Values) error { } // processDependencyConditions disables charts based on condition path value in values -func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { +func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) { if reqs == nil { return } @@ -50,7 +52,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s break } slog.Warn("returned non-bool value", "path", c, "chart", r.Name) - } else if _, ok := err.(ErrNoValue); !ok { + } else if _, ok := err.(common.ErrNoValue); !ok { // this is a real error slog.Warn("the method PathValue returned error", slog.Any("error", err)) } @@ -60,7 +62,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s } // processDependencyTags disables charts based on tags in values -func processDependencyTags(reqs []*chart.Dependency, cvals Values) { +func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) { if reqs == nil { return } @@ -177,7 +179,7 @@ Loop: for _, lr := range c.Metadata.Dependencies { lr.Enabled = true } - cvals, err := CoalesceValues(c, v) + cvals, err := util.CoalesceValues(c, v) if err != nil { return err } @@ -232,6 +234,8 @@ func pathToMap(path string, data map[string]interface{}) map[string]interface{} return set(parsePath(path), data) } +func parsePath(key string) []string { return strings.Split(key, ".") } + func set(path []string, data map[string]interface{}) map[string]interface{} { if len(path) == 0 { return nil @@ -249,12 +253,12 @@ func processImportValues(c *chart.Chart, merge bool) error { return nil } // combine chart values and empty config to get Values - var cvals Values + var cvals common.Values var err error if merge { - cvals, err = MergeValues(c, nil) + cvals, err = util.MergeValues(c, nil) } else { - cvals, err = CoalesceValues(c, nil) + cvals, err = util.CoalesceValues(c, nil) } if err != nil { return err @@ -282,9 +286,9 @@ func processImportValues(c *chart.Chart, merge bool) error { } // create value map from child to be merged into parent if merge { - b = MergeTables(b, pathToMap(parent, vv.AsMap())) + b = util.MergeTables(b, pathToMap(parent, vv.AsMap())) } else { - b = CoalesceTables(b, pathToMap(parent, vv.AsMap())) + b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap())) } case string: child := "exports." + iv @@ -298,9 +302,9 @@ func processImportValues(c *chart.Chart, merge bool) error { continue } if merge { - b = MergeTables(b, vm.AsMap()) + b = util.MergeTables(b, vm.AsMap()) } else { - b = CoalesceTables(b, vm.AsMap()) + b = util.CoalesceTables(b, vm.AsMap()) } } } @@ -315,14 +319,14 @@ func processImportValues(c *chart.Chart, merge bool) error { // deep copying the cvals as there are cases where pointers can end // up in the cvals when they are copied onto b in ways that break things. cvals = deepCopyMap(cvals) - c.Values = MergeTables(cvals, b) + c.Values = util.MergeTables(cvals, b) } else { // Trimming the nil values from cvals is needed for backwards compatibility. // Previously, the b value had been populated with cvals along with some // overrides. This caused the coalescing functionality to remove the // nil/null values. This trimming is for backwards compat. 
cvals = trimNilValues(cvals) - c.Values = CoalesceTables(cvals, b) + c.Values = util.CoalesceTables(cvals, b) } return nil @@ -355,6 +359,12 @@ func trimNilValues(vals map[string]interface{}) map[string]interface{} { return valsCopyMap } +// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. +func istable(v interface{}) bool { + _, ok := v.(map[string]interface{}) + return ok +} + // processDependencyImportValues imports specified chart values from child to parent. func processDependencyImportValues(c *chart.Chart, merge bool) error { for _, d := range c.Dependencies() { diff --git a/internal/chart/v3/util/dependencies_test.go b/internal/chart/v3/util/dependencies_test.go index 55839fe65..3c5bb96f7 100644 --- a/internal/chart/v3/util/dependencies_test.go +++ b/internal/chart/v3/util/dependencies_test.go @@ -23,6 +23,7 @@ import ( chart "helm.sh/helm/v4/internal/chart/v3" "helm.sh/helm/v4/internal/chart/v3/loader" + "helm.sh/helm/v4/pkg/chart/common" ) func loadChart(t *testing.T, path string) *chart.Chart { @@ -221,7 +222,7 @@ func TestProcessDependencyImportValues(t *testing.T) { if err := processDependencyImportValues(c, false); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc := Values(c.Values) + cc := common.Values(c.Values) for kk, vv := range e { pv, err := cc.PathValue(kk) if err != nil { @@ -251,7 +252,7 @@ func TestProcessDependencyImportValues(t *testing.T) { t.Error("expect nil value not found but found it") } switch xerr := err.(type) { - case ErrNoValue: + case common.ErrNoValue: // We found what we expected default: t.Errorf("expected an ErrNoValue but got %q instead", xerr) @@ -261,7 +262,7 @@ func TestProcessDependencyImportValues(t *testing.T) { if err := processDependencyImportValues(c, true); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc = Values(c.Values) + cc = common.Values(c.Values) val, err := cc.PathValue("ensurenull") if err != nil { t.Error("expect value but ensurenull was not found") @@ -291,7 +292,7 @@ func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T e["foo.grandchild.defaults.defaultValue"] = "42" e["bar.grandchild.defaults.defaultValue"] = "42" - cValues := Values(c.Values) + cValues := common.Values(c.Values) for kk, vv := range e { pv, err := cValues.PathValue(kk) if err != nil { @@ -329,7 +330,7 @@ func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) { if err := processDependencyImportValues(c, true); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc := Values(c.Values) + cc := common.Values(c.Values) for kk, vv := range e { pv, err := cc.PathValue(kk) if err != nil { diff --git a/internal/chart/v3/util/errors.go b/internal/chart/v3/util/errors.go deleted file mode 100644 index a175b9758..000000000 --- a/internal/chart/v3/util/errors.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" -) - -// ErrNoTable indicates that a chart does not have a matching table. -type ErrNoTable struct { - Key string -} - -func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) } - -// ErrNoValue indicates that Values does not contain a key with a value -type ErrNoValue struct { - Key string -} - -func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } - -type ErrInvalidChartName struct { - Name string -} - -func (e ErrInvalidChartName) Error() string { - return fmt.Sprintf("%q is not a valid chart name", e.Name) -} diff --git a/internal/chart/v3/util/jsonschema.go b/internal/chart/v3/util/jsonschema.go deleted file mode 100644 index 9fe35904e..000000000 --- a/internal/chart/v3/util/jsonschema.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "errors" - "fmt" - "log/slog" - "strings" - - "github.com/santhosh-tekuri/jsonschema/v6" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -// ValidateAgainstSchema checks that values does not violate the structure laid out in schema -func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { - var sb strings.Builder - if chrt.Schema != nil { - slog.Debug("chart name", "chart-name", chrt.Name()) - err := ValidateAgainstSingleSchema(values, chrt.Schema) - if err != nil { - sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) - sb.WriteString(err.Error()) - } - } - slog.Debug("number of dependencies in the chart", "dependencies", len(chrt.Dependencies())) - // For each dependency, recursively call this function with the coalesced values - for _, subchart := range chrt.Dependencies() { - subchartValues := values[subchart.Name()].(map[string]interface{}) - if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { - sb.WriteString(err.Error()) - } - } - - if sb.Len() > 0 { - return errors.New(sb.String()) - } - - return nil -} - -// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema -func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { - defer func() { - if r := recover(); r != nil { - reterr = fmt.Errorf("unable to validate schema: %s", r) - } - }() - - // This unmarshal function leverages UseNumber() for number precision. The parser - // used for values does this as well. 
- schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON)) - if err != nil { - return err - } - slog.Debug("unmarshalled JSON schema", "schema", schemaJSON) - - compiler := jsonschema.NewCompiler() - err = compiler.AddResource("file:///values.schema.json", schema) - if err != nil { - return err - } - - validator, err := compiler.Compile("file:///values.schema.json") - if err != nil { - return err - } - - err = validator.Validate(values.AsMap()) - if err != nil { - return JSONSchemaValidationError{err} - } - - return nil -} - -// Note, JSONSchemaValidationError is used to wrap the error from the underlying -// validation package so that Helm has a clean interface and the validation package -// could be replaced without changing the Helm SDK API. - -// JSONSchemaValidationError is the error returned when there is a schema validation -// error. -type JSONSchemaValidationError struct { - embeddedErr error -} - -// Error prints the error message -func (e JSONSchemaValidationError) Error() string { - errStr := e.embeddedErr.Error() - - // This string prefixes all of our error details. Further up the stack of helm error message - // building more detail is provided to users. This is removed. - errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n") - - // The extra new line is needed for when there are sub-charts. - return errStr + "\n" -} diff --git a/internal/chart/v3/util/jsonschema_test.go b/internal/chart/v3/util/jsonschema_test.go deleted file mode 100644 index 0a3820377..000000000 --- a/internal/chart/v3/util/jsonschema_test.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "os" - "testing" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -func TestValidateAgainstSingleSchema(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values.yaml") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - schema, err := os.ReadFile("./testdata/test-values.schema.json") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - - if err := ValidateAgainstSingleSchema(values, schema); err != nil { - t.Errorf("Error validating Values against Schema: %s", err) - } -} - -func TestValidateAgainstInvalidSingleSchema(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values.yaml") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - schema, err := os.ReadFile("./testdata/test-values-invalid.schema.json") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - - var errString string - if err := ValidateAgainstSingleSchema(values, schema); err == nil { - t.Fatalf("Expected an error, but got nil") - } else { - errString = err.Error() - } - - expectedErrString := `"file:///values.schema.json#" is not valid against metaschema: jsonschema validation failed with 'https://json-schema.org/draft/2020-12/schema#' -- at '': got number, want boolean or object` - if errString != expectedErrString { - t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) - } -} - -func TestValidateAgainstSingleSchemaNegative(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values-negative.yaml") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - schema, err := os.ReadFile("./testdata/test-values.schema.json") - if err != nil { - t.Fatalf("Error reading JSON file: %s", err) - } - - var errString string - if err := ValidateAgainstSingleSchema(values, schema); err == nil { - t.Fatalf("Expected an error, but got nil") - } else { - errString = err.Error() - } - - expectedErrString := `- at '': missing property 'employmentInfo' -- at '/age': minimum: got -5, want 0 -` - if errString != expectedErrString { - t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) - } -} - -const subchartSchema = `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Values", - "type": "object", - "properties": { - "age": { - "description": "Age", - "minimum": 0, - "type": "integer" - } - }, - "required": [ - "age" - ] -} -` - -const subchartSchema2020 = `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Values", - "type": "object", - "properties": { - "data": { - "type": "array", - "contains": { "type": "string" }, - "unevaluatedItems": { "type": "number" } - } - }, - "required": ["data"] -} -` - -func TestValidateAgainstSchema(t *testing.T) { - subchartJSON := []byte(subchartSchema) - subchart := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "subchart", - }, - Schema: subchartJSON, - } - chrt := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "chrt", - }, - } - chrt.AddDependency(subchart) - - vals := map[string]interface{}{ - "name": "John", - "subchart": map[string]interface{}{ - "age": 25, - }, - } - - if err := ValidateAgainstSchema(chrt, vals); err != nil { - t.Errorf("Error validating Values against Schema: %s", err) - } -} - -func TestValidateAgainstSchemaNegative(t *testing.T) { - subchartJSON := []byte(subchartSchema) - subchart := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "subchart", - }, - Schema: subchartJSON, - } - chrt := 
&chart.Chart{ - Metadata: &chart.Metadata{ - Name: "chrt", - }, - } - chrt.AddDependency(subchart) - - vals := map[string]interface{}{ - "name": "John", - "subchart": map[string]interface{}{}, - } - - var errString string - if err := ValidateAgainstSchema(chrt, vals); err == nil { - t.Fatalf("Expected an error, but got nil") - } else { - errString = err.Error() - } - - expectedErrString := `subchart: -- at '': missing property 'age' -` - if errString != expectedErrString { - t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) - } -} - -func TestValidateAgainstSchema2020(t *testing.T) { - subchartJSON := []byte(subchartSchema2020) - subchart := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "subchart", - }, - Schema: subchartJSON, - } - chrt := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "chrt", - }, - } - chrt.AddDependency(subchart) - - vals := map[string]interface{}{ - "name": "John", - "subchart": map[string]interface{}{ - "data": []any{"hello", 12}, - }, - } - - if err := ValidateAgainstSchema(chrt, vals); err != nil { - t.Errorf("Error validating Values against Schema: %s", err) - } -} - -func TestValidateAgainstSchema2020Negative(t *testing.T) { - subchartJSON := []byte(subchartSchema2020) - subchart := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "subchart", - }, - Schema: subchartJSON, - } - chrt := &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "chrt", - }, - } - chrt.AddDependency(subchart) - - vals := map[string]interface{}{ - "name": "John", - "subchart": map[string]interface{}{ - "data": []any{12}, - }, - } - - var errString string - if err := ValidateAgainstSchema(chrt, vals); err == nil { - t.Fatalf("Expected an error, but got nil") - } else { - errString = err.Error() - } - - expectedErrString := `subchart: -- at '/data': no items match contains schema - - at '/data/0': got number, want string -` - if errString != expectedErrString { - t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString) - } -} diff --git a/internal/chart/v3/util/save.go b/internal/chart/v3/util/save.go index 3125cc3c9..49d93bf40 100644 --- a/internal/chart/v3/util/save.go +++ b/internal/chart/v3/util/save.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/yaml" chart "helm.sh/helm/v4/internal/chart/v3" + "helm.sh/helm/v4/pkg/chart/common" ) var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") @@ -76,7 +77,7 @@ func SaveDir(c *chart.Chart, dest string) error { } // Save templates and files - for _, o := range [][]*chart.File{c.Templates, c.Files} { + for _, o := range [][]*common.File{c.Templates, c.Files} { for _, f := range o { n := filepath.Join(outdir, f.Name) if err := writeFile(n, f.Data); err != nil { @@ -246,7 +247,7 @@ func validateName(name string) error { nname := filepath.Base(name) if nname != name { - return ErrInvalidChartName{name} + return common.ErrInvalidChartName{Name: name} } return nil diff --git a/internal/chart/v3/util/save_test.go b/internal/chart/v3/util/save_test.go index 852675bb0..9b1b14a4c 100644 --- a/internal/chart/v3/util/save_test.go +++ b/internal/chart/v3/util/save_test.go @@ -31,6 +31,7 @@ import ( chart "helm.sh/helm/v4/internal/chart/v3" "helm.sh/helm/v4/internal/chart/v3/loader" + "helm.sh/helm/v4/pkg/chart/common" ) func TestSave(t *testing.T) { @@ -47,7 +48,7 @@ func TestSave(t *testing.T) { Lock: &chart.Lock{ Digest: "testdigest", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, 
Schema: []byte("{\n \"title\": \"Values\"\n}"), @@ -113,7 +114,7 @@ func TestSave(t *testing.T) { Lock: &chart.Lock{ Digest: "testdigest", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, } @@ -153,7 +154,7 @@ func TestSavePreservesTimestamps(t *testing.T) { "imageName": "testimage", "imageId": 42, }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, Schema: []byte("{\n \"title\": \"Values\"\n}"), @@ -219,10 +220,10 @@ func TestSaveDir(t *testing.T) { Name: "ahab", Version: "1.2.3", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), Data: []byte("abc: {{ .Values.abc }}")}, }, } diff --git a/internal/chart/v3/util/values.go b/internal/chart/v3/util/values.go deleted file mode 100644 index 8e1a14b45..000000000 --- a/internal/chart/v3/util/values.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - - "sigs.k8s.io/yaml" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -// GlobalKey is the name of the Values key that is used for storing global vars. -const GlobalKey = "global" - -// Values represents a collection of chart values. -type Values map[string]interface{} - -// YAML encodes the Values into a YAML string. -func (v Values) YAML() (string, error) { - b, err := yaml.Marshal(v) - return string(b), err -} - -// Table gets a table (YAML subsection) from a Values object. -// -// The table is returned as a Values. -// -// Compound table names may be specified with dots: -// -// foo.bar -// -// The above will be evaluated as "The table bar inside the table -// foo". -// -// An ErrNoTable is returned if the table does not exist. -func (v Values) Table(name string) (Values, error) { - table := v - var err error - - for _, n := range parsePath(name) { - if table, err = tableLookup(table, n); err != nil { - break - } - } - return table, err -} - -// AsMap is a utility function for converting Values to a map[string]interface{}. -// -// It protects against nil map panics. -func (v Values) AsMap() map[string]interface{} { - if len(v) == 0 { - return map[string]interface{}{} - } - return v -} - -// Encode writes serialized Values information to the given io.Writer. -func (v Values) Encode(w io.Writer) error { - out, err := yaml.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(out) - return err -} - -func tableLookup(v Values, simple string) (Values, error) { - v2, ok := v[simple] - if !ok { - return v, ErrNoTable{simple} - } - if vv, ok := v2.(map[string]interface{}); ok { - return vv, nil - } - - // This catches a case where a value is of type Values, but doesn't (for some - // reason) match the map[string]interface{}. 
This has been observed in the - // wild, and might be a result of a nil map of type Values. - if vv, ok := v2.(Values); ok { - return vv, nil - } - - return Values{}, ErrNoTable{simple} -} - -// ReadValues will parse YAML byte data into a Values. -func ReadValues(data []byte) (vals Values, err error) { - err = yaml.Unmarshal(data, &vals) - if len(vals) == 0 { - vals = Values{} - } - return vals, err -} - -// ReadValuesFile will parse a YAML file into a map of values. -func ReadValuesFile(filename string) (Values, error) { - data, err := os.ReadFile(filename) - if err != nil { - return map[string]interface{}{}, err - } - return ReadValues(data) -} - -// ReleaseOptions represents the additional release options needed -// for the composition of the final values struct -type ReleaseOptions struct { - Name string - Namespace string - Revision int - IsUpgrade bool - IsInstall bool -} - -// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. -func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) { - return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false) -} - -// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. -func ToRenderValuesWithSchemaValidation(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities, skipSchemaValidation bool) (Values, error) { - if caps == nil { - caps = DefaultCapabilities - } - top := map[string]interface{}{ - "Chart": chrt.Metadata, - "Capabilities": caps, - "Release": map[string]interface{}{ - "Name": options.Name, - "Namespace": options.Namespace, - "IsUpgrade": options.IsUpgrade, - "IsInstall": options.IsInstall, - "Revision": options.Revision, - "Service": "Helm", - }, - } - - vals, err := CoalesceValues(chrt, chrtVals) - if err != nil { - return top, err - } - - if !skipSchemaValidation { - if err := ValidateAgainstSchema(chrt, vals); err != nil { - return top, fmt.Errorf("values don't meet the specifications of the schema(s) in the following chart(s):\n%w", err) - } - } - - top["Values"] = vals - return top, nil -} - -// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) - return ok -} - -// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path. -// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods. -// Given the following YAML data the value at path "chapter.one.title" is "Loomings". 
-// -// chapter: -// one: -// title: "Loomings" -func (v Values) PathValue(path string) (interface{}, error) { - if path == "" { - return nil, errors.New("YAML path cannot be empty") - } - return v.pathValue(parsePath(path)) -} - -func (v Values) pathValue(path []string) (interface{}, error) { - if len(path) == 1 { - // if exists must be root key not table - if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { - return v[path[0]], nil - } - return nil, ErrNoValue{path[0]} - } - - key, path := path[len(path)-1], path[:len(path)-1] - // get our table for table path - t, err := v.Table(joinPath(path...)) - if err != nil { - return nil, ErrNoValue{key} - } - // check table for key and ensure value is not a table - if k, ok := t[key]; ok && !istable(k) { - return k, nil - } - return nil, ErrNoValue{key} -} - -func parsePath(key string) []string { return strings.Split(key, ".") } - -func joinPath(path ...string) string { return strings.Join(path, ".") } diff --git a/internal/chart/v3/util/values_test.go b/internal/chart/v3/util/values_test.go deleted file mode 100644 index 34c664581..000000000 --- a/internal/chart/v3/util/values_test.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "fmt" - "testing" - "text/template" - - chart "helm.sh/helm/v4/internal/chart/v3" -) - -func TestReadValues(t *testing.T) { - doc := `# Test YAML parse -poet: "Coleridge" -title: "Rime of the Ancient Mariner" -stanza: - - "at" - - "length" - - "did" - - cross - - an - - Albatross - -mariner: - with: "crossbow" - shot: "ALBATROSS" - -water: - water: - where: "everywhere" - nor: "any drop to drink" -` - - data, err := ReadValues([]byte(doc)) - if err != nil { - t.Fatalf("Error parsing bytes: %s", err) - } - matchValues(t, data) - - tests := []string{`poet: "Coleridge"`, "# Just a comment", ""} - - for _, tt := range tests { - data, err = ReadValues([]byte(tt)) - if err != nil { - t.Fatalf("Error parsing bytes (%s): %s", tt, err) - } - if data == nil { - t.Errorf(`YAML string "%s" gave a nil map`, tt) - } - } -} - -func TestToRenderValues(t *testing.T) { - - chartValues := map[string]interface{}{ - "name": "al Rashid", - "where": map[string]interface{}{ - "city": "Basrah", - "title": "caliph", - }, - } - - overrideValues := map[string]interface{}{ - "name": "Haroun", - "where": map[string]interface{}{ - "city": "Baghdad", - "date": "809 CE", - }, - } - - c := &chart.Chart{ - Metadata: &chart.Metadata{Name: "test"}, - Templates: []*chart.File{}, - Values: chartValues, - Files: []*chart.File{ - {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, - }, - } - c.AddDependency(&chart.Chart{ - Metadata: &chart.Metadata{Name: "where"}, - }) - - o := ReleaseOptions{ - Name: "Seven Voyages", - Namespace: "default", - Revision: 1, - IsInstall: true, - } - - res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false) - if err != nil { - t.Fatal(err) - } - - // Ensure that the top-level values are all set. 
- if name := res["Chart"].(*chart.Metadata).Name; name != "test" { - t.Errorf("Expected chart name 'test', got %q", name) - } - relmap := res["Release"].(map[string]interface{}) - if name := relmap["Name"]; name.(string) != "Seven Voyages" { - t.Errorf("Expected release name 'Seven Voyages', got %q", name) - } - if namespace := relmap["Namespace"]; namespace.(string) != "default" { - t.Errorf("Expected namespace 'default', got %q", namespace) - } - if revision := relmap["Revision"]; revision.(int) != 1 { - t.Errorf("Expected revision '1', got %d", revision) - } - if relmap["IsUpgrade"].(bool) { - t.Error("Expected upgrade to be false.") - } - if !relmap["IsInstall"].(bool) { - t.Errorf("Expected install to be true.") - } - if !res["Capabilities"].(*Capabilities).APIVersions.Has("v1") { - t.Error("Expected Capabilities to have v1 as an API") - } - if res["Capabilities"].(*Capabilities).KubeVersion.Major != "1" { - t.Error("Expected Capabilities to have a Kube version") - } - - vals := res["Values"].(Values) - if vals["name"] != "Haroun" { - t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals) - } - where := vals["where"].(map[string]interface{}) - expects := map[string]string{ - "city": "Baghdad", - "date": "809 CE", - "title": "caliph", - } - for field, expect := range expects { - if got := where[field]; got != expect { - t.Errorf("Expected %q, got %q (%v)", expect, got, where) - } - } -} - -func TestReadValuesFile(t *testing.T) { - data, err := ReadValuesFile("./testdata/coleridge.yaml") - if err != nil { - t.Fatalf("Error reading YAML file: %s", err) - } - matchValues(t, data) -} - -func ExampleValues() { - doc := ` -title: "Moby Dick" -chapter: - one: - title: "Loomings" - two: - title: "The Carpet-Bag" - three: - title: "The Spouter Inn" -` - d, err := ReadValues([]byte(doc)) - if err != nil { - panic(err) - } - ch1, err := d.Table("chapter.one") - if err != nil { - panic("could not find chapter one") - } - fmt.Print(ch1["title"]) - // Output: - // Loomings -} - -func TestTable(t *testing.T) { - doc := ` -title: "Moby Dick" -chapter: - one: - title: "Loomings" - two: - title: "The Carpet-Bag" - three: - title: "The Spouter Inn" -` - d, err := ReadValues([]byte(doc)) - if err != nil { - t.Fatalf("Failed to parse the White Whale: %s", err) - } - - if _, err := d.Table("title"); err == nil { - t.Fatalf("Title is not a table.") - } - - if _, err := d.Table("chapter"); err != nil { - t.Fatalf("Failed to get the chapter table: %s\n%v", err, d) - } - - if v, err := d.Table("chapter.one"); err != nil { - t.Errorf("Failed to get chapter.one: %s", err) - } else if v["title"] != "Loomings" { - t.Errorf("Unexpected title: %s", v["title"]) - } - - if _, err := d.Table("chapter.three"); err != nil { - t.Errorf("Chapter three is missing: %s\n%v", err, d) - } - - if _, err := d.Table("chapter.OneHundredThirtySix"); err == nil { - t.Errorf("I think you mean 'Epilogue'") - } -} - -func matchValues(t *testing.T, data map[string]interface{}) { - t.Helper() - if data["poet"] != "Coleridge" { - t.Errorf("Unexpected poet: %s", data["poet"]) - } - - if o, err := ttpl("{{len .stanza}}", data); err != nil { - t.Errorf("len stanza: %s", err) - } else if o != "6" { - t.Errorf("Expected 6, got %s", o) - } - - if o, err := ttpl("{{.mariner.shot}}", data); err != nil { - t.Errorf(".mariner.shot: %s", err) - } else if o != "ALBATROSS" { - t.Errorf("Expected that mariner shot ALBATROSS") - } - - if o, err := ttpl("{{.water.water.where}}", data); err != nil { - t.Errorf(".water.water.where: %s", err) - } else 
if o != "everywhere" { - t.Errorf("Expected water water everywhere") - } -} - -func ttpl(tpl string, v map[string]interface{}) (string, error) { - var b bytes.Buffer - tt := template.Must(template.New("t").Parse(tpl)) - err := tt.Execute(&b, v) - return b.String(), err -} - -func TestPathValue(t *testing.T) { - doc := ` -title: "Moby Dick" -chapter: - one: - title: "Loomings" - two: - title: "The Carpet-Bag" - three: - title: "The Spouter Inn" -` - d, err := ReadValues([]byte(doc)) - if err != nil { - t.Fatalf("Failed to parse the White Whale: %s", err) - } - - if v, err := d.PathValue("chapter.one.title"); err != nil { - t.Errorf("Got error instead of title: %s\n%v", err, d) - } else if v != "Loomings" { - t.Errorf("No error but got wrong value for title: %s\n%v", err, d) - } - if _, err := d.PathValue("chapter.one.doesnotexist"); err == nil { - t.Errorf("Non-existent key should return error: %s\n%v", err, d) - } - if _, err := d.PathValue("chapter.doesnotexist.one"); err == nil { - t.Errorf("Non-existent key in middle of path should return error: %s\n%v", err, d) - } - if _, err := d.PathValue(""); err == nil { - t.Error("Asking for the value from an empty path should yield an error") - } - if v, err := d.PathValue("title"); err == nil { - if v != "Moby Dick" { - t.Errorf("Failed to return values for root key title") - } - } -} diff --git a/pkg/action/action.go b/pkg/action/action.go index 522226a1a..bcf6ca8ef 100644 --- a/pkg/action/action.go +++ b/pkg/action/action.go @@ -39,6 +39,7 @@ import ( "sigs.k8s.io/kustomize/kyaml/kio" kyaml "sigs.k8s.io/kustomize/kyaml/yaml" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/engine" @@ -84,7 +85,7 @@ type Configuration struct { RegistryClient *registry.Client // Capabilities describes the capabilities of the Kubernetes cluster. - Capabilities *chartutil.Capabilities + Capabilities *common.Capabilities // CustomTemplateFuncs is defined by users to provide custom template funcs CustomTemplateFuncs template.FuncMap @@ -176,7 +177,7 @@ func splitAndDeannotate(postrendered string) (map[string]string, error) { // TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed // // This code has to do with writing files to disk. -func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { +func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) { var hs []*release.Hook b := bytes.NewBuffer(nil) @@ -337,7 +338,7 @@ type RESTClientGetter interface { } // capabilities builds a Capabilities from discovery information. 
-func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { +func (cfg *Configuration) getCapabilities() (*common.Capabilities, error) { if cfg.Capabilities != nil { return cfg.Capabilities, nil } @@ -366,14 +367,14 @@ func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { } } - cfg.Capabilities = &chartutil.Capabilities{ + cfg.Capabilities = &common.Capabilities{ APIVersions: apiVersions, - KubeVersion: chartutil.KubeVersion{ + KubeVersion: common.KubeVersion{ Version: kubeVersion.GitVersion, Major: kubeVersion.Major, Minor: kubeVersion.Minor, }, - HelmVersion: chartutil.DefaultCapabilities.HelmVersion, + HelmVersion: common.DefaultCapabilities.HelmVersion, } return cfg.Capabilities, nil } @@ -409,10 +410,10 @@ func (cfg *Configuration) releaseContent(name string, version int) (*release.Rel } // GetVersionSet retrieves a set of available k8s API versions -func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) { +func GetVersionSet(client discovery.ServerResourcesInterface) (common.VersionSet, error) { groups, resources, err := client.ServerGroupsAndResources() if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { - return chartutil.DefaultVersionSet, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err) + return common.DefaultVersionSet, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err) } // FIXME: The Kubernetes test fixture for cli appears to always return nil @@ -420,7 +421,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version // return the default API list. This is also a safe value to return in any // other odd-ball case. if len(groups) == 0 && len(resources) == 0 { - return chartutil.DefaultVersionSet, nil + return common.DefaultVersionSet, nil } versionMap := make(map[string]interface{}) @@ -453,7 +454,7 @@ func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.Version versions = append(versions, k) } - return chartutil.VersionSet(versions), nil + return common.VersionSet(versions), nil } // recordRelease with an update operation in case reuse has been set. diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go index 7a510ace6..b65e40024 100644 --- a/pkg/action/action_test.go +++ b/pkg/action/action_test.go @@ -30,8 +30,8 @@ import ( fakeclientset "k8s.io/client-go/kubernetes/fake" "helm.sh/helm/v4/internal/logging" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" "helm.sh/helm/v4/pkg/registry" @@ -64,7 +64,7 @@ func actionConfigFixtureWithDummyResources(t *testing.T, dummyResources kube.Res return &Configuration{ Releases: storage.Init(driver.NewMemory()), KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: dummyResources}, - Capabilities: chartutil.DefaultCapabilities, + Capabilities: common.DefaultCapabilities, RegistryClient: registryClient, } } @@ -122,14 +122,14 @@ type chartOptions struct { type chartOption func(*chartOptions) func buildChart(opts ...chartOption) *chart.Chart { - defaultTemplates := []*chart.File{ + defaultTemplates := []*common.File{ {Name: "templates/hello", Data: []byte("hello: world")}, {Name: "templates/hooks", Data: []byte(manifestWithHook)}, } return buildChartWithTemplates(defaultTemplates, opts...) 
} -func buildChartWithTemplates(templates []*chart.File, opts ...chartOption) *chart.Chart { +func buildChartWithTemplates(templates []*common.File, opts ...chartOption) *chart.Chart { c := &chartOptions{ Chart: &chart.Chart{ // TODO: This should be more complete. @@ -179,7 +179,7 @@ func withValues(values map[string]interface{}) chartOption { func withNotes(notes string) chartOption { return func(opts *chartOptions) { - opts.Templates = append(opts.Templates, &chart.File{ + opts.Templates = append(opts.Templates, &common.File{ Name: "templates/NOTES.txt", Data: []byte(notes), }) @@ -200,7 +200,7 @@ func withMetadataDependency(dependency chart.Dependency) chartOption { func withSampleTemplates() chartOption { return func(opts *chartOptions) { - sampleTemplates := []*chart.File{ + sampleTemplates := []*common.File{ // This adds basic templates and partials. {Name: "templates/goodbye", Data: []byte("goodbye: world")}, {Name: "templates/empty", Data: []byte("")}, @@ -213,14 +213,14 @@ func withSampleTemplates() chartOption { func withSampleSecret() chartOption { return func(opts *chartOptions) { - sampleSecret := &chart.File{Name: "templates/secret.yaml", Data: []byte("apiVersion: v1\nkind: Secret\n")} + sampleSecret := &common.File{Name: "templates/secret.yaml", Data: []byte("apiVersion: v1\nkind: Secret\n")} opts.Templates = append(opts.Templates, sampleSecret) } } func withSampleIncludingIncorrectTemplates() chartOption { return func(opts *chartOptions) { - sampleTemplates := []*chart.File{ + sampleTemplates := []*common.File{ // This adds basic templates and partials. {Name: "templates/goodbye", Data: []byte("goodbye: world")}, {Name: "templates/empty", Data: []byte("")}, @@ -234,7 +234,7 @@ func withSampleIncludingIncorrectTemplates() chartOption { func withMultipleManifestTemplate() chartOption { return func(opts *chartOptions) { - sampleTemplates := []*chart.File{ + sampleTemplates := []*common.File{ {Name: "templates/rbac", Data: []byte(rbacManifests)}, } opts.Templates = append(opts.Templates, sampleTemplates...) @@ -851,7 +851,7 @@ func TestRenderResources_PostRenderer_MergeError(t *testing.T) { Name: "test-chart", Version: "0.1.0", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/invalid", Data: []byte("invalid: yaml: content:")}, }, } diff --git a/pkg/action/get_values.go b/pkg/action/get_values.go index 18b8b4838..a0b5d92c1 100644 --- a/pkg/action/get_values.go +++ b/pkg/action/get_values.go @@ -16,9 +16,7 @@ limitations under the License. package action -import ( - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" -) +import "helm.sh/helm/v4/pkg/chart/common/util" // GetValues is the action for checking a given release's values. // @@ -50,7 +48,7 @@ func (g *GetValues) Run(name string) (map[string]interface{}, error) { // If the user wants all values, compute the values and return. 
if g.AllValues { - cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config) + cfg, err := util.CoalesceValues(rel.Chart, rel.Config) if err != nil { return nil, err } diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index e3a2c0808..091155bc2 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -29,8 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/resource" - chart "helm.sh/helm/v4/pkg/chart/v2" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" release "helm.sh/helm/v4/pkg/release/v1" @@ -178,7 +177,7 @@ func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace str outBuffer := &bytes.Buffer{} instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} - templates := []*chart.File{ + templates := []*common.File{ {Name: "templates/hello", Data: []byte("hello: world")}, {Name: "templates/hooks", Data: []byte(manifest)}, } @@ -205,7 +204,7 @@ func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace str outBuffer := &bytes.Buffer{} failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer} - templates := []*chart.File{ + templates := []*common.File{ {Name: "templates/hello", Data: []byte("hello: world")}, {Name: "templates/hooks", Data: []byte(manifest)}, } @@ -382,7 +381,7 @@ data: configuration := &Configuration{ Releases: storage.Init(driver.NewMemory()), KubeClient: kubeClient, - Capabilities: chartutil.DefaultCapabilities, + Capabilities: common.DefaultCapabilities, } serverSideApply := true diff --git a/pkg/action/install.go b/pkg/action/install.go index b2330d551..0fe3ebc4b 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -41,6 +41,8 @@ import ( "k8s.io/cli-runtime/pkg/resource" "sigs.k8s.io/yaml" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/cli" @@ -113,8 +115,8 @@ type Install struct { // KubeVersion allows specifying a custom kubernetes version to use and // APIVersions allows a manual set of supported API Versions to be passed // (for things like templating). These are ignored if ClientOnly is false - KubeVersion *chartutil.KubeVersion - APIVersions chartutil.VersionSet + KubeVersion *common.KubeVersion + APIVersions common.VersionSet // Used by helm template to render charts with .Release.IsUpgrade. 
Ignored if Dry-Run is false IsUpgrade bool // Enable DNS lookups when rendering templates @@ -292,7 +294,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma if i.ClientOnly { // Add mock objects in here so it doesn't use Kube API server // NOTE(bacongobbler): used for `helm template` - i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy() + i.cfg.Capabilities = common.DefaultCapabilities.Copy() if i.KubeVersion != nil { i.cfg.Capabilities.KubeVersion = *i.KubeVersion } @@ -319,14 +321,14 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma // special case for helm template --is-upgrade isUpgrade := i.IsUpgrade && i.isDryRun() - options := chartutil.ReleaseOptions{ + options := common.ReleaseOptions{ Name: i.ReleaseName, Namespace: i.Namespace, Revision: 1, IsInstall: !isUpgrade, IsUpgrade: isUpgrade, } - valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation) + valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation) if err != nil { return nil, err } diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index f567b3df4..92bb64b4d 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -45,8 +45,7 @@ import ( "k8s.io/client-go/rest/fake" "helm.sh/helm/v4/internal/test" - chart "helm.sh/helm/v4/pkg/chart/v2" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" release "helm.sh/helm/v4/pkg/release/v1" @@ -258,7 +257,7 @@ func TestInstallReleaseClientOnly(t *testing.T) { instAction.ClientOnly = true instAction.Run(buildChart(), nil) // disregard output - is.Equal(instAction.cfg.Capabilities, chartutil.DefaultCapabilities) + is.Equal(instAction.cfg.Capabilities, common.DefaultCapabilities) is.Equal(instAction.cfg.KubeClient, &kubefake.PrintingKubeClient{Out: io.Discard}) } @@ -429,7 +428,7 @@ func TestInstallRelease_DryRun_Lookup(t *testing.T) { vals := map[string]interface{}{} mockChart := buildChart(withSampleTemplates()) - mockChart.Templates = append(mockChart.Templates, &chart.File{ + mockChart.Templates = append(mockChart.Templates, &common.File{ Name: "templates/lookup", Data: []byte(`goodbye: {{ lookup "v1" "Namespace" "" "___" }}`), }) diff --git a/pkg/action/lint.go b/pkg/action/lint.go index 7b3c00ad2..208fd4637 100644 --- a/pkg/action/lint.go +++ b/pkg/action/lint.go @@ -22,9 +22,10 @@ import ( "path/filepath" "strings" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/v2/lint" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint" - "helm.sh/helm/v4/pkg/lint/support" ) // Lint is the action for checking that the semantics of a chart are well-formed. 
@@ -36,7 +37,7 @@ type Lint struct { WithSubcharts bool Quiet bool SkipSchemaValidation bool - KubeVersion *chartutil.KubeVersion + KubeVersion *common.KubeVersion } // LintResult is the result of Lint @@ -86,7 +87,7 @@ func HasWarningsOrErrors(result *LintResult) bool { return len(result.Errors) > 0 } -func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) (support.Linter, error) { +func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) (support.Linter, error) { var chartPath string linter := support.Linter{} diff --git a/pkg/action/show.go b/pkg/action/show.go index 6d6e10d24..4195d69a5 100644 --- a/pkg/action/show.go +++ b/pkg/action/show.go @@ -24,6 +24,7 @@ import ( "k8s.io/cli-runtime/pkg/printers" "sigs.k8s.io/yaml" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -140,7 +141,7 @@ func (s *Show) Run(chartpath string) (string, error) { return out.String(), nil } -func findReadme(files []*chart.File) (file *chart.File) { +func findReadme(files []*common.File) (file *common.File) { for _, file := range files { for _, n := range readmeFileNames { if file == nil { diff --git a/pkg/action/show_test.go b/pkg/action/show_test.go index 67eba2338..faf306f2a 100644 --- a/pkg/action/show_test.go +++ b/pkg/action/show_test.go @@ -19,6 +19,7 @@ package action import ( "testing" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -27,14 +28,14 @@ func TestShow(t *testing.T) { client := NewShow(ShowAll, config) client.chart = &chart.Chart{ Metadata: &chart.Metadata{Name: "alpine"}, - Files: []*chart.File{ + Files: []*common.File{ {Name: "README.md", Data: []byte("README\n")}, {Name: "crds/ignoreme.txt", Data: []byte("error")}, {Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")}, {Name: "crds/bar.json", Data: []byte("---\nbar\n")}, {Name: "crds/baz.yaml", Data: []byte("baz\n")}, }, - Raw: []*chart.File{ + Raw: []*common.File{ {Name: "values.yaml", Data: []byte("VALUES\n")}, }, Values: map[string]interface{}{}, @@ -105,7 +106,7 @@ func TestShowCRDs(t *testing.T) { client := NewShow(ShowCRDs, config) client.chart = &chart.Chart{ Metadata: &chart.Metadata{Name: "alpine"}, - Files: []*chart.File{ + Files: []*common.File{ {Name: "crds/ignoreme.txt", Data: []byte("error")}, {Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")}, {Name: "crds/bar.json", Data: []byte("---\nbar\n")}, @@ -138,7 +139,7 @@ func TestShowNoReadme(t *testing.T) { client := NewShow(ShowAll, config) client.chart = &chart.Chart{ Metadata: &chart.Metadata{Name: "alpine"}, - Files: []*chart.File{ + Files: []*common.File{ {Name: "crds/ignoreme.txt", Data: []byte("error")}, {Name: "crds/foo.yaml", Data: []byte("---\nfoo\n")}, {Name: "crds/bar.json", Data: []byte("---\nbar\n")}, diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index c00a59079..3688adf0e 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -28,6 +28,8 @@ import ( "k8s.io/cli-runtime/pkg/resource" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" chart "helm.sh/helm/v4/pkg/chart/v2" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/kube" @@ -260,7 +262,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin // the release object. 
revision := lastRelease.Version + 1 - options := chartutil.ReleaseOptions{ + options := common.ReleaseOptions{ Name: name, Namespace: currentRelease.Namespace, Revision: revision, @@ -271,7 +273,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin if err != nil { return nil, nil, false, err } - valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation) + valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation) if err != nil { return nil, nil, false, err } @@ -588,12 +590,12 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV slog.Debug("reusing the old release's values") // We have to regenerate the old coalesced values: - oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) + oldVals, err := util.CoalesceValues(current.Chart, current.Config) if err != nil { return nil, fmt.Errorf("failed to rebuild old values: %w", err) } - newVals = chartutil.CoalesceTables(newVals, current.Config) + newVals = util.CoalesceTables(newVals, current.Config) chart.Values = oldVals @@ -604,7 +606,7 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV if u.ResetThenReuseValues { slog.Debug("merging values from old release to new values") - newVals = chartutil.CoalesceTables(newVals, current.Config) + newVals = util.CoalesceTables(newVals, current.Config) return newVals, nil } diff --git a/pkg/chart/common.go b/pkg/chart/common.go new file mode 100644 index 000000000..8b1dd58c3 --- /dev/null +++ b/pkg/chart/common.go @@ -0,0 +1,219 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chart + +import ( + "errors" + "fmt" + "log/slog" + "reflect" + "strings" + + v3chart "helm.sh/helm/v4/internal/chart/v3" + common "helm.sh/helm/v4/pkg/chart/common" + v2chart "helm.sh/helm/v4/pkg/chart/v2" +) + +var NewAccessor func(chrt Charter) (Accessor, error) = NewDefaultAccessor //nolint:revive + +func NewDefaultAccessor(chrt Charter) (Accessor, error) { + switch v := chrt.(type) { + case v2chart.Chart: + return &v2Accessor{&v}, nil + case *v2chart.Chart: + return &v2Accessor{v}, nil + case v3chart.Chart: + return &v3Accessor{&v}, nil + case *v3chart.Chart: + return &v3Accessor{v}, nil + default: + return nil, errors.New("unsupported chart type") + } +} + +type v2Accessor struct { + chrt *v2chart.Chart +} + +func (r *v2Accessor) Name() string { + return r.chrt.Metadata.Name +} + +func (r *v2Accessor) IsRoot() bool { + return r.chrt.IsRoot() +} + +func (r *v2Accessor) MetadataAsMap() map[string]interface{} { + var ret map[string]interface{} + if r.chrt.Metadata == nil { + return ret + } + + ret, err := structToMap(r.chrt.Metadata) + if err != nil { + slog.Error("error converting metadata to map", "error", err) + } + return ret +} + +func (r *v2Accessor) Files() []*common.File { + return r.chrt.Files +} + +func (r *v2Accessor) Templates() []*common.File { + return r.chrt.Templates +} + +func (r *v2Accessor) ChartFullPath() string { + return r.chrt.ChartFullPath() +} + +func (r *v2Accessor) IsLibraryChart() bool { + return strings.EqualFold(r.chrt.Metadata.Type, "library") +} + +func (r *v2Accessor) Dependencies() []Charter { + var deps = make([]Charter, len(r.chrt.Dependencies())) + for i, c := range r.chrt.Dependencies() { + deps[i] = c + } + return deps +} + +func (r *v2Accessor) Values() map[string]interface{} { + return r.chrt.Values +} + +func (r *v2Accessor) Schema() []byte { + return r.chrt.Schema +} + +type v3Accessor struct { + chrt *v3chart.Chart +} + +func (r *v3Accessor) Name() string { + return r.chrt.Metadata.Name +} + +func (r *v3Accessor) IsRoot() bool { + return r.chrt.IsRoot() +} + +func (r *v3Accessor) MetadataAsMap() map[string]interface{} { + var ret map[string]interface{} + if r.chrt.Metadata == nil { + return ret + } + + ret, err := structToMap(r.chrt.Metadata) + if err != nil { + slog.Error("error converting metadata to map", "error", err) + } + return ret +} + +func (r *v3Accessor) Files() []*common.File { + return r.chrt.Files +} + +func (r *v3Accessor) Templates() []*common.File { + return r.chrt.Templates +} + +func (r *v3Accessor) ChartFullPath() string { + return r.chrt.ChartFullPath() +} + +func (r *v3Accessor) IsLibraryChart() bool { + return strings.EqualFold(r.chrt.Metadata.Type, "library") +} + +func (r *v3Accessor) Dependencies() []Charter { + var deps = make([]Charter, len(r.chrt.Dependencies())) + for i, c := range r.chrt.Dependencies() { + deps[i] = c + } + return deps +} + +func (r *v3Accessor) Values() map[string]interface{} { + return r.chrt.Values +} + +func (r *v3Accessor) Schema() []byte { + return r.chrt.Schema +} + +func structToMap(obj interface{}) (map[string]interface{}, error) { + objValue := reflect.ValueOf(obj) + + // If the value is a pointer, dereference it + if objValue.Kind() == reflect.Ptr { + objValue = objValue.Elem() + } + + // Check if the input is a struct + if objValue.Kind() != reflect.Struct { + return nil, fmt.Errorf("input must be a struct or a pointer to a struct") + } + + result := make(map[string]interface{}) + objType := objValue.Type() + + for i := 0; i < objValue.NumField(); i++ { + field := 
objType.Field(i) + value := objValue.Field(i) + + switch value.Kind() { + case reflect.Struct: + nestedMap, err := structToMap(value.Interface()) + if err != nil { + return nil, err + } + result[field.Name] = nestedMap + case reflect.Ptr: + // Recurse for pointers by dereferencing + if value.IsNil() { + result[field.Name] = nil + } else { + nestedMap, err := structToMap(value.Interface()) + if err != nil { + return nil, err + } + result[field.Name] = nestedMap + } + case reflect.Slice: + sliceOfMaps := make([]interface{}, value.Len()) + for j := 0; j < value.Len(); j++ { + sliceElement := value.Index(j) + if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == reflect.Ptr { + nestedMap, err := structToMap(sliceElement.Interface()) + if err != nil { + return nil, err + } + sliceOfMaps[j] = nestedMap + } else { + sliceOfMaps[j] = sliceElement.Interface() + } + } + result[field.Name] = sliceOfMaps + default: + result[field.Name] = value.Interface() + } + } + return result, nil +} diff --git a/pkg/chart/v2/util/capabilities.go b/pkg/chart/common/capabilities.go similarity index 99% rename from pkg/chart/v2/util/capabilities.go rename to pkg/chart/common/capabilities.go index 19d62c5e3..355c3978a 100644 --- a/pkg/chart/v2/util/capabilities.go +++ b/pkg/chart/common/capabilities.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "fmt" diff --git a/pkg/chart/v2/util/capabilities_test.go b/pkg/chart/common/capabilities_test.go similarity index 99% rename from pkg/chart/v2/util/capabilities_test.go rename to pkg/chart/common/capabilities_test.go index e5513b3fd..bf32b1f3f 100644 --- a/pkg/chart/v2/util/capabilities_test.go +++ b/pkg/chart/common/capabilities_test.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "testing" diff --git a/pkg/chart/v2/util/errors.go b/pkg/chart/common/errors.go similarity index 98% rename from pkg/chart/v2/util/errors.go rename to pkg/chart/common/errors.go index a175b9758..b0a2d650e 100644 --- a/pkg/chart/v2/util/errors.go +++ b/pkg/chart/common/errors.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "fmt" diff --git a/pkg/chart/v2/util/errors_test.go b/pkg/chart/common/errors_test.go similarity index 98% rename from pkg/chart/v2/util/errors_test.go rename to pkg/chart/common/errors_test.go index b8ae86384..06b3b054c 100644 --- a/pkg/chart/v2/util/errors_test.go +++ b/pkg/chart/common/errors_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "testing" diff --git a/internal/chart/v3/file.go b/pkg/chart/common/file.go similarity index 98% rename from internal/chart/v3/file.go rename to pkg/chart/common/file.go index ba04e106d..304643f1a 100644 --- a/internal/chart/v3/file.go +++ b/pkg/chart/common/file.go @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v3 +package common // File represents a file as a name/value pair. 
// diff --git a/pkg/chart/v2/util/testdata/coleridge.yaml b/pkg/chart/common/testdata/coleridge.yaml similarity index 100% rename from pkg/chart/v2/util/testdata/coleridge.yaml rename to pkg/chart/common/testdata/coleridge.yaml diff --git a/pkg/chart/v2/util/coalesce.go b/pkg/chart/common/util/coalesce.go similarity index 81% rename from pkg/chart/v2/util/coalesce.go rename to pkg/chart/common/util/coalesce.go index a3e0f5ae8..5bfa1c608 100644 --- a/pkg/chart/v2/util/coalesce.go +++ b/pkg/chart/common/util/coalesce.go @@ -23,7 +23,8 @@ import ( "github.com/mitchellh/copystructure" - chart "helm.sh/helm/v4/pkg/chart/v2" + chart "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/chart/common" ) func concatPrefix(a, b string) string { @@ -42,7 +43,7 @@ func concatPrefix(a, b string) string { // - Scalar values and arrays are replaced, maps are merged // - A chart has access to all of the variables for it, as well as all of // the values destined for its dependencies. -func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { +func CoalesceValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) { valsCopy, err := copyValues(vals) if err != nil { return vals, err @@ -64,7 +65,7 @@ func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, err // Retaining Nils is useful when processes early in a Helm action or business // logic need to retain them for when Coalescing will happen again later in the // business logic. -func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { +func MergeValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) { valsCopy, err := copyValues(vals) if err != nil { return vals, err @@ -72,7 +73,7 @@ func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) return coalesce(log.Printf, chrt, valsCopy, "", true) } -func copyValues(vals map[string]interface{}) (Values, error) { +func copyValues(vals map[string]interface{}) (common.Values, error) { v, err := copystructure.Copy(vals) if err != nil { return vals, err @@ -96,28 +97,36 @@ type printFn func(format string, v ...interface{}) // Note, the merge argument specifies whether this is being used by MergeValues // or CoalesceValues. Coalescing removes null values and their keys in some // situations while merging keeps the null values. -func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { +func coalesce(printf printFn, ch chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { coalesceValues(printf, ch, dest, prefix, merge) return coalesceDeps(printf, ch, dest, prefix, merge) } // coalesceDeps coalesces the dependencies of the given chart. -func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { - for _, subchart := range chrt.Dependencies() { - if c, ok := dest[subchart.Name()]; !ok { +func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { + ch, err := chart.NewAccessor(chrt) + if err != nil { + return dest, err + } + for _, subchart := range ch.Dependencies() { + sub, err := chart.NewAccessor(subchart) + if err != nil { + return dest, err + } + if c, ok := dest[sub.Name()]; !ok { // If dest doesn't already have the key, create it. 
- dest[subchart.Name()] = make(map[string]interface{}) + dest[sub.Name()] = make(map[string]interface{}) } else if !istable(c) { - return dest, fmt.Errorf("type mismatch on %s: %t", subchart.Name(), c) + return dest, fmt.Errorf("type mismatch on %s: %t", sub.Name(), c) } - if dv, ok := dest[subchart.Name()]; ok { + if dv, ok := dest[sub.Name()]; ok { dvmap := dv.(map[string]interface{}) - subPrefix := concatPrefix(prefix, chrt.Metadata.Name) + subPrefix := concatPrefix(prefix, ch.Name()) // Get globals out of dest and merge them into dvmap. coalesceGlobals(printf, dvmap, dest, subPrefix, merge) // Now coalesce the rest of the values. var err error - dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge) + dest[sub.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge) if err != nil { return dest, err } @@ -132,17 +141,17 @@ func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{} func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) { var dg, sg map[string]interface{} - if destglob, ok := dest[GlobalKey]; !ok { + if destglob, ok := dest[common.GlobalKey]; !ok { dg = make(map[string]interface{}) } else if dg, ok = destglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because destination %s is not a table.", GlobalKey) + printf("warning: skipping globals because destination %s is not a table.", common.GlobalKey) return } - if srcglob, ok := src[GlobalKey]; !ok { + if srcglob, ok := src[common.GlobalKey]; !ok { sg = make(map[string]interface{}) } else if sg, ok = srcglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because source %s is not a table.", GlobalKey) + printf("warning: skipping globals because source %s is not a table.", common.GlobalKey) return } @@ -178,7 +187,7 @@ func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix st dg[key] = val } } - dest[GlobalKey] = dg + dest[common.GlobalKey] = dg } func copyMap(src map[string]interface{}) map[string]interface{} { @@ -190,13 +199,18 @@ func copyMap(src map[string]interface{}) map[string]interface{} { // coalesceValues builds up a values map for a particular chart. // // Values in v will override the values in the chart. -func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string, merge bool) { - subPrefix := concatPrefix(prefix, c.Metadata.Name) +func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, prefix string, merge bool) { + ch, err := chart.NewAccessor(c) + if err != nil { + return + } + + subPrefix := concatPrefix(prefix, ch.Name()) // Using c.Values directly when coalescing a table can cause problems where // the original c.Values is altered. Creating a deep copy stops the problem. // This section is fault-tolerant as there is no ability to return an error. - valuesCopy, err := copystructure.Copy(c.Values) + valuesCopy, err := copystructure.Copy(ch.Values()) var vc map[string]interface{} var ok bool if err != nil { @@ -205,7 +219,7 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr // wrong with c.Values. In this case we will use c.Values and report // an error. 
printf("warning: unable to copy values, err: %s", err) - vc = c.Values + vc = ch.Values() } else { vc, ok = valuesCopy.(map[string]interface{}) if !ok { @@ -213,7 +227,7 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr // it cannot be treated as map[string]interface{} there is something // strangely wrong. Log it and use c.Values printf("warning: unable to convert values copy to values type") - vc = c.Values + vc = ch.Values() } } @@ -250,9 +264,17 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr } } -func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool { - for _, subchart := range chrt.Dependencies() { - if subchart.Name() == key { +func childChartMergeTrue(chrt chart.Charter, key string, merge bool) bool { + ch, err := chart.NewAccessor(chrt) + if err != nil { + return merge + } + for _, subchart := range ch.Dependencies() { + sub, err := chart.NewAccessor(subchart) + if err != nil { + return merge + } + if sub.Name() == key { return true } } @@ -306,3 +328,9 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref } return dst } + +// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. +func istable(v interface{}) bool { + _, ok := v.(map[string]interface{}) + return ok +} diff --git a/pkg/chart/v2/util/coalesce_test.go b/pkg/chart/common/util/coalesce_test.go similarity index 97% rename from pkg/chart/v2/util/coalesce_test.go rename to pkg/chart/common/util/coalesce_test.go index e2c45a435..871bfa8da 100644 --- a/pkg/chart/v2/util/coalesce_test.go +++ b/pkg/chart/common/util/coalesce_test.go @@ -17,13 +17,16 @@ limitations under the License. package util import ( + "bytes" "encoding/json" "fmt" "maps" "testing" + "text/template" "github.com/stretchr/testify/assert" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -136,7 +139,7 @@ func TestCoalesceValues(t *testing.T) { }, ) - vals, err := ReadValues(testCoalesceValuesYaml) + vals, err := common.ReadValues(testCoalesceValuesYaml) if err != nil { t.Fatal(err) } @@ -144,7 +147,7 @@ func TestCoalesceValues(t *testing.T) { // taking a copy of the values before passing it // to CoalesceValues as argument, so that we can // use it for asserting later - valsCopy := make(Values, len(vals)) + valsCopy := make(common.Values, len(vals)) maps.Copy(valsCopy, vals) v, err := CoalesceValues(c, vals) @@ -238,6 +241,13 @@ func TestCoalesceValues(t *testing.T) { is.Equal(valsCopy, vals) } +func ttpl(tpl string, v map[string]interface{}) (string, error) { + var b bytes.Buffer + tt := template.Must(template.New("t").Parse(tpl)) + err := tt.Execute(&b, v) + return b.String(), err +} + func TestMergeValues(t *testing.T) { is := assert.New(t) @@ -294,7 +304,7 @@ func TestMergeValues(t *testing.T) { }, ) - vals, err := ReadValues(testCoalesceValuesYaml) + vals, err := common.ReadValues(testCoalesceValuesYaml) if err != nil { t.Fatal(err) } @@ -302,7 +312,7 @@ func TestMergeValues(t *testing.T) { // taking a copy of the values before passing it // to MergeValues as argument, so that we can // use it for asserting later - valsCopy := make(Values, len(vals)) + valsCopy := make(common.Values, len(vals)) maps.Copy(valsCopy, vals) v, err := MergeValues(c, vals) diff --git a/pkg/chart/v2/util/jsonschema.go b/pkg/chart/common/util/jsonschema.go similarity index 89% rename from pkg/chart/v2/util/jsonschema.go rename to pkg/chart/common/util/jsonschema.go index 
72e133363..acd2ca100 100644 --- a/pkg/chart/v2/util/jsonschema.go +++ b/pkg/chart/common/util/jsonschema.go @@ -30,7 +30,8 @@ import ( "helm.sh/helm/v4/internal/version" - chart "helm.sh/helm/v4/pkg/chart/v2" + chart "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/chart/common" ) // HTTPURLLoader implements a loader for HTTP/HTTPS URLs @@ -71,11 +72,15 @@ func newHTTPURLLoader() *HTTPURLLoader { } // ValidateAgainstSchema checks that values does not violate the structure laid out in schema -func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { +func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) error { + chrt, err := chart.NewAccessor(ch) + if err != nil { + return err + } var sb strings.Builder - if chrt.Schema != nil { + if chrt.Schema() != nil { slog.Debug("chart name", "chart-name", chrt.Name()) - err := ValidateAgainstSingleSchema(values, chrt.Schema) + err := ValidateAgainstSingleSchema(values, chrt.Schema()) if err != nil { sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) sb.WriteString(err.Error()) @@ -84,7 +89,11 @@ func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) err slog.Debug("number of dependencies in the chart", "dependencies", len(chrt.Dependencies())) // For each dependency, recursively call this function with the coalesced values for _, subchart := range chrt.Dependencies() { - subchartValues := values[subchart.Name()].(map[string]interface{}) + sub, err := chart.NewAccessor(subchart) + if err != nil { + return err + } + subchartValues := values[sub.Name()].(map[string]interface{}) if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { sb.WriteString(err.Error()) } @@ -98,7 +107,7 @@ func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) err } // ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema -func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { +func ValidateAgainstSingleSchema(values common.Values, schemaJSON []byte) (reterr error) { defer func() { if r := recover(); r != nil { reterr = fmt.Errorf("unable to validate schema: %s", r) diff --git a/pkg/chart/v2/util/jsonschema_test.go b/pkg/chart/common/util/jsonschema_test.go similarity index 96% rename from pkg/chart/v2/util/jsonschema_test.go rename to pkg/chart/common/util/jsonschema_test.go index cd95b7faf..b34f9d514 100644 --- a/pkg/chart/v2/util/jsonschema_test.go +++ b/pkg/chart/common/util/jsonschema_test.go @@ -23,11 +23,12 @@ import ( "strings" "testing" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) func TestValidateAgainstSingleSchema(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values.yaml") + values, err := common.ReadValuesFile("./testdata/test-values.yaml") if err != nil { t.Fatalf("Error reading YAML file: %s", err) } @@ -42,7 +43,7 @@ func TestValidateAgainstSingleSchema(t *testing.T) { } func TestValidateAgainstInvalidSingleSchema(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values.yaml") + values, err := common.ReadValuesFile("./testdata/test-values.yaml") if err != nil { t.Fatalf("Error reading YAML file: %s", err) } @@ -66,7 +67,7 @@ func TestValidateAgainstInvalidSingleSchema(t *testing.T) { } func TestValidateAgainstSingleSchemaNegative(t *testing.T) { - values, err := ReadValuesFile("./testdata/test-values-negative.yaml") + values, err := common.ReadValuesFile("./testdata/test-values-negative.yaml") if err != nil { 
t.Fatalf("Error reading YAML file: %s", err) } diff --git a/pkg/chart/v2/util/testdata/test-values-invalid.schema.json b/pkg/chart/common/util/testdata/test-values-invalid.schema.json similarity index 100% rename from pkg/chart/v2/util/testdata/test-values-invalid.schema.json rename to pkg/chart/common/util/testdata/test-values-invalid.schema.json diff --git a/pkg/chart/v2/util/testdata/test-values-negative.yaml b/pkg/chart/common/util/testdata/test-values-negative.yaml similarity index 100% rename from pkg/chart/v2/util/testdata/test-values-negative.yaml rename to pkg/chart/common/util/testdata/test-values-negative.yaml diff --git a/pkg/chart/v2/util/testdata/test-values.schema.json b/pkg/chart/common/util/testdata/test-values.schema.json similarity index 100% rename from pkg/chart/v2/util/testdata/test-values.schema.json rename to pkg/chart/common/util/testdata/test-values.schema.json diff --git a/pkg/chart/v2/util/testdata/test-values.yaml b/pkg/chart/common/util/testdata/test-values.yaml similarity index 100% rename from pkg/chart/v2/util/testdata/test-values.yaml rename to pkg/chart/common/util/testdata/test-values.yaml diff --git a/pkg/chart/common/util/values.go b/pkg/chart/common/util/values.go new file mode 100644 index 000000000..85cb29012 --- /dev/null +++ b/pkg/chart/common/util/values.go @@ -0,0 +1,70 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/chart/common" +) + +// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files +// +// This takes both ReleaseOptions and Capabilities to merge into the render values. +func ToRenderValues(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities) (common.Values, error) { + return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false) +} + +// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files +// +// This takes both ReleaseOptions and Capabilities to merge into the render values. 
+func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities, skipSchemaValidation bool) (common.Values, error) { + if caps == nil { + caps = common.DefaultCapabilities + } + accessor, err := chart.NewAccessor(chrt) + if err != nil { + return nil, err + } + top := map[string]interface{}{ + "Chart": accessor.MetadataAsMap(), + "Capabilities": caps, + "Release": map[string]interface{}{ + "Name": options.Name, + "Namespace": options.Namespace, + "IsUpgrade": options.IsUpgrade, + "IsInstall": options.IsInstall, + "Revision": options.Revision, + "Service": "Helm", + }, + } + + vals, err := CoalesceValues(chrt, chrtVals) + if err != nil { + return common.Values(top), err + } + + if !skipSchemaValidation { + if err := ValidateAgainstSchema(chrt, vals); err != nil { + return top, fmt.Errorf("values don't meet the specifications of the schema(s) in the following chart(s):\n%w", err) + } + } + + top["Values"] = vals + return top, nil +} diff --git a/pkg/chart/common/util/values_test.go b/pkg/chart/common/util/values_test.go new file mode 100644 index 000000000..5fc030567 --- /dev/null +++ b/pkg/chart/common/util/values_test.go @@ -0,0 +1,111 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "testing" + + "helm.sh/helm/v4/pkg/chart/common" + chart "helm.sh/helm/v4/pkg/chart/v2" +) + +func TestToRenderValues(t *testing.T) { + + chartValues := map[string]interface{}{ + "name": "al Rashid", + "where": map[string]interface{}{ + "city": "Basrah", + "title": "caliph", + }, + } + + overrideValues := map[string]interface{}{ + "name": "Haroun", + "where": map[string]interface{}{ + "city": "Baghdad", + "date": "809 CE", + }, + } + + c := &chart.Chart{ + Metadata: &chart.Metadata{Name: "test"}, + Templates: []*common.File{}, + Values: chartValues, + Files: []*common.File{ + {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, + }, + } + c.AddDependency(&chart.Chart{ + Metadata: &chart.Metadata{Name: "where"}, + }) + + o := common.ReleaseOptions{ + Name: "Seven Voyages", + Namespace: "default", + Revision: 1, + IsInstall: true, + } + + res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false) + if err != nil { + t.Fatal(err) + } + + // Ensure that the top-level values are all set. 
+ metamap := res["Chart"].(map[string]interface{}) + if name := metamap["Name"]; name.(string) != "test" { + t.Errorf("Expected chart name 'test', got %q", name) + } + relmap := res["Release"].(map[string]interface{}) + if name := relmap["Name"]; name.(string) != "Seven Voyages" { + t.Errorf("Expected release name 'Seven Voyages', got %q", name) + } + if namespace := relmap["Namespace"]; namespace.(string) != "default" { + t.Errorf("Expected namespace 'default', got %q", namespace) + } + if revision := relmap["Revision"]; revision.(int) != 1 { + t.Errorf("Expected revision '1', got %d", revision) + } + if relmap["IsUpgrade"].(bool) { + t.Error("Expected upgrade to be false.") + } + if !relmap["IsInstall"].(bool) { + t.Errorf("Expected install to be true.") + } + if !res["Capabilities"].(*common.Capabilities).APIVersions.Has("v1") { + t.Error("Expected Capabilities to have v1 as an API") + } + if res["Capabilities"].(*common.Capabilities).KubeVersion.Major != "1" { + t.Error("Expected Capabilities to have a Kube version") + } + + vals := res["Values"].(common.Values) + if vals["name"] != "Haroun" { + t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals) + } + where := vals["where"].(map[string]interface{}) + expects := map[string]string{ + "city": "Baghdad", + "date": "809 CE", + "title": "caliph", + } + for field, expect := range expects { + if got := where[field]; got != expect { + t.Errorf("Expected %q, got %q (%v)", expect, got, where) + } + } +} diff --git a/pkg/chart/v2/util/values.go b/pkg/chart/common/values.go similarity index 74% rename from pkg/chart/v2/util/values.go rename to pkg/chart/common/values.go index 6850e8b9b..94958a779 100644 --- a/pkg/chart/v2/util/values.go +++ b/pkg/chart/common/values.go @@ -14,18 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "errors" - "fmt" "io" "os" "strings" "sigs.k8s.io/yaml" - - chart "helm.sh/helm/v4/pkg/chart/v2" ) // GlobalKey is the name of the Values key that is used for storing global vars. @@ -131,48 +128,6 @@ type ReleaseOptions struct { IsInstall bool } -// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. -func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) { - return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false) -} - -// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. 
-func ToRenderValuesWithSchemaValidation(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities, skipSchemaValidation bool) (Values, error) { - if caps == nil { - caps = DefaultCapabilities - } - top := map[string]interface{}{ - "Chart": chrt.Metadata, - "Capabilities": caps, - "Release": map[string]interface{}{ - "Name": options.Name, - "Namespace": options.Namespace, - "IsUpgrade": options.IsUpgrade, - "IsInstall": options.IsInstall, - "Revision": options.Revision, - "Service": "Helm", - }, - } - - vals, err := CoalesceValues(chrt, chrtVals) - if err != nil { - return top, err - } - - if !skipSchemaValidation { - if err := ValidateAgainstSchema(chrt, vals); err != nil { - return top, fmt.Errorf("values don't meet the specifications of the schema(s) in the following chart(s):\n%w", err) - } - } - - top["Values"] = vals - return top, nil -} - // istable is a special-purpose function to see if the present thing matches the definition of a YAML table. func istable(v interface{}) bool { _, ok := v.(map[string]interface{}) diff --git a/pkg/chart/v2/util/values_test.go b/pkg/chart/common/values_test.go similarity index 66% rename from pkg/chart/v2/util/values_test.go rename to pkg/chart/common/values_test.go index 1a25fafb8..3cceeb2b5 100644 --- a/pkg/chart/v2/util/values_test.go +++ b/pkg/chart/common/values_test.go @@ -14,15 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package common import ( "bytes" "fmt" "testing" "text/template" - - chart "helm.sh/helm/v4/pkg/chart/v2" ) func TestReadValues(t *testing.T) { @@ -66,92 +64,6 @@ water: } } -func TestToRenderValues(t *testing.T) { - - chartValues := map[string]interface{}{ - "name": "al Rashid", - "where": map[string]interface{}{ - "city": "Basrah", - "title": "caliph", - }, - } - - overrideValues := map[string]interface{}{ - "name": "Haroun", - "where": map[string]interface{}{ - "city": "Baghdad", - "date": "809 CE", - }, - } - - c := &chart.Chart{ - Metadata: &chart.Metadata{Name: "test"}, - Templates: []*chart.File{}, - Values: chartValues, - Files: []*chart.File{ - {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, - }, - } - c.AddDependency(&chart.Chart{ - Metadata: &chart.Metadata{Name: "where"}, - }) - - o := ReleaseOptions{ - Name: "Seven Voyages", - Namespace: "default", - Revision: 1, - IsInstall: true, - } - - res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false) - if err != nil { - t.Fatal(err) - } - - // Ensure that the top-level values are all set. 
- if name := res["Chart"].(*chart.Metadata).Name; name != "test" { - t.Errorf("Expected chart name 'test', got %q", name) - } - relmap := res["Release"].(map[string]interface{}) - if name := relmap["Name"]; name.(string) != "Seven Voyages" { - t.Errorf("Expected release name 'Seven Voyages', got %q", name) - } - if namespace := relmap["Namespace"]; namespace.(string) != "default" { - t.Errorf("Expected namespace 'default', got %q", namespace) - } - if revision := relmap["Revision"]; revision.(int) != 1 { - t.Errorf("Expected revision '1', got %d", revision) - } - if relmap["IsUpgrade"].(bool) { - t.Error("Expected upgrade to be false.") - } - if !relmap["IsInstall"].(bool) { - t.Errorf("Expected install to be true.") - } - if !res["Capabilities"].(*Capabilities).APIVersions.Has("v1") { - t.Error("Expected Capabilities to have v1 as an API") - } - if res["Capabilities"].(*Capabilities).KubeVersion.Major != "1" { - t.Error("Expected Capabilities to have a Kube version") - } - - vals := res["Values"].(Values) - if vals["name"] != "Haroun" { - t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals) - } - where := vals["where"].(map[string]interface{}) - expects := map[string]string{ - "city": "Baghdad", - "date": "809 CE", - "title": "caliph", - } - for field, expect := range expects { - if got := where[field]; got != expect { - t.Errorf("Expected %q, got %q (%v)", expect, got, where) - } - } -} - func TestReadValuesFile(t *testing.T) { data, err := ReadValuesFile("./testdata/coleridge.yaml") if err != nil { diff --git a/pkg/chart/v2/file.go b/pkg/chart/interfaces.go similarity index 60% rename from pkg/chart/v2/file.go rename to pkg/chart/interfaces.go index a2eeb0fcd..e87dd2c08 100644 --- a/pkg/chart/v2/file.go +++ b/pkg/chart/interfaces.go @@ -13,15 +13,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v2 +package chart -// File represents a file as a name/value pair. -// -// By convention, name is a relative path within the scope of the chart's -// base directory. -type File struct { - // Name is the path-like name of the template. - Name string `json:"name"` - // Data is the template as byte data. - Data []byte `json:"data"` +import ( + common "helm.sh/helm/v4/pkg/chart/common" +) + +type Charter interface{} + +type Accessor interface { + Name() string + IsRoot() bool + MetadataAsMap() map[string]interface{} + Files() []*common.File + Templates() []*common.File + ChartFullPath() string + IsLibraryChart() bool + Dependencies() []Charter + Values() map[string]interface{} + Schema() []byte } diff --git a/pkg/chart/v2/chart.go b/pkg/chart/v2/chart.go index 66ddf98a5..f59bcd8b3 100644 --- a/pkg/chart/v2/chart.go +++ b/pkg/chart/v2/chart.go @@ -19,6 +19,8 @@ import ( "path/filepath" "regexp" "strings" + + "helm.sh/helm/v4/pkg/chart/common" ) // APIVersionV1 is the API version number for version 1. @@ -37,20 +39,20 @@ type Chart struct { // // This should not be used except in special cases like `helm show values`, // where we want to display the raw values, comments and all. - Raw []*File `json:"-"` + Raw []*common.File `json:"-"` // Metadata is the contents of the Chartfile. Metadata *Metadata `json:"metadata"` // Lock is the contents of Chart.lock. Lock *Lock `json:"lock"` // Templates for this chart. - Templates []*File `json:"templates"` + Templates []*common.File `json:"templates"` // Values are default config for this chart. 
Values map[string]interface{} `json:"values"` // Schema is an optional JSON schema for imposing structure on Values Schema []byte `json:"schema"` // Files are miscellaneous files in a chart archive, // e.g. README, LICENSE, etc. - Files []*File `json:"files"` + Files []*common.File `json:"files"` parent *Chart dependencies []*Chart @@ -62,7 +64,7 @@ type CRD struct { // Filename is the File obj Name including (sub-)chart.ChartFullPath Filename string // File is the File obj for the crd - File *File + File *common.File } // SetDependencies replaces the chart dependencies. @@ -137,8 +139,8 @@ func (ch *Chart) AppVersion() string { // CRDs returns a list of File objects in the 'crds/' directory of a Helm chart. // Deprecated: use CRDObjects() -func (ch *Chart) CRDs() []*File { - files := []*File{} +func (ch *Chart) CRDs() []*common.File { + files := []*common.File{} // Find all resources in the crds/ directory for _, f := range ch.Files { if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { diff --git a/pkg/chart/v2/chart_test.go b/pkg/chart/v2/chart_test.go index d6311085b..a96d8c0c0 100644 --- a/pkg/chart/v2/chart_test.go +++ b/pkg/chart/v2/chart_test.go @@ -20,11 +20,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/pkg/chart/common" ) func TestCRDs(t *testing.T) { chrt := Chart{ - Files: []*File{ + Files: []*common.File{ { Name: "crds/foo.yaml", Data: []byte("hello"), @@ -57,7 +59,7 @@ func TestCRDs(t *testing.T) { func TestSaveChartNoRawData(t *testing.T) { chrt := Chart{ - Raw: []*File{ + Raw: []*common.File{ { Name: "fhqwhgads.yaml", Data: []byte("Everybody to the Limit"), @@ -76,7 +78,7 @@ func TestSaveChartNoRawData(t *testing.T) { t.Fatal(err) } - is.Equal([]*File(nil), res.Raw) + is.Equal([]*common.File(nil), res.Raw) } func TestMetadata(t *testing.T) { @@ -162,7 +164,7 @@ func TestChartFullPath(t *testing.T) { func TestCRDObjects(t *testing.T) { chrt := Chart{ - Files: []*File{ + Files: []*common.File{ { Name: "crds/foo.yaml", Data: []byte("hello"), @@ -190,7 +192,7 @@ func TestCRDObjects(t *testing.T) { { Name: "crds/foo.yaml", Filename: "crds/foo.yaml", - File: &File{ + File: &common.File{ Name: "crds/foo.yaml", Data: []byte("hello"), }, @@ -198,7 +200,7 @@ func TestCRDObjects(t *testing.T) { { Name: "crds/foo/bar/baz.yaml", Filename: "crds/foo/bar/baz.yaml", - File: &File{ + File: &common.File{ Name: "crds/foo/bar/baz.yaml", Data: []byte("hello"), }, diff --git a/pkg/lint/lint.go b/pkg/chart/v2/lint/lint.go similarity index 83% rename from pkg/lint/lint.go rename to pkg/chart/v2/lint/lint.go index 64b2a6057..773c9bc5e 100644 --- a/pkg/lint/lint.go +++ b/pkg/chart/v2/lint/lint.go @@ -14,24 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package lint // import "helm.sh/helm/v4/pkg/lint" +package lint // import "helm.sh/helm/v4/pkg/chart/v2/lint" import ( "path/filepath" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/rules" - "helm.sh/helm/v4/pkg/lint/support" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/v2/lint/rules" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" ) type linterOptions struct { - KubeVersion *chartutil.KubeVersion + KubeVersion *common.KubeVersion SkipSchemaValidation bool } type LinterOption func(lo *linterOptions) -func WithKubeVersion(kubeVersion *chartutil.KubeVersion) LinterOption { +func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption { return func(lo *linterOptions) { lo.KubeVersion = kubeVersion } diff --git a/pkg/lint/lint_test.go b/pkg/chart/v2/lint/lint_test.go similarity index 99% rename from pkg/lint/lint_test.go rename to pkg/chart/v2/lint/lint_test.go index 5b590c010..3c777e2bb 100644 --- a/pkg/lint/lint_test.go +++ b/pkg/chart/v2/lint/lint_test.go @@ -23,8 +23,8 @@ import ( "github.com/stretchr/testify/assert" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" ) var values map[string]interface{} diff --git a/pkg/lint/rules/chartfile.go b/pkg/chart/v2/lint/rules/chartfile.go similarity index 98% rename from pkg/lint/rules/chartfile.go rename to pkg/chart/v2/lint/rules/chartfile.go index 103c28374..185f524a4 100644 --- a/pkg/lint/rules/chartfile.go +++ b/pkg/chart/v2/lint/rules/chartfile.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rules // import "helm.sh/helm/v4/pkg/lint/rules" +package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules" import ( "errors" @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/yaml" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" ) // Chartfile runs a set of linter rules related to Chart.yaml file diff --git a/pkg/lint/rules/chartfile_test.go b/pkg/chart/v2/lint/rules/chartfile_test.go similarity index 99% rename from pkg/lint/rules/chartfile_test.go rename to pkg/chart/v2/lint/rules/chartfile_test.go index 1719a2011..5a1ad2f24 100644 --- a/pkg/lint/rules/chartfile_test.go +++ b/pkg/chart/v2/lint/rules/chartfile_test.go @@ -24,8 +24,8 @@ import ( "testing" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" ) const ( diff --git a/pkg/lint/rules/crds.go b/pkg/chart/v2/lint/rules/crds.go similarity index 98% rename from pkg/lint/rules/crds.go rename to pkg/chart/v2/lint/rules/crds.go index 1b8a73139..49e30192a 100644 --- a/pkg/lint/rules/crds.go +++ b/pkg/chart/v2/lint/rules/crds.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" "helm.sh/helm/v4/pkg/chart/v2/loader" - "helm.sh/helm/v4/pkg/lint/support" ) // Crds lints the CRDs in the Linter. 
diff --git a/pkg/lint/rules/crds_test.go b/pkg/chart/v2/lint/rules/crds_test.go similarity index 95% rename from pkg/lint/rules/crds_test.go rename to pkg/chart/v2/lint/rules/crds_test.go index d497b29ba..e644f182f 100644 --- a/pkg/lint/rules/crds_test.go +++ b/pkg/chart/v2/lint/rules/crds_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" - "helm.sh/helm/v4/pkg/lint/support" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" ) const invalidCrdsDir = "./testdata/invalidcrdsdir" diff --git a/pkg/lint/rules/dependencies.go b/pkg/chart/v2/lint/rules/dependencies.go similarity index 96% rename from pkg/lint/rules/dependencies.go rename to pkg/chart/v2/lint/rules/dependencies.go index 16c9d6435..d944a016d 100644 --- a/pkg/lint/rules/dependencies.go +++ b/pkg/chart/v2/lint/rules/dependencies.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rules // import "helm.sh/helm/v4/pkg/lint/rules" +package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules" import ( "fmt" "strings" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" "helm.sh/helm/v4/pkg/chart/v2/loader" - "helm.sh/helm/v4/pkg/lint/support" ) // Dependencies runs lints against a chart's dependencies diff --git a/pkg/lint/rules/dependencies_test.go b/pkg/chart/v2/lint/rules/dependencies_test.go similarity index 98% rename from pkg/lint/rules/dependencies_test.go rename to pkg/chart/v2/lint/rules/dependencies_test.go index 1369b2372..08a6646cd 100644 --- a/pkg/lint/rules/dependencies_test.go +++ b/pkg/chart/v2/lint/rules/dependencies_test.go @@ -20,8 +20,8 @@ import ( "testing" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" ) func chartWithBadDependencies() chart.Chart { diff --git a/pkg/chart/v2/lint/rules/deprecations.go b/pkg/chart/v2/lint/rules/deprecations.go new file mode 100644 index 000000000..6eba316bc --- /dev/null +++ b/pkg/chart/v2/lint/rules/deprecations.go @@ -0,0 +1,106 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules" + +import ( + "fmt" + "strconv" + + "helm.sh/helm/v4/pkg/chart/common" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/deprecation" + kscheme "k8s.io/client-go/kubernetes/scheme" +) + +var ( + // This should be set in the Makefile based on the version of client-go being imported. + // These constants will be overwritten with LDFLAGS. The version components must be + // strings in order for LDFLAGS to set them. 
+ k8sVersionMajor = "1" + k8sVersionMinor = "20" +) + +// deprecatedAPIError indicates than an API is deprecated in Kubernetes +type deprecatedAPIError struct { + Deprecated string + Message string +} + +func (e deprecatedAPIError) Error() string { + msg := e.Message + return msg +} + +func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error { + // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation + if resource.APIVersion == "" { + return nil + } + if resource.Kind == "" { + return nil + } + + majorVersion := k8sVersionMajor + minorVersion := k8sVersionMinor + + if kubeVersion != nil { + majorVersion = kubeVersion.Major + minorVersion = kubeVersion.Minor + } + + runtimeObject, err := resourceToRuntimeObject(resource) + if err != nil { + // do not error for non-kubernetes resources + if runtime.IsNotRegisteredError(err) { + return nil + } + return err + } + + major, err := strconv.Atoi(majorVersion) + if err != nil { + return err + } + minor, err := strconv.Atoi(minorVersion) + if err != nil { + return err + } + + if !deprecation.IsDeprecated(runtimeObject, major, minor) { + return nil + } + gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind) + return deprecatedAPIError{ + Deprecated: gvk, + Message: deprecation.WarningMessage(runtimeObject), + } +} + +func resourceToRuntimeObject(resource *k8sYamlStruct) (runtime.Object, error) { + scheme := runtime.NewScheme() + kscheme.AddToScheme(scheme) + + gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind) + out, err := scheme.New(gvk) + if err != nil { + return nil, err + } + out.GetObjectKind().SetGroupVersionKind(gvk) + return out, nil +} diff --git a/pkg/lint/rules/deprecations_test.go b/pkg/chart/v2/lint/rules/deprecations_test.go similarity index 94% rename from pkg/lint/rules/deprecations_test.go rename to pkg/chart/v2/lint/rules/deprecations_test.go index 6add843ce..e153f67e6 100644 --- a/pkg/lint/rules/deprecations_test.go +++ b/pkg/chart/v2/lint/rules/deprecations_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rules // import "helm.sh/helm/v4/pkg/lint/rules" +package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules" import "testing" diff --git a/pkg/lint/rules/template.go b/pkg/chart/v2/lint/rules/template.go similarity index 95% rename from pkg/lint/rules/template.go rename to pkg/chart/v2/lint/rules/template.go index b36153ec6..5c84d0f68 100644 --- a/pkg/lint/rules/template.go +++ b/pkg/chart/v2/lint/rules/template.go @@ -33,10 +33,12 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/engine" - "helm.sh/helm/v4/pkg/lint/support" ) // Templates lints the templates in the Linter. @@ -45,12 +47,12 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace } // TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version. 
-func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) { +func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) { TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false) } // TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not. -func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) { +func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) { fpath := "templates/" templatesPath := filepath.Join(linter.ChartDir, fpath) @@ -74,12 +76,12 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string return } - options := chartutil.ReleaseOptions{ + options := common.ReleaseOptions{ Name: "test-release", Namespace: namespace, } - caps := chartutil.DefaultCapabilities.Copy() + caps := common.DefaultCapabilities.Copy() if kubeVersion != nil { caps.KubeVersion = *kubeVersion } @@ -90,12 +92,12 @@ func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string return } - cvals, err := chartutil.CoalesceValues(chart, values) + cvals, err := util.CoalesceValues(chart, values) if err != nil { return } - valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation) + valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation) if err != nil { linter.RunLinterRule(support.ErrorSev, fpath, err) return diff --git a/pkg/lint/rules/template_test.go b/pkg/chart/v2/lint/rules/template_test.go similarity index 98% rename from pkg/lint/rules/template_test.go rename to pkg/chart/v2/lint/rules/template_test.go index 787bd6e4b..3e8e0b371 100644 --- a/pkg/lint/rules/template_test.go +++ b/pkg/chart/v2/lint/rules/template_test.go @@ -23,9 +23,10 @@ import ( "strings" "testing" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" ) const templateTestBasedir = "./testdata/albatross" @@ -189,7 +190,7 @@ func TestDeprecatedAPIFails(t *testing.T) { Version: "0.1.0", Icon: "satisfy-the-linting-gods.gif", }, - Templates: []*chart.File{ + Templates: []*common.File{ { Name: "templates/baddeployment.yaml", Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"), @@ -249,7 +250,7 @@ func TestStrictTemplateParsingMapError(t *testing.T) { "key1": "val1", }, }, - Templates: []*chart.File{ + Templates: []*common.File{ { Name: "templates/configmap.yaml", Data: []byte(manifest), @@ -378,7 +379,7 @@ func TestEmptyWithCommentsManifests(t *testing.T) { Version: "0.1.0", Icon: "satisfy-the-linting-gods.gif", }, - Templates: []*chart.File{ + Templates: []*common.File{ { Name: "templates/empty-with-comments.yaml", Data: []byte("#@formatter:off\n"), diff --git a/pkg/lint/rules/testdata/albatross/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/albatross/Chart.yaml rename to 
pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl b/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl new file mode 100644 index 000000000..24f76db73 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{define "name"}}{{default "nginx" .Values.nameOverride | trunc 63 | trimSuffix "-" }}{{end}} + +{{/* +Create a default fully qualified app name. + +We truncate at 63 chars because some Kubernetes name fields are limited to this +(by the DNS naming spec). +*/}} +{{define "fullname"}} +{{- $name := default "nginx" .Values.nameOverride -}} +{{printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{end}} diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml new file mode 100644 index 000000000..a11e0e90e --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml @@ -0,0 +1 @@ +{{ deliberateSyntaxError }} diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml new file mode 100644 index 000000000..16bb27d55 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml @@ -0,0 +1,19 @@ +# This is a service gateway to the replica set created by the deployment. +# Take a look at the deployment.yaml for general notes about this chart. +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.name }}" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + kubeVersion: {{ .Capabilities.KubeVersion.Major }} +spec: + ports: + - port: {{default 80 .Values.httpPort | quote}} + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{template "fullname" .}} diff --git a/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml b/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml new file mode 100644 index 000000000..74cc6a0dc --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml @@ -0,0 +1 @@ +name: "mariner" diff --git a/pkg/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/anotherbadchartfile/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml new file mode 100644 index 000000000..3564ede3e --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml @@ -0,0 +1,11 @@ +description: A Helm chart for Kubernetes +version: 0.0.0.0 +home: "" +type: application +dependencies: +- name: mariadb + version: 5.x.x + repository: https://charts.helm.sh/stable/ + condition: mariadb.enabled + tags: + - database diff --git a/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml new file mode 100644 index 000000000..9f367033b --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml @@ -0,0 +1 @@ +# Default values for badchartfile. 
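The "condition: mariadb.enabled" entry in the badchartfile fixture above is the kind of dotted
path that processDependencyConditions (in the pkg/chart/v2/util/dependencies.go hunk later in
this patch) resolves with Values.PathValue. A small sketch of that lookup, with the map literal
standing in for parsed values:

	vals := common.Values{
		"mariadb": map[string]interface{}{"enabled": false},
	}
	if enabled, err := vals.PathValue("mariadb.enabled"); err == nil {
		// enabled is false here, so a dependency guarded by this condition gets disabled;
		// a missing path would instead return a common.ErrNoValue, which that code tolerates.
		_ = enabled
	}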
diff --git a/pkg/lint/rules/testdata/badchartname/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/badchartname/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml b/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml new file mode 100644 index 000000000..9f367033b --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml @@ -0,0 +1 @@ +# Default values for badchartfile. diff --git a/pkg/lint/rules/testdata/badcrdfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/badcrdfile/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml new file mode 100644 index 000000000..468916053 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml @@ -0,0 +1,2 @@ +apiVersion: bad.k8s.io/v1beta1 +kind: CustomResourceDefinition diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml new file mode 100644 index 000000000..523b97f85 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml @@ -0,0 +1,2 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: NotACustomResourceDefinition diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep b/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml new file mode 100644 index 000000000..2fffc7715 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml @@ -0,0 +1 @@ +# Default values for badcrdfile. 
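The two badcrdfile fixtures above pin down what the crds rule rejects: an object in the wrong
API group, and an apiextensions object that is not a CustomResourceDefinition. The rule's
implementation is not shown in this hunk, so the following is only a guess at the shape of the
check, with obj as a hypothetical parsed YAML header:

	looksLikeCRD := strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io/") &&
		obj.Kind == "CustomResourceDefinition"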
diff --git a/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/badvaluesfile/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml new file mode 100644 index 000000000..6c2ceb8db --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml @@ -0,0 +1,2 @@ +metadata: + name: {{.name | default "foo" | title}} diff --git a/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml new file mode 100644 index 000000000..b5a10271c --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml @@ -0,0 +1,2 @@ +# Invalid value for badvaluesfile for testing lint fails with invalid yaml format +name= "value" diff --git a/pkg/lint/rules/testdata/goodone/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/goodone/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml new file mode 100644 index 000000000..1d7350f1d --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml @@ -0,0 +1,19 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: tests.test.io +spec: + group: test.io + names: + kind: Test + listKind: TestList + plural: tests + singular: test + scope: Namespaced + versions: + - name : v1alpha2 + served: true + storage: true + - name : v1alpha1 + served: true + storage: false diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml new file mode 100644 index 000000000..cd46f62c7 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml @@ -0,0 +1,2 @@ +metadata: + name: {{ .Values.name | default "foo" | lower }} diff --git a/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml b/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml new file mode 100644 index 000000000..92c3d9bb9 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml @@ -0,0 +1 @@ +name: "goodone-here" diff --git a/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml new file mode 100644 index 000000000..0fd58d1d4 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml @@ -0,0 +1,6 @@ +name: some-chart +apiVersion: v2 +apiVersion: v1 +description: A Helm chart for Kubernetes +version: 1.3.0 +icon: http://example.com diff --git a/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml b/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/invalidcrdsdir/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds new file mode 100644 
index 000000000..e69de29bb diff --git a/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml new file mode 100644 index 000000000..6b1611a64 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml @@ -0,0 +1 @@ +# Default values for invalidcrdsdir. diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore b/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/lint/rules/testdata/malformed-template/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/malformed-template/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml new file mode 100644 index 000000000..213198fda --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml @@ -0,0 +1 @@ +{ {- $relname := .Release.Name -}} diff --git a/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml b/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml new file mode 100644 index 000000000..1cc3182ea --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml @@ -0,0 +1,82 @@ +# Default values for test. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/pkg/lint/rules/testdata/multi-template-fail/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/multi-template-fail/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml new file mode 100644 index 000000000..835be07be --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: game-config +data: + game.properties: cheat +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: -this:name-is-not_valid$ +data: + game.properties: empty diff --git a/pkg/lint/rules/testdata/v3-fail/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/v3-fail/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl new file mode 100644 index 000000000..0b89e723b --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "v3-fail.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "v3-fail.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "v3-fail.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "v3-fail.labels" -}} +helm.sh/chart: {{ include "v3-fail.chart" . }} +{{ include "v3-fail.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "v3-fail.selectorLabels" -}} +app.kubernetes.io/name: {{ include "v3-fail.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "v3-fail.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "v3-fail.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml new file mode 100644 index 000000000..6d651ab8e --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "v3-fail.fullname" . }} + labels: + nope: {{ .Release.Time }} + {{- include "v3-fail.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "v3-fail.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "v3-fail.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "v3-fail.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml new file mode 100644 index 000000000..4790650d0 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml @@ -0,0 +1,62 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "v3-fail.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "v3-fail.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + "helm.sh/hook": crd-install + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml new file mode 100644 index 000000000..79a0f40b0 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "v3-fail.fullname" . }} + annotations: + helm.sh/hook: crd-install + labels: + {{- include "v3-fail.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "v3-fail.selectorLabels" . | nindent 4 }} diff --git a/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml b/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml new file mode 100644 index 000000000..01d99b4e6 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml @@ -0,0 +1,66 @@ +# Default values for v3-fail. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/pkg/lint/rules/testdata/withsubchart/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml diff --git a/pkg/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml similarity index 100% rename from pkg/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml rename to pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml new file mode 100644 index 000000000..6cb6cc2af --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml @@ -0,0 +1,2 @@ +metadata: + name: {{ .Values.subchart.name | lower }} diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml new file mode 100644 index 000000000..422a359d5 --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml @@ -0,0 +1,2 @@ +subchart: + name: subchart \ No newline at end of file diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml new file mode 100644 index 000000000..6cb6cc2af --- /dev/null +++ b/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml @@ -0,0 +1,2 @@ +metadata: + name: {{ .Values.subchart.name | lower }} diff --git a/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml b/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/lint/rules/values.go b/pkg/chart/v2/lint/rules/values.go similarity index 84% rename from pkg/lint/rules/values.go rename to pkg/chart/v2/lint/rules/values.go index 019e74fa7..5260bf8b3 100644 --- a/pkg/lint/rules/values.go +++ b/pkg/chart/v2/lint/rules/values.go @@ -21,8 +21,9 @@ import ( "os" "path/filepath" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" - "helm.sh/helm/v4/pkg/lint/support" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" ) // ValuesWithOverrides tests the values.yaml file. @@ -52,7 +53,7 @@ func validateValuesFileExistence(valuesPath string) error { } func validateValuesFile(valuesPath string, overrides map[string]interface{}) error { - values, err := chartutil.ReadValuesFile(valuesPath) + values, err := common.ReadValuesFile(valuesPath) if err != nil { return fmt.Errorf("unable to parse YAML: %w", err) } @@ -62,8 +63,8 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}) err // We could change that. 
For now, though, we retain that strategy, and thus can // coalesce tables (like reuse-values does) instead of doing the full chart // CoalesceValues - coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) - coalescedValues = chartutil.CoalesceTables(coalescedValues, values) + coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues = util.CoalesceTables(coalescedValues, values) ext := filepath.Ext(valuesPath) schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json" @@ -74,5 +75,5 @@ func validateValuesFile(valuesPath string, overrides map[string]interface{}) err if err != nil { return err } - return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema) + return util.ValidateAgainstSingleSchema(coalescedValues, schema) } diff --git a/pkg/chart/v2/lint/rules/values_test.go b/pkg/chart/v2/lint/rules/values_test.go new file mode 100644 index 000000000..348695785 --- /dev/null +++ b/pkg/chart/v2/lint/rules/values_test.go @@ -0,0 +1,169 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + + "helm.sh/helm/v4/internal/test/ensure" +) + +var nonExistingValuesFilePath = filepath.Join("/fake/dir", "values.yaml") + +const testSchema = ` +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "helm values test schema", + "type": "object", + "additionalProperties": false, + "required": [ + "username", + "password" + ], + "properties": { + "username": { + "description": "Your username", + "type": "string" + }, + "password": { + "description": "Your password", + "type": "string" + } + } +} +` + +func TestValidateValuesYamlNotDirectory(t *testing.T) { + _ = os.Mkdir(nonExistingValuesFilePath, os.ModePerm) + defer os.Remove(nonExistingValuesFilePath) + + err := validateValuesFileExistence(nonExistingValuesFilePath) + if err == nil { + t.Errorf("validateValuesFileExistence to return a linter error, got no error") + } +} + +func TestValidateValuesFileWellFormed(t *testing.T) { + badYaml := ` + not:well[]{}formed + ` + tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml)) + valfile := filepath.Join(tmpdir, "values.yaml") + if err := validateValuesFile(valfile, map[string]interface{}{}); err == nil { + t.Fatal("expected values file to fail parsing") + } +} + +func TestValidateValuesFileSchema(t *testing.T) { + yaml := "username: admin\npassword: swordfish" + tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml)) + createTestingSchema(t, tmpdir) + + valfile := filepath.Join(tmpdir, "values.yaml") + if err := validateValuesFile(valfile, map[string]interface{}{}); err != nil { + t.Fatalf("Failed validation with %s", err) + } +} + +func TestValidateValuesFileSchemaFailure(t *testing.T) { + // 1234 is an int, not a string. This should fail. 
+ yaml := "username: 1234\npassword: swordfish" + tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml)) + createTestingSchema(t, tmpdir) + + valfile := filepath.Join(tmpdir, "values.yaml") + + err := validateValuesFile(valfile, map[string]interface{}{}) + if err == nil { + t.Fatal("expected values file to fail parsing") + } + + assert.Contains(t, err.Error(), "- at '/username': got number, want string") +} + +func TestValidateValuesFileSchemaOverrides(t *testing.T) { + yaml := "username: admin" + overrides := map[string]interface{}{ + "password": "swordfish", + } + tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml)) + createTestingSchema(t, tmpdir) + + valfile := filepath.Join(tmpdir, "values.yaml") + if err := validateValuesFile(valfile, overrides); err != nil { + t.Fatalf("Failed validation with %s", err) + } +} + +func TestValidateValuesFile(t *testing.T) { + tests := []struct { + name string + yaml string + overrides map[string]interface{} + errorMessage string + }{ + { + name: "value added", + yaml: "username: admin", + overrides: map[string]interface{}{"password": "swordfish"}, + }, + { + name: "value not overridden", + yaml: "username: admin\npassword:", + overrides: map[string]interface{}{"username": "anotherUser"}, + errorMessage: "- at '/password': got null, want string", + }, + { + name: "value overridden", + yaml: "username: admin\npassword:", + overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpdir := ensure.TempFile(t, "values.yaml", []byte(tt.yaml)) + createTestingSchema(t, tmpdir) + + valfile := filepath.Join(tmpdir, "values.yaml") + + err := validateValuesFile(valfile, tt.overrides) + + switch { + case err != nil && tt.errorMessage == "": + t.Errorf("Failed validation with %s", err) + case err == nil && tt.errorMessage != "": + t.Error("expected values file to fail parsing") + case err != nil && tt.errorMessage != "": + assert.Contains(t, err.Error(), tt.errorMessage, "Failed with unexpected error") + } + }) + } +} + +func createTestingSchema(t *testing.T, dir string) string { + t.Helper() + schemafile := filepath.Join(dir, "values.schema.json") + if err := os.WriteFile(schemafile, []byte(testSchema), 0700); err != nil { + t.Fatalf("Failed to write schema to tmpdir: %s", err) + } + return schemafile +} diff --git a/pkg/lint/support/doc.go b/pkg/chart/v2/lint/support/doc.go similarity index 91% rename from pkg/lint/support/doc.go rename to pkg/chart/v2/lint/support/doc.go index b007804dc..7e050b8c2 100644 --- a/pkg/lint/support/doc.go +++ b/pkg/chart/v2/lint/support/doc.go @@ -20,4 +20,4 @@ Package support contains tools for linting charts. Linting is the process of testing charts for errors or warnings regarding formatting, compilation, or standards compliance. */ -package support // import "helm.sh/helm/v4/pkg/lint/support" +package support // import "helm.sh/helm/v4/pkg/chart/v2/lint/support" diff --git a/pkg/chart/v2/lint/support/message.go b/pkg/chart/v2/lint/support/message.go new file mode 100644 index 000000000..5efbc7a61 --- /dev/null +++ b/pkg/chart/v2/lint/support/message.go @@ -0,0 +1,76 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package support + +import "fmt" + +// Severity indicates the severity of a Message. +const ( + // UnknownSev indicates that the severity of the error is unknown, and should not stop processing. + UnknownSev = iota + // InfoSev indicates information, for example missing values.yaml file + InfoSev + // WarningSev indicates that something does not meet code standards, but will likely function. + WarningSev + // ErrorSev indicates that something will not likely function. + ErrorSev +) + +// sev matches the *Sev states. +var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"} + +// Linter encapsulates a linting run of a particular chart. +type Linter struct { + Messages []Message + // The highest severity of all the failing lint rules + HighestSeverity int + ChartDir string +} + +// Message describes an error encountered while linting. +type Message struct { + // Severity is one of the *Sev constants + Severity int + Path string + Err error +} + +func (m Message) Error() string { + return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error()) +} + +// NewMessage creates a new Message struct +func NewMessage(severity int, path string, err error) Message { + return Message{Severity: severity, Path: path, Err: err} +} + +// RunLinterRule returns true if the validation passed +func (l *Linter) RunLinterRule(severity int, path string, err error) bool { + // severity is out of bound + if severity < 0 || severity >= len(sev) { + return false + } + + if err != nil { + l.Messages = append(l.Messages, NewMessage(severity, path, err)) + + if severity > l.HighestSeverity { + l.HighestSeverity = severity + } + } + return err == nil +} diff --git a/pkg/chart/v2/lint/support/message_test.go b/pkg/chart/v2/lint/support/message_test.go new file mode 100644 index 000000000..ce5b5e42e --- /dev/null +++ b/pkg/chart/v2/lint/support/message_test.go @@ -0,0 +1,79 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package support + +import ( + "errors" + "testing" +) + +var errLint = errors.New("lint failed") + +func TestRunLinterRule(t *testing.T) { + var tests = []struct { + Severity int + LintError error + ExpectedMessages int + ExpectedReturn bool + ExpectedHighestSeverity int + }{ + {InfoSev, errLint, 1, false, InfoSev}, + {WarningSev, errLint, 2, false, WarningSev}, + {ErrorSev, errLint, 3, false, ErrorSev}, + // No error so it returns true + {ErrorSev, nil, 3, true, ErrorSev}, + // Retains highest severity + {InfoSev, errLint, 4, false, ErrorSev}, + // Invalid severity values + {4, errLint, 4, false, ErrorSev}, + {22, errLint, 4, false, ErrorSev}, + {-1, errLint, 4, false, ErrorSev}, + } + + linter := Linter{} + for _, test := range tests { + isValid := linter.RunLinterRule(test.Severity, "chart", test.LintError) + if len(linter.Messages) != test.ExpectedMessages { + t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.Messages should now have %d message, we got %d", test.Severity, test.LintError, test.ExpectedMessages, len(linter.Messages)) + } + + if linter.HighestSeverity != test.ExpectedHighestSeverity { + t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.HighestSeverity should be %d, we got %d", test.Severity, test.LintError, test.ExpectedHighestSeverity, linter.HighestSeverity) + } + + if isValid != test.ExpectedReturn { + t.Errorf("RunLinterRule(%d, \"chart\", %v), should have returned %t but returned %t", test.Severity, test.LintError, test.ExpectedReturn, isValid) + } + } +} + +func TestMessage(t *testing.T) { + m := Message{ErrorSev, "Chart.yaml", errors.New("Foo")} + if m.Error() != "[ERROR] Chart.yaml: Foo" { + t.Errorf("Unexpected output: %s", m.Error()) + } + + m = Message{WarningSev, "templates/", errors.New("Bar")} + if m.Error() != "[WARNING] templates/: Bar" { + t.Errorf("Unexpected output: %s", m.Error()) + } + + m = Message{InfoSev, "templates/rc.yaml", errors.New("FooBar")} + if m.Error() != "[INFO] templates/rc.yaml: FooBar" { + t.Errorf("Unexpected output: %s", m.Error()) + } +} diff --git a/pkg/chart/v2/loader/load.go b/pkg/chart/v2/loader/load.go index 75c73e959..0c025e183 100644 --- a/pkg/chart/v2/loader/load.go +++ b/pkg/chart/v2/loader/load.go @@ -31,6 +31,7 @@ import ( utilyaml "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/yaml" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -80,7 +81,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { // do not rely on assumed ordering of files in the chart and crash // if Chart.yaml was not coming early enough to initialize metadata for _, f := range files { - c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data}) + c.Raw = append(c.Raw, &common.File{Name: f.Name, Data: f.Data}) if f.Name == "Chart.yaml" { if c.Metadata == nil { c.Metadata = new(chart.Metadata) @@ -128,7 +129,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { return c, fmt.Errorf("cannot load requirements.yaml: %w", err) } if c.Metadata.APIVersion == chart.APIVersionV1 { - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) } // Deprecated: requirements.lock is deprecated use Chart.lock. case f.Name == "requirements.lock": @@ -143,14 +144,14 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { log.Printf("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". 
We recommend migrating to Chart.lock.") } if c.Metadata.APIVersion == chart.APIVersionV1 { - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) } case strings.HasPrefix(f.Name, "templates/"): - c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data}) + c.Templates = append(c.Templates, &common.File{Name: f.Name, Data: f.Data}) case strings.HasPrefix(f.Name, "charts/"): if filepath.Ext(f.Name) == ".prov" { - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) continue } @@ -158,7 +159,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { cname := strings.SplitN(fname, "/", 2)[0] subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data}) default: - c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data}) } } diff --git a/pkg/chart/v2/loader/load_test.go b/pkg/chart/v2/loader/load_test.go index 41154421c..c4ae646f6 100644 --- a/pkg/chart/v2/loader/load_test.go +++ b/pkg/chart/v2/loader/load_test.go @@ -30,6 +30,7 @@ import ( "testing" "time" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -543,7 +544,7 @@ foo: } } -func TestMergeValues(t *testing.T) { +func TestMergeValuesV2(t *testing.T) { nestedMap := map[string]interface{}{ "foo": "bar", "baz": map[string]string{ @@ -753,7 +754,7 @@ func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) { } } -func verifyBomStripped(t *testing.T, files []*chart.File) { +func verifyBomStripped(t *testing.T, files []*common.File) { t.Helper() for _, file := range files { if bytes.HasPrefix(file.Data, utf8bom) { diff --git a/pkg/chart/v2/util/create.go b/pkg/chart/v2/util/create.go index a8ae3ab40..d7c1fe31c 100644 --- a/pkg/chart/v2/util/create.go +++ b/pkg/chart/v2/util/create.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/yaml" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/chart/v2/loader" ) @@ -655,11 +656,11 @@ func CreateFrom(chartfile *chart.Metadata, dest, src string) error { schart.Metadata = chartfile - var updatedTemplates []*chart.File + var updatedTemplates []*common.File for _, template := range schart.Templates { newData := transform(string(template.Data), schart.Name()) - updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData}) + updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, Data: newData}) } schart.Templates = updatedTemplates diff --git a/pkg/chart/v2/util/dependencies.go b/pkg/chart/v2/util/dependencies.go index 1a2aa1c95..a52f09f82 100644 --- a/pkg/chart/v2/util/dependencies.go +++ b/pkg/chart/v2/util/dependencies.go @@ -22,11 +22,13 @@ import ( "github.com/mitchellh/copystructure" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" chart "helm.sh/helm/v4/pkg/chart/v2" ) // ProcessDependencies checks through this chart's dependencies, processing accordingly. 
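// A usage sketch with the new common.Values signature; the chart path and values are
// illustrative assumptions, not part of this change:
ch, err := loader.Load("./mychart")
if err != nil {
	panic(err) // illustration only
}
vals := common.Values{"mariadb": map[string]interface{}{"enabled": false}}
if err := ProcessDependencies(ch, vals); err != nil {
	panic(err)
}
// Dependencies whose condition or tags resolve to false are dropped from ch, and any
// import-values mappings are folded into ch.Values (see processDependencyImportValues below).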
-func ProcessDependencies(c *chart.Chart, v Values) error { +func ProcessDependencies(c *chart.Chart, v common.Values) error { if err := processDependencyEnabled(c, v, ""); err != nil { return err } @@ -34,7 +36,7 @@ func ProcessDependencies(c *chart.Chart, v Values) error { } // processDependencyConditions disables charts based on condition path value in values -func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { +func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) { if reqs == nil { return } @@ -50,7 +52,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s break } slog.Warn("returned non-bool value", "path", c, "chart", r.Name) - } else if _, ok := err.(ErrNoValue); !ok { + } else if _, ok := err.(common.ErrNoValue); !ok { // this is a real error slog.Warn("the method PathValue returned error", slog.Any("error", err)) } @@ -60,7 +62,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s } // processDependencyTags disables charts based on tags in values -func processDependencyTags(reqs []*chart.Dependency, cvals Values) { +func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) { if reqs == nil { return } @@ -177,7 +179,7 @@ Loop: for _, lr := range c.Metadata.Dependencies { lr.Enabled = true } - cvals, err := CoalesceValues(c, v) + cvals, err := util.CoalesceValues(c, v) if err != nil { return err } @@ -232,6 +234,8 @@ func pathToMap(path string, data map[string]interface{}) map[string]interface{} return set(parsePath(path), data) } +func parsePath(key string) []string { return strings.Split(key, ".") } + func set(path []string, data map[string]interface{}) map[string]interface{} { if len(path) == 0 { return nil @@ -249,12 +253,12 @@ func processImportValues(c *chart.Chart, merge bool) error { return nil } // combine chart values and empty config to get Values - var cvals Values + var cvals common.Values var err error if merge { - cvals, err = MergeValues(c, nil) + cvals, err = util.MergeValues(c, nil) } else { - cvals, err = CoalesceValues(c, nil) + cvals, err = util.CoalesceValues(c, nil) } if err != nil { return err @@ -282,9 +286,9 @@ func processImportValues(c *chart.Chart, merge bool) error { } // create value map from child to be merged into parent if merge { - b = MergeTables(b, pathToMap(parent, vv.AsMap())) + b = util.MergeTables(b, pathToMap(parent, vv.AsMap())) } else { - b = CoalesceTables(b, pathToMap(parent, vv.AsMap())) + b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap())) } case string: child := "exports." + iv @@ -298,9 +302,9 @@ func processImportValues(c *chart.Chart, merge bool) error { continue } if merge { - b = MergeTables(b, vm.AsMap()) + b = util.MergeTables(b, vm.AsMap()) } else { - b = CoalesceTables(b, vm.AsMap()) + b = util.CoalesceTables(b, vm.AsMap()) } } } @@ -315,14 +319,14 @@ func processImportValues(c *chart.Chart, merge bool) error { // deep copying the cvals as there are cases where pointers can end // up in the cvals when they are copied onto b in ways that break things. cvals = deepCopyMap(cvals) - c.Values = MergeTables(cvals, b) + c.Values = util.MergeTables(cvals, b) } else { // Trimming the nil values from cvals is needed for backwards compatibility. // Previously, the b value had been populated with cvals along with some // overrides. This caused the coalescing functionality to remove the // nil/null values. This trimming is for backwards compat. 
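// A small illustration of the difference the merge flag makes here (arbitrary maps; picture
// a fresh dst per call, since both helpers mutate and return their first argument):
//
//	dst := map[string]interface{}{"a": nil, "b": 1}
//	src := map[string]interface{}{"a": "x", "c": 2}
//	util.CoalesceTables(dst, src) // {"b": 1, "c": 2}: dst wins and nil entries are dropped
//	util.MergeTables(dst, src)    // {"a": nil, "b": 1, "c": 2}: the explicit nil in dst is kept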
cvals = trimNilValues(cvals) - c.Values = CoalesceTables(cvals, b) + c.Values = util.CoalesceTables(cvals, b) } return nil @@ -355,6 +359,12 @@ func trimNilValues(vals map[string]interface{}) map[string]interface{} { return valsCopyMap } +// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. +func istable(v interface{}) bool { + _, ok := v.(map[string]interface{}) + return ok +} + // processDependencyImportValues imports specified chart values from child to parent. func processDependencyImportValues(c *chart.Chart, merge bool) error { for _, d := range c.Dependencies() { diff --git a/pkg/chart/v2/util/dependencies_test.go b/pkg/chart/v2/util/dependencies_test.go index d645d7bf5..c817b0b89 100644 --- a/pkg/chart/v2/util/dependencies_test.go +++ b/pkg/chart/v2/util/dependencies_test.go @@ -21,6 +21,7 @@ import ( "strconv" "testing" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/chart/v2/loader" ) @@ -221,7 +222,7 @@ func TestProcessDependencyImportValues(t *testing.T) { if err := processDependencyImportValues(c, false); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc := Values(c.Values) + cc := common.Values(c.Values) for kk, vv := range e { pv, err := cc.PathValue(kk) if err != nil { @@ -251,7 +252,7 @@ func TestProcessDependencyImportValues(t *testing.T) { t.Error("expect nil value not found but found it") } switch xerr := err.(type) { - case ErrNoValue: + case common.ErrNoValue: // We found what we expected default: t.Errorf("expected an ErrNoValue but got %q instead", xerr) @@ -261,7 +262,7 @@ func TestProcessDependencyImportValues(t *testing.T) { if err := processDependencyImportValues(c, true); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc = Values(c.Values) + cc = common.Values(c.Values) val, err := cc.PathValue("ensurenull") if err != nil { t.Error("expect value but ensurenull was not found") @@ -291,7 +292,7 @@ func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T e["foo.grandchild.defaults.defaultValue"] = "42" e["bar.grandchild.defaults.defaultValue"] = "42" - cValues := Values(c.Values) + cValues := common.Values(c.Values) for kk, vv := range e { pv, err := cValues.PathValue(kk) if err != nil { @@ -329,7 +330,7 @@ func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) { if err := processDependencyImportValues(c, true); err != nil { t.Fatalf("processing import values dependencies %v", err) } - cc := Values(c.Values) + cc := common.Values(c.Values) for kk, vv := range e { pv, err := cc.PathValue(kk) if err != nil { diff --git a/pkg/chart/v2/util/save.go b/pkg/chart/v2/util/save.go index 624a5b562..69a98924c 100644 --- a/pkg/chart/v2/util/save.go +++ b/pkg/chart/v2/util/save.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/yaml" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" ) @@ -76,7 +77,7 @@ func SaveDir(c *chart.Chart, dest string) error { } // Save templates and files - for _, o := range [][]*chart.File{c.Templates, c.Files} { + for _, o := range [][]*common.File{c.Templates, c.Files} { for _, f := range o { n := filepath.Join(outdir, f.Name) if err := writeFile(n, f.Data); err != nil { @@ -258,7 +259,7 @@ func validateName(name string) error { nname := filepath.Base(name) if nname != name { - return ErrInvalidChartName{name} + return common.ErrInvalidChartName{Name: name} } return nil diff --git a/pkg/chart/v2/util/save_test.go 
b/pkg/chart/v2/util/save_test.go index ff96331b5..ef822a82a 100644 --- a/pkg/chart/v2/util/save_test.go +++ b/pkg/chart/v2/util/save_test.go @@ -29,6 +29,7 @@ import ( "testing" "time" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/chart/v2/loader" ) @@ -47,7 +48,7 @@ func TestSave(t *testing.T) { Lock: &chart.Lock{ Digest: "testdigest", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, Schema: []byte("{\n \"title\": \"Values\"\n}"), @@ -116,7 +117,7 @@ func TestSave(t *testing.T) { Lock: &chart.Lock{ Digest: "testdigest", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, } @@ -156,7 +157,7 @@ func TestSavePreservesTimestamps(t *testing.T) { "imageName": "testimage", "imageId": 42, }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, Schema: []byte("{\n \"title\": \"Values\"\n}"), @@ -222,10 +223,10 @@ func TestSaveDir(t *testing.T) { Name: "ahab", Version: "1.2.3", }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "scheherazade/shahryar.txt", Data: []byte("1,001 Nights")}, }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), Data: []byte("abc: {{ .Values.abc }}")}, }, } diff --git a/pkg/cli/values/options_test.go b/pkg/cli/values/options_test.go index 4dbc709f1..fe1afc5d2 100644 --- a/pkg/cli/values/options_test.go +++ b/pkg/cli/values/options_test.go @@ -294,7 +294,7 @@ func TestReadFileOriginal(t *testing.T) { } } -func TestMergeValues(t *testing.T) { +func TestMergeValuesCLI(t *testing.T) { tests := []struct { name string opts Options diff --git a/pkg/cmd/helpers_test.go b/pkg/cmd/helpers_test.go index 40478c30e..55e3a842f 100644 --- a/pkg/cmd/helpers_test.go +++ b/pkg/cmd/helpers_test.go @@ -28,7 +28,7 @@ import ( "helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/pkg/action" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" "helm.sh/helm/v4/pkg/cli" kubefake "helm.sh/helm/v4/pkg/kube/fake" release "helm.sh/helm/v4/pkg/release/v1" @@ -91,7 +91,7 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) actionConfig := &action.Configuration{ Releases: store, KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard}, - Capabilities: chartutil.DefaultCapabilities, + Capabilities: common.DefaultCapabilities, } root, err := newRootCmdWithConfig(actionConfig, buf, args, SetupLogging) diff --git a/pkg/cmd/lint.go b/pkg/cmd/lint.go index 78083a7ea..71540f1be 100644 --- a/pkg/cmd/lint.go +++ b/pkg/cmd/lint.go @@ -27,10 +27,10 @@ import ( "github.com/spf13/cobra" "helm.sh/helm/v4/pkg/action" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/v2/lint/support" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/getter" - "helm.sh/helm/v4/pkg/lint/support" ) var longLintHelp = ` @@ -58,7 +58,7 @@ func newLintCmd(out io.Writer) *cobra.Command { } if kubeVersion != "" { - parsedKubeVersion, err := chartutil.ParseKubeVersion(kubeVersion) + parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion) if err != nil { return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err) } diff --git a/pkg/cmd/status.go b/pkg/cmd/status.go index aa836f9f3..3d1309c3e 100644 --- a/pkg/cmd/status.go +++ b/pkg/cmd/status.go @@ -30,7 +30,7 @@ import ( 
coloroutput "helm.sh/helm/v4/internal/cli/output" "helm.sh/helm/v4/pkg/action" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common/util" "helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cmd/require" release "helm.sh/helm/v4/pkg/release/v1" @@ -197,7 +197,7 @@ func (s statusPrinter) WriteTable(out io.Writer) error { // Print an extra newline _, _ = fmt.Fprintln(out) - cfg, err := chartutil.CoalesceValues(s.release.Chart, s.release.Config) + cfg, err := util.CoalesceValues(s.release.Chart, s.release.Config) if err != nil { return err } diff --git a/pkg/cmd/template.go b/pkg/cmd/template.go index aaf848c9e..81c112d51 100644 --- a/pkg/cmd/template.go +++ b/pkg/cmd/template.go @@ -35,7 +35,7 @@ import ( "github.com/spf13/cobra" "helm.sh/helm/v4/pkg/action" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" "helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/cmd/require" releaseutil "helm.sh/helm/v4/pkg/release/v1/util" @@ -69,7 +69,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { }, RunE: func(_ *cobra.Command, args []string) error { if kubeVersion != "" { - parsedKubeVersion, err := chartutil.ParseKubeVersion(kubeVersion) + parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion) if err != nil { return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err) } @@ -93,7 +93,7 @@ func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { client.ReleaseName = "release-name" client.Replace = true // Skip the name check client.ClientOnly = !validate - client.APIVersions = chartutil.VersionSet(extraAPIs) + client.APIVersions = common.VersionSet(extraAPIs) client.IncludeCRDs = includeCrds rel, err := runInstall(args, client, valueOpts, out) diff --git a/pkg/cmd/upgrade_test.go b/pkg/cmd/upgrade_test.go index d7375dcad..9b17f187d 100644 --- a/pkg/cmd/upgrade_test.go +++ b/pkg/cmd/upgrade_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/chart/v2/loader" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" @@ -382,7 +383,7 @@ func prepareMockRelease(t *testing.T, releaseName string) (func(n string, v int, Description: "A Helm chart for Kubernetes", Version: "0.1.0", }, - Templates: []*chart.File{{Name: "templates/configmap.yaml", Data: configmapData}}, + Templates: []*common.File{{Name: "templates/configmap.yaml", Data: configmapData}}, } chartPath := filepath.Join(tmpChart, cfile.Metadata.Name) if err := chartutil.SaveDir(cfile, tmpChart); err != nil { @@ -490,7 +491,7 @@ func prepareMockReleaseWithSecret(t *testing.T, releaseName string) (func(n stri Description: "A Helm chart for Kubernetes", Version: "0.1.0", }, - Templates: []*chart.File{{Name: "templates/configmap.yaml", Data: configmapData}, {Name: "templates/secret.yaml", Data: secretData}}, + Templates: []*common.File{{Name: "templates/configmap.yaml", Data: configmapData}, {Name: "templates/secret.yaml", Data: secretData}}, } chartPath := filepath.Join(tmpChart, cfile.Metadata.Name) if err := chartutil.SaveDir(cfile, tmpChart); err != nil { diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 6e47a0e39..a0ca17f08 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -30,8 +30,8 @@ import ( "k8s.io/client-go/rest" - chart "helm.sh/helm/v4/pkg/chart/v2" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + ci "helm.sh/helm/v4/pkg/chart" + "helm.sh/helm/v4/pkg/chart/common" ) // taken from 
https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=141 @@ -88,21 +88,21 @@ func New(config *rest.Config) Engine { // that section of the values will be passed into the "foo" chart. And if that // section contains a value named "bar", that value will be passed on to the // bar chart during render time. -func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { +func (e Engine) Render(chrt ci.Charter, values common.Values) (map[string]string, error) { tmap := allTemplates(chrt, values) return e.render(tmap) } // Render takes a chart, optional values, and value overrides, and attempts to // render the Go templates using the default options. -func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { +func Render(chrt ci.Charter, values common.Values) (map[string]string, error) { return new(Engine).Render(chrt, values) } // RenderWithClient takes a chart, optional values, and value overrides, and attempts to // render the Go templates using the default options. This engine is client aware and so can have template // functions that interact with the client. -func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { +func RenderWithClient(chrt ci.Charter, values common.Values, config *rest.Config) (map[string]string, error) { var clientProvider ClientProvider = clientProviderFromConfig{config} return Engine{ clientProvider: &clientProvider, @@ -113,7 +113,7 @@ func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.C // render the Go templates using the default options. This engine is client aware and so can have template // functions that interact with the client. // This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed. -func RenderWithClientProvider(chrt *chart.Chart, values chartutil.Values, clientProvider ClientProvider) (map[string]string, error) { +func RenderWithClientProvider(chrt ci.Charter, values common.Values, clientProvider ClientProvider) (map[string]string, error) { return Engine{ clientProvider: &clientProvider, }.Render(chrt, values) @@ -124,7 +124,7 @@ type renderable struct { // tpl is the current template. tpl string // vals are the values to be supplied to the template. - vals chartutil.Values + vals common.Values // namespace prefix to the templates of the current chart basePath string } @@ -312,7 +312,7 @@ func (e Engine) render(tpls map[string]renderable) (rendered map[string]string, } // At render time, add information about the template that is being rendered. vals := tpls[filename].vals - vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath} + vals["Template"] = common.Values{"Name": filename, "BasePath": tpls[filename].basePath} var buf strings.Builder if err := t.ExecuteTemplate(&buf, filename, vals); err != nil { return map[string]string{}, reformatExecErrorMsg(filename, err) @@ -455,7 +455,7 @@ func (p byPathLen) Less(i, j int) bool { // allTemplates returns all templates for a chart and its dependencies. // // As it goes, it also prepares the values in a scope-sensitive manner. 
-func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable { +func allTemplates(c ci.Charter, vals common.Values) map[string]renderable { templates := make(map[string]renderable) recAllTpls(c, templates, vals) return templates @@ -465,40 +465,46 @@ func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable { // // As it recurses, it also sets the values to be appropriate for the template // scope. -func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} { +func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Values) map[string]interface{} { + vals := values.AsMap() subCharts := make(map[string]interface{}) - chartMetaData := struct { - chart.Metadata - IsRoot bool - }{*c.Metadata, c.IsRoot()} + accessor, err := ci.NewAccessor(c) + if err != nil { + slog.Error("error accessing chart", "error", err) + } + chartMetaData := accessor.MetadataAsMap() + fmt.Printf("metadata: %v\n", chartMetaData) + chartMetaData["IsRoot"] = accessor.IsRoot() next := map[string]interface{}{ "Chart": chartMetaData, - "Files": newFiles(c.Files), + "Files": newFiles(accessor.Files()), "Release": vals["Release"], "Capabilities": vals["Capabilities"], - "Values": make(chartutil.Values), + "Values": make(common.Values), "Subcharts": subCharts, } // If there is a {{.Values.ThisChart}} in the parent metadata, // copy that into the {{.Values}} for this template. - if c.IsRoot() { + if accessor.IsRoot() { next["Values"] = vals["Values"] - } else if vs, err := vals.Table("Values." + c.Name()); err == nil { + } else if vs, err := values.Table("Values." + accessor.Name()); err == nil { next["Values"] = vs } - for _, child := range c.Dependencies() { - subCharts[child.Name()] = recAllTpls(child, templates, next) + for _, child := range accessor.Dependencies() { + // TODO: Handle error + sub, _ := ci.NewAccessor(child) + subCharts[sub.Name()] = recAllTpls(child, templates, next) } - newParentID := c.ChartFullPath() - for _, t := range c.Templates { + newParentID := accessor.ChartFullPath() + for _, t := range accessor.Templates() { if t == nil { continue } - if !isTemplateValid(c, t.Name) { + if !isTemplateValid(accessor, t.Name) { continue } templates[path.Join(newParentID, t.Name)] = renderable{ @@ -512,14 +518,9 @@ func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil. 
} // isTemplateValid returns true if the template is valid for the chart type -func isTemplateValid(ch *chart.Chart, templateName string) bool { - if isLibraryChart(ch) { +func isTemplateValid(accessor ci.Accessor, templateName string) bool { + if accessor.IsLibraryChart() { return strings.HasPrefix(filepath.Base(templateName), "_") } return true } - -// isLibraryChart returns true if the chart is a library chart -func isLibraryChart(c *chart.Chart) bool { - return strings.EqualFold(c.Metadata.Type, "library") -} diff --git a/pkg/engine/engine_test.go b/pkg/engine/engine_test.go index f4228fbd7..7ac892cec 100644 --- a/pkg/engine/engine_test.go +++ b/pkg/engine/engine_test.go @@ -32,8 +32,9 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/fake" + "helm.sh/helm/v4/pkg/chart/common" + "helm.sh/helm/v4/pkg/chart/common/util" chart "helm.sh/helm/v4/pkg/chart/v2" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" ) func TestSortTemplates(t *testing.T) { @@ -94,7 +95,7 @@ func TestRender(t *testing.T) { Name: "moby", Version: "1.2.3", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/test1", Data: []byte("{{.Values.outer | title }} {{.Values.inner | title}}")}, {Name: "templates/test2", Data: []byte("{{.Values.global.callme | lower }}")}, {Name: "templates/test3", Data: []byte("{{.noValue}}")}, @@ -114,7 +115,7 @@ func TestRender(t *testing.T) { }, } - v, err := chartutil.CoalesceValues(c, vals) + v, err := util.CoalesceValues(c, vals) if err != nil { t.Fatalf("Failed to coalesce values: %s", err) } @@ -144,7 +145,7 @@ func TestRenderRefsOrdering(t *testing.T) { Name: "parent", Version: "1.2.3", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/_helpers.tpl", Data: []byte(`{{- define "test" -}}parent value{{- end -}}`)}, {Name: "templates/test.yaml", Data: []byte(`{{ tpl "{{ include \"test\" . }}" . }}`)}, }, @@ -154,7 +155,7 @@ func TestRenderRefsOrdering(t *testing.T) { Name: "child", Version: "1.2.3", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/_helpers.tpl", Data: []byte(`{{- define "test" -}}child value{{- end -}}`)}, }, } @@ -165,7 +166,7 @@ func TestRenderRefsOrdering(t *testing.T) { } for i := 0; i < 100; i++ { - out, err := Render(parentChart, chartutil.Values{}) + out, err := Render(parentChart, common.Values{}) if err != nil { t.Fatalf("Failed to render templates: %s", err) } @@ -181,7 +182,7 @@ func TestRenderRefsOrdering(t *testing.T) { func TestRenderInternals(t *testing.T) { // Test the internals of the rendering tool. 
- vals := chartutil.Values{"Name": "one", "Value": "two"} + vals := common.Values{"Name": "one", "Value": "two"} tpls := map[string]renderable{ "one": {tpl: `Hello {{title .Name}}`, vals: vals}, "two": {tpl: `Goodbye {{upper .Value}}`, vals: vals}, @@ -218,7 +219,7 @@ func TestRenderWithDNS(t *testing.T) { Name: "moby", Version: "1.2.3", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/test1", Data: []byte("{{getHostByName \"helm.sh\"}}")}, }, Values: map[string]interface{}{}, @@ -228,7 +229,7 @@ func TestRenderWithDNS(t *testing.T) { "Values": map[string]interface{}{}, } - v, err := chartutil.CoalesceValues(c, vals) + v, err := util.CoalesceValues(c, vals) if err != nil { t.Fatalf("Failed to coalesce values: %s", err) } @@ -355,7 +356,7 @@ func TestRenderWithClientProvider(t *testing.T) { } for name, exp := range cases { - c.Templates = append(c.Templates, &chart.File{ + c.Templates = append(c.Templates, &common.File{ Name: path.Join("templates", name), Data: []byte(exp.template), }) @@ -365,7 +366,7 @@ func TestRenderWithClientProvider(t *testing.T) { "Values": map[string]interface{}{}, } - v, err := chartutil.CoalesceValues(c, vals) + v, err := util.CoalesceValues(c, vals) if err != nil { t.Fatalf("Failed to coalesce values: %s", err) } @@ -391,7 +392,7 @@ func TestRenderWithClientProvider_error(t *testing.T) { Name: "moby", Version: "1.2.3", }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/error", Data: []byte(`{{ lookup "v1" "Error" "" "" }}`)}, }, Values: map[string]interface{}{}, @@ -401,7 +402,7 @@ func TestRenderWithClientProvider_error(t *testing.T) { "Values": map[string]interface{}{}, } - v, err := chartutil.CoalesceValues(c, vals) + v, err := util.CoalesceValues(c, vals) if err != nil { t.Fatalf("Failed to coalesce values: %s", err) } @@ -448,7 +449,7 @@ func TestParallelRenderInternals(t *testing.T) { } func TestParseErrors(t *testing.T) { - vals := chartutil.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]interface{}{}} tplsUndefinedFunction := map[string]renderable{ "undefined_function": {tpl: `{{foo}}`, vals: vals}, @@ -464,7 +465,7 @@ func TestParseErrors(t *testing.T) { } func TestExecErrors(t *testing.T) { - vals := chartutil.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]interface{}{}} cases := []struct { name string tpls map[string]renderable @@ -528,7 +529,7 @@ linebreak`, } func TestFailErrors(t *testing.T) { - vals := chartutil.Values{"Values": map[string]interface{}{}} + vals := common.Values{"Values": map[string]interface{}{}} failtpl := `All your base are belong to us{{ fail "This is an error" }}` tplsFailed := map[string]renderable{ @@ -559,14 +560,14 @@ func TestFailErrors(t *testing.T) { func TestAllTemplates(t *testing.T) { ch1 := &chart.Chart{ Metadata: &chart.Metadata{Name: "ch1"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/foo", Data: []byte("foo")}, {Name: "templates/bar", Data: []byte("bar")}, }, } dep1 := &chart.Chart{ Metadata: &chart.Metadata{Name: "laboratory mice"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/pinky", Data: []byte("pinky")}, {Name: "templates/brain", Data: []byte("brain")}, }, @@ -575,13 +576,13 @@ func TestAllTemplates(t *testing.T) { dep2 := &chart.Chart{ Metadata: &chart.Metadata{Name: "same thing we do every night"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/innermost", Data: []byte("innermost")}, 
}, } dep1.AddDependency(dep2) - tpls := allTemplates(ch1, chartutil.Values{}) + tpls := allTemplates(ch1, common.Values{}) if len(tpls) != 5 { t.Errorf("Expected 5 charts, got %d", len(tpls)) } @@ -590,19 +591,19 @@ func TestAllTemplates(t *testing.T) { func TestChartValuesContainsIsRoot(t *testing.T) { ch1 := &chart.Chart{ Metadata: &chart.Metadata{Name: "parent"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/isroot", Data: []byte("{{.Chart.IsRoot}}")}, }, } dep1 := &chart.Chart{ Metadata: &chart.Metadata{Name: "child"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/isroot", Data: []byte("{{.Chart.IsRoot}}")}, }, } ch1.AddDependency(dep1) - out, err := Render(ch1, chartutil.Values{}) + out, err := Render(ch1, common.Values{}) if err != nil { t.Fatalf("failed to render templates: %s", err) } @@ -622,13 +623,13 @@ func TestRenderDependency(t *testing.T) { toptpl := `Hello {{template "myblock"}}` ch := &chart.Chart{ Metadata: &chart.Metadata{Name: "outerchart"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/outer", Data: []byte(toptpl)}, }, } ch.AddDependency(&chart.Chart{ Metadata: &chart.Metadata{Name: "innerchart"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/inner", Data: []byte(deptpl)}, }, }) @@ -660,7 +661,7 @@ func TestRenderNestedValues(t *testing.T) { deepest := &chart.Chart{ Metadata: &chart.Metadata{Name: "deepest"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: deepestpath, Data: []byte(`And this same {{.Values.what}} that smiles {{.Values.global.when}}`)}, {Name: checkrelease, Data: []byte(`Tomorrow will be {{default "happy" .Release.Name }}`)}, }, @@ -669,7 +670,7 @@ func TestRenderNestedValues(t *testing.T) { inner := &chart.Chart{ Metadata: &chart.Metadata{Name: "herrick"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: innerpath, Data: []byte(`Old {{.Values.who}} is still a-flyin'`)}, }, Values: map[string]interface{}{"who": "Robert", "what": "glasses"}, @@ -678,7 +679,7 @@ func TestRenderNestedValues(t *testing.T) { outer := &chart.Chart{ Metadata: &chart.Metadata{Name: "top"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: outerpath, Data: []byte(`Gather ye {{.Values.what}} while ye may`)}, {Name: subchartspath, Data: []byte(`The glorious Lamp of {{.Subcharts.herrick.Subcharts.deepest.Values.where}}, the {{.Subcharts.herrick.Values.what}}`)}, }, @@ -706,15 +707,15 @@ func TestRenderNestedValues(t *testing.T) { }, } - tmp, err := chartutil.CoalesceValues(outer, injValues) + tmp, err := util.CoalesceValues(outer, injValues) if err != nil { t.Fatalf("Failed to coalesce values: %s", err) } - inject := chartutil.Values{ + inject := common.Values{ "Values": tmp, "Chart": outer.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "dyin", }, } @@ -754,30 +755,30 @@ func TestRenderNestedValues(t *testing.T) { func TestRenderBuiltinValues(t *testing.T) { inner := &chart.Chart{ - Metadata: &chart.Metadata{Name: "Latium"}, - Templates: []*chart.File{ + Metadata: &chart.Metadata{Name: "Latium", APIVersion: chart.APIVersionV2}, + Templates: []*common.File{ {Name: "templates/Lavinia", Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)}, {Name: "templates/From", Data: []byte(`{{.Files.author | printf "%s"}} {{.Files.Get "book/title.txt"}}`)}, }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "author", Data: []byte("Virgil")}, {Name: "book/title.txt", Data: 
[]byte("Aeneid")}, }, } outer := &chart.Chart{ - Metadata: &chart.Metadata{Name: "Troy"}, - Templates: []*chart.File{ + Metadata: &chart.Metadata{Name: "Troy", APIVersion: chart.APIVersionV2}, + Templates: []*common.File{ {Name: "templates/Aeneas", Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)}, {Name: "templates/Amata", Data: []byte(`{{.Subcharts.Latium.Chart.Name}} {{.Subcharts.Latium.Files.author | printf "%s"}}`)}, }, } outer.AddDependency(inner) - inject := chartutil.Values{ + inject := common.Values{ "Values": "", "Chart": outer.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "Aeneid", }, } @@ -806,7 +807,7 @@ func TestRenderBuiltinValues(t *testing.T) { func TestAlterFuncMap_include(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "conrad"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/quote", Data: []byte(`{{include "conrad/templates/_partial" . | indent 2}} dead.`)}, {Name: "templates/_partial", Data: []byte(`{{.Release.Name}} - he`)}, }, @@ -815,16 +816,16 @@ func TestAlterFuncMap_include(t *testing.T) { // Check nested reference in include FuncMap d := &chart.Chart{ Metadata: &chart.Metadata{Name: "nested"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/quote", Data: []byte(`{{include "nested/templates/quote" . | indent 2}} dead.`)}, {Name: "templates/_partial", Data: []byte(`{{.Release.Name}} - he`)}, }, } - v := chartutil.Values{ + v := common.Values{ "Values": "", "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "Mistah Kurtz", }, } @@ -849,19 +850,19 @@ func TestAlterFuncMap_include(t *testing.T) { func TestAlterFuncMap_require(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "conan"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/quote", Data: []byte(`All your base are belong to {{ required "A valid 'who' is required" .Values.who }}`)}, {Name: "templates/bases", Data: []byte(`All {{ required "A valid 'bases' is required" .Values.bases }} of them!`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "who": "us", "bases": 2, }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "That 90s meme", }, } @@ -882,12 +883,12 @@ func TestAlterFuncMap_require(t *testing.T) { // test required without passing in needed values with lint mode on // verifies lint replaces required with an empty string (should not fail) - lintValues := chartutil.Values{ - "Values": chartutil.Values{ + lintValues := common.Values{ + "Values": common.Values{ "who": "us", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "That 90s meme", }, } @@ -911,17 +912,17 @@ func TestAlterFuncMap_require(t *testing.T) { func TestAlterFuncMap_tpl(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplFunction"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/base", Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value}}" .}}`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "value": "myvalue", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -940,17 +941,17 @@ func TestAlterFuncMap_tpl(t *testing.T) { func TestAlterFuncMap_tplfunc(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: 
"TplFunction"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/base", Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value | quote}}" .}}`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "value": "myvalue", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -969,17 +970,17 @@ func TestAlterFuncMap_tplfunc(t *testing.T) { func TestAlterFuncMap_tplinclude(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplFunction"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/base", Data: []byte(`{{ tpl "{{include ` + "`" + `TplFunction/templates/_partial` + "`" + ` . | quote }}" .}}`)}, {Name: "templates/_partial", Data: []byte(`{{.Template.Name}}`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "value": "myvalue", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1000,15 +1001,15 @@ func TestRenderRecursionLimit(t *testing.T) { // endless recursion should produce an error c := &chart.Chart{ Metadata: &chart.Metadata{Name: "bad"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/base", Data: []byte(`{{include "recursion" . }}`)}, {Name: "templates/recursion", Data: []byte(`{{define "recursion"}}{{include "recursion" . }}{{end}}`)}, }, } - v := chartutil.Values{ + v := common.Values{ "Values": "", "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1030,7 +1031,7 @@ func TestRenderRecursionLimit(t *testing.T) { d := &chart.Chart{ Metadata: &chart.Metadata{Name: "overlook"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/quote", Data: []byte(repeatedIncl)}, {Name: "templates/_function", Data: []byte(printFunc)}, }, @@ -1054,23 +1055,23 @@ func TestRenderRecursionLimit(t *testing.T) { func TestRenderLoadTemplateForTplFromFile(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplLoadFromFile"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/base", Data: []byte(`{{ tpl (.Files.Get .Values.filename) . 
}}`)}, {Name: "templates/_function", Data: []byte(`{{define "test-function"}}test-function{{end}}`)}, }, - Files: []*chart.File{ + Files: []*common.File{ {Name: "test", Data: []byte(`{{ tpl (.Files.Get .Values.filename2) .}}`)}, {Name: "test2", Data: []byte(`{{include "test-function" .}}{{define "nested-define"}}nested-define-content{{end}} {{include "nested-define" .}}`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "filename": "test", "filename2": "test2", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1089,15 +1090,15 @@ func TestRenderLoadTemplateForTplFromFile(t *testing.T) { func TestRenderTplEmpty(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplEmpty"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/empty-string", Data: []byte(`{{tpl "" .}}`)}, {Name: "templates/empty-action", Data: []byte(`{{tpl "{{ \"\"}}" .}}`)}, {Name: "templates/only-defines", Data: []byte(`{{tpl "{{define \"not-invoked\"}}not-rendered{{end}}" .}}`)}, }, } - v := chartutil.Values{ + v := common.Values{ "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1123,7 +1124,7 @@ func TestRenderTplTemplateNames(t *testing.T) { // .Template.BasePath and .Name make it through c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplTemplateNames"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/default-basepath", Data: []byte(`{{tpl "{{ .Template.BasePath }}" .}}`)}, {Name: "templates/default-name", Data: []byte(`{{tpl "{{ .Template.Name }}" .}}`)}, {Name: "templates/modified-basepath", Data: []byte(`{{tpl "{{ .Template.BasePath }}" .Values.dot}}`)}, @@ -1131,10 +1132,10 @@ func TestRenderTplTemplateNames(t *testing.T) { {Name: "templates/modified-field", Data: []byte(`{{tpl "{{ .Template.Field }}" .Values.dot}}`)}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ - "dot": chartutil.Values{ - "Template": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ + "dot": common.Values{ + "Template": common.Values{ "BasePath": "path/to/template", "Name": "name-of-template", "Field": "extra-field", @@ -1142,7 +1143,7 @@ func TestRenderTplTemplateNames(t *testing.T) { }, }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1170,7 +1171,7 @@ func TestRenderTplRedefines(t *testing.T) { // Redefining a template inside 'tpl' does not affect the outer definition c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplRedefines"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/_partials", Data: []byte(`{{define "partial"}}original-in-partial{{end}}`)}, {Name: "templates/partial", Data: []byte( `before: {{include "partial" .}}\n{{tpl .Values.partialText .}}\nafter: {{include "partial" .}}`, @@ -1192,8 +1193,8 @@ func TestRenderTplRedefines(t *testing.T) { )}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "partialText": `{{define "partial"}}redefined-in-tpl{{end}}tpl: {{include "partial" .}}`, "manifestText": `{{define "manifest"}}redefined-in-tpl{{end}}tpl: {{include "manifest" .}}`, "manifestOnlyText": `tpl: {{include "manifest-only" .}}`, @@ -1205,7 +1206,7 @@ func TestRenderTplRedefines(t *testing.T) { "innerText": `{{define "nested"}}redefined-in-inner-tpl{{end}}inner-tpl: {{include "nested" .}} 
{{include "nested-outer" . }}`, }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1236,16 +1237,16 @@ func TestRenderTplMissingKey(t *testing.T) { // Rendering a missing key results in empty/zero output. c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplMissingKey"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/manifest", Data: []byte( `missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`, )}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{}, + v := common.Values{ + "Values": common.Values{}, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1269,16 +1270,16 @@ func TestRenderTplMissingKeyString(t *testing.T) { // Rendering a missing key results in error c := &chart.Chart{ Metadata: &chart.Metadata{Name: "TplMissingKeyStrict"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/manifest", Data: []byte( `missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`, )}, }, } - v := chartutil.Values{ - "Values": chartutil.Values{}, + v := common.Values{ + "Values": common.Values{}, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } @@ -1301,7 +1302,7 @@ func TestRenderTplMissingKeyString(t *testing.T) { func TestNestedHelpersProducesMultilineStacktrace(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "NestedHelperFunctions"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/svc.yaml", Data: []byte( `name: {{ include "nested_helper.name" . }}`, )}, @@ -1324,9 +1325,9 @@ NestedHelperFunctions/charts/common/templates/_helpers_2.tpl:1:49 executing "common.names.get_name" at <.Values.nonexistant.key>: nil pointer evaluating interface {}.key` - v := chartutil.Values{} + v := common.Values{} - val, _ := chartutil.CoalesceValues(c, v) + val, _ := util.CoalesceValues(c, v) vals := map[string]interface{}{ "Values": val.AsMap(), } @@ -1339,7 +1340,7 @@ NestedHelperFunctions/charts/common/templates/_helpers_2.tpl:1:49 func TestMultilineNoTemplateAssociatedError(t *testing.T) { c := &chart.Chart{ Metadata: &chart.Metadata{Name: "multiline"}, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/svc.yaml", Data: []byte( `name: {{ include "nested_helper.name" . 
}}`, )}, @@ -1357,9 +1358,9 @@ func TestMultilineNoTemplateAssociatedError(t *testing.T) { error calling include: template: no template "nested_helper.name" associated with template "gotpl"` - v := chartutil.Values{} + v := common.Values{} - val, _ := chartutil.CoalesceValues(c, v) + val, _ := util.CoalesceValues(c, v) vals := map[string]interface{}{ "Values": val.AsMap(), } @@ -1373,7 +1374,7 @@ func TestRenderCustomTemplateFuncs(t *testing.T) { // Create a chart with two templates that use custom functions c := &chart.Chart{ Metadata: &chart.Metadata{Name: "CustomFunc"}, - Templates: []*chart.File{ + Templates: []*common.File{ { Name: "templates/manifest", Data: []byte(`{{exclaim .Values.message}}`), @@ -1384,12 +1385,12 @@ func TestRenderCustomTemplateFuncs(t *testing.T) { }, }, } - v := chartutil.Values{ - "Values": chartutil.Values{ + v := common.Values{ + "Values": common.Values{ "message": "hello", }, "Chart": c.Metadata, - "Release": chartutil.Values{ + "Release": common.Values{ "Name": "TestRelease", }, } diff --git a/pkg/engine/files.go b/pkg/engine/files.go index 87166728c..f0a86988e 100644 --- a/pkg/engine/files.go +++ b/pkg/engine/files.go @@ -23,7 +23,7 @@ import ( "github.com/gobwas/glob" - chart "helm.sh/helm/v4/pkg/chart/v2" + "helm.sh/helm/v4/pkg/chart/common" ) // files is a map of files in a chart that can be accessed from a template. @@ -31,7 +31,7 @@ type files map[string][]byte // NewFiles creates a new files from chart files. // Given an []*chart.File (the format for files in a chart.Chart), extract a map of files. -func newFiles(from []*chart.File) files { +func newFiles(from []*common.File) files { files := make(map[string][]byte) for _, f := range from { files[f.Name] = f.Data diff --git a/pkg/engine/lookup_func.go b/pkg/engine/lookup_func.go index 605b43a48..18ed2b63b 100644 --- a/pkg/engine/lookup_func.go +++ b/pkg/engine/lookup_func.go @@ -35,7 +35,7 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam // NewLookupFunction returns a function for looking up objects in the cluster. // // If the resource does not exist, no error is raised. -func NewLookupFunction(config *rest.Config) lookupFunc { +func NewLookupFunction(config *rest.Config) lookupFunc { //nolint:revive return newLookupFunction(clientProviderFromConfig{config: config}) } diff --git a/pkg/release/v1/mock.go b/pkg/release/v1/mock.go index 3d3b0c2e2..c3a6594cc 100644 --- a/pkg/release/v1/mock.go +++ b/pkg/release/v1/mock.go @@ -20,6 +20,7 @@ import ( "fmt" "math/rand" + "helm.sh/helm/v4/pkg/chart/common" chart "helm.sh/helm/v4/pkg/chart/v2" "helm.sh/helm/v4/pkg/time" ) @@ -98,7 +99,7 @@ func Mock(opts *MockReleaseOptions) *Release { }, }, }, - Templates: []*chart.File{ + Templates: []*common.File{ {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, }, } diff --git a/pkg/release/v1/util/manifest_sorter.go b/pkg/release/v1/util/manifest_sorter.go index 21fdec7c6..6f7b4ea8b 100644 --- a/pkg/release/v1/util/manifest_sorter.go +++ b/pkg/release/v1/util/manifest_sorter.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/yaml" - chartutil "helm.sh/helm/v4/pkg/chart/v2/util" + "helm.sh/helm/v4/pkg/chart/common" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -74,7 +74,7 @@ var events = map[string]release.HookEvent{ // // Files that do not parse into the expected format are simply placed into a map and // returned. 
-func SortManifests(files map[string]string, _ chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+func SortManifests(files map[string]string, _ common.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
 	result := &result{}
 
 	var sortedFilePaths []string

From a8151ef4fef684992a66956399fded22f7f24502 Mon Sep 17 00:00:00 2001
From: George Jenkins
Date: Mon, 25 Aug 2025 11:45:49 -0700
Subject: [PATCH 82/88] Cleanup plugin config

Signed-off-by: George Jenkins
---
 internal/plugin/config.go                    | 63 +++++--------------
 internal/plugin/config_test.go               | 56 +++++++++++++++++
 internal/plugin/loader_test.go               |  8 ++-
 internal/plugin/metadata.go                  | 30 +---------
 internal/plugin/plugin_test.go               |  4 +-
 internal/plugin/plugin_type_registry.go      | 10 ++-
 internal/plugin/plugin_type_registry_test.go |  2 +-
 internal/plugin/runtime_subprocess_test.go   |  2 +-
 internal/plugin/schema/cli.go                | 19 ++++++
 internal/plugin/schema/doc.go                | 18 ++++++
 internal/plugin/schema/getter.go             | 21 ++++++-
 internal/plugin/schema/postrenderer.go       |  6 ++
 pkg/cmd/load_plugins.go                      |  4 +-
 pkg/cmd/plugin_list.go                       |  3 +-
 .../helm/plugins/postrenderer-v1/plugin.yaml |  4 --
 pkg/getter/plugingetter.go                   |  2 +-
 pkg/getter/plugingetter_test.go              |  2 +-
 17 files changed, 161 insertions(+), 93 deletions(-)
 create mode 100644 internal/plugin/config_test.go
 create mode 100644 internal/plugin/schema/doc.go

diff --git a/internal/plugin/config.go b/internal/plugin/config.go
index e8bf4e356..e1f491779 100644
--- a/internal/plugin/config.go
+++ b/internal/plugin/config.go
@@ -16,72 +16,39 @@ limitations under the License.
 package plugin
 
 import (
+	"bytes"
 	"fmt"
+	"reflect"
 
 	"go.yaml.in/yaml/v3"
 )
 
-// Config interface defines the methods that all plugin type configurations must implement
+// Config represents a plugin type specific configuration
+// It is expected to type assert (cast) a Config to its expected underlying type (schema.ConfigCLIV1, schema.ConfigGetterV1, etc).
type Config interface { Validate() error } -// ConfigCLI represents the configuration for CLI plugins -type ConfigCLI struct { - // Usage is the single-line usage text shown in help - // For recommended syntax, see [spf13/cobra.command.Command] Use field comment: - // https://pkg.go.dev/github.com/spf13/cobra#Command - Usage string `yaml:"usage"` - // ShortHelp is the short description shown in the 'helm help' output - ShortHelp string `yaml:"shortHelp"` - // LongHelp is the long message shown in the 'helm help ' output - LongHelp string `yaml:"longHelp"` - // IgnoreFlags ignores any flags passed in from Helm - IgnoreFlags bool `yaml:"ignoreFlags"` -} - -// ConfigGetter represents the configuration for download plugins -type ConfigGetter struct { - // Protocols are the list of URL schemes supported by this downloader - Protocols []string `yaml:"protocols"` -} - -// ConfigPostrenderer represents the configuration for postrenderer plugins -// there are no runtime-independent configurations for postrenderer/v1 plugin type -type ConfigPostrenderer struct{} - -func (c *ConfigCLI) Validate() error { - // Config validation for CLI plugins - return nil -} +func unmarshaConfig(pluginType string, configData map[string]any) (Config, error) { -func (c *ConfigGetter) Validate() error { - if len(c.Protocols) == 0 { - return fmt.Errorf("getter has no protocols") - } - for i, protocol := range c.Protocols { - if protocol == "" { - return fmt.Errorf("getter has empty protocol at index %d", i) - } + pluginTypeMeta, ok := pluginTypesIndex[pluginType] + if !ok { + return nil, fmt.Errorf("unknown plugin type %q", pluginType) } - return nil -} -func (c *ConfigPostrenderer) Validate() error { - // Config validation for postrenderer plugins - return nil -} + // TODO: Avoid (yaml) serialization/deserialization for type conversion here -func remarshalConfig[T Config](configData map[string]any) (Config, error) { data, err := yaml.Marshal(configData) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to marshel config data (plugin type %s): %w", pluginType, err) } - var config T - if err := yaml.Unmarshal(data, &config); err != nil { + config := reflect.New(pluginTypeMeta.configType) + d := yaml.NewDecoder(bytes.NewReader(data)) + d.KnownFields(true) + if err := d.Decode(config.Interface()); err != nil { return nil, err } - return config, nil + return config.Interface().(Config), nil } diff --git a/internal/plugin/config_test.go b/internal/plugin/config_test.go new file mode 100644 index 000000000..c51b77ff0 --- /dev/null +++ b/internal/plugin/config_test.go @@ -0,0 +1,56 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/internal/plugin/schema" +) + +func TestUnmarshaConfig(t *testing.T) { + // Test unmarshalling a CLI plugin config + { + config, err := unmarshaConfig("cli/v1", map[string]any{ + "usage": "usage string", + "shortHelp": "short help string", + "longHelp": "long help string", + "ignoreFlags": true, + }) + require.NoError(t, err) + + require.IsType(t, &schema.ConfigCLIV1{}, config) + assert.Equal(t, schema.ConfigCLIV1{ + Usage: "usage string", + ShortHelp: "short help string", + LongHelp: "long help string", + IgnoreFlags: true, + }, *(config.(*schema.ConfigCLIV1))) + } + + // Test unmarshalling invalid config data + { + config, err := unmarshaConfig("cli/v1", map[string]any{ + "invalid field": "foo", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "field not found") + assert.Nil(t, config) + } +} diff --git a/internal/plugin/loader_test.go b/internal/plugin/loader_test.go index d214f7b6b..47c214910 100644 --- a/internal/plugin/loader_test.go +++ b/internal/plugin/loader_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "helm.sh/helm/v4/internal/plugin/schema" ) func TestPeekAPIVersion(t *testing.T) { @@ -73,7 +75,7 @@ func TestLoadDir(t *testing.T) { Version: "0.1.0", Type: "cli/v1", Runtime: "subprocess", - Config: &ConfigCLI{ + Config: &schema.ConfigCLIV1{ Usage: usage, ShortHelp: "echo hello message", LongHelp: "description", @@ -145,7 +147,7 @@ func TestLoadDirGetter(t *testing.T) { Type: "getter/v1", APIVersion: "v1", Runtime: "subprocess", - Config: &ConfigGetter{ + Config: &schema.ConfigGetterV1{ Protocols: []string{"myprotocol", "myprotocols"}, }, RuntimeConfig: &RuntimeConfigSubprocess{ @@ -173,7 +175,7 @@ func TestPostRenderer(t *testing.T) { Type: "postrenderer/v1", APIVersion: "v1", Runtime: "subprocess", - Config: &ConfigPostrenderer{}, + Config: &schema.ConfigPostRendererV1{}, RuntimeConfig: &RuntimeConfigSubprocess{ PlatformCommand: []PlatformCommand{ { diff --git a/internal/plugin/metadata.go b/internal/plugin/metadata.go index 1c4f02836..111c0599f 100644 --- a/internal/plugin/metadata.go +++ b/internal/plugin/metadata.go @@ -123,11 +123,11 @@ func buildLegacyConfig(m MetadataLegacy, pluginType string) Config { for _, d := range m.Downloaders { protocols = append(protocols, d.Protocols...) 
} - return &ConfigGetter{ + return &schema.ConfigGetterV1{ Protocols: protocols, } case "cli/v1": - return &ConfigCLI{ + return &schema.ConfigCLIV1{ Usage: "", // Legacy plugins don't have Usage field for command syntax ShortHelp: m.Usage, // Map legacy usage to shortHelp LongHelp: m.Description, // Map legacy description to longHelp @@ -175,7 +175,7 @@ func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig { func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) { - config, err := convertMetadataConfig(mv1.Type, mv1.Config) + config, err := unmarshaConfig(mv1.Type, mv1.Config) if err != nil { return nil, err } @@ -197,30 +197,6 @@ func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) { }, nil } -func convertMetadataConfig(pluginType string, configRaw map[string]any) (Config, error) { - var err error - var config Config - - switch pluginType { - case "test/v1": - config, err = remarshalConfig[*schema.ConfigTestV1](configRaw) - case "cli/v1": - config, err = remarshalConfig[*ConfigCLI](configRaw) - case "getter/v1": - config, err = remarshalConfig[*ConfigGetter](configRaw) - case "postrenderer/v1": - config, err = remarshalConfig[*ConfigPostrenderer](configRaw) - default: - return nil, fmt.Errorf("unsupported plugin type: %s", pluginType) - } - - if err != nil { - return nil, fmt.Errorf("failed to unmarshal config for %s plugin type: %w", pluginType, err) - } - - return config, nil -} - func convertMetdataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) { var runtimeConfig RuntimeConfig var err error diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index a4de8e52a..b6c2245ff 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -17,6 +17,8 @@ package plugin import ( "testing" + + "helm.sh/helm/v4/internal/plugin/schema" ) func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginRuntime { @@ -46,7 +48,7 @@ func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginR Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", - Config: &ConfigCLI{ + Config: &schema.ConfigCLIV1{ Usage: "Mock plugin", ShortHelp: "Mock plugin", LongHelp: "Mock plugin for testing", diff --git a/internal/plugin/plugin_type_registry.go b/internal/plugin/plugin_type_registry.go index 63450b823..da6546c47 100644 --- a/internal/plugin/plugin_type_registry.go +++ b/internal/plugin/plugin_type_registry.go @@ -81,13 +81,19 @@ var pluginTypes = []pluginTypeMeta{ pluginType: "cli/v1", inputType: reflect.TypeOf(schema.InputMessageCLIV1{}), outputType: reflect.TypeOf(schema.OutputMessageCLIV1{}), - configType: reflect.TypeOf(ConfigCLI{}), + configType: reflect.TypeOf(schema.ConfigCLIV1{}), }, { pluginType: "getter/v1", inputType: reflect.TypeOf(schema.InputMessageGetterV1{}), outputType: reflect.TypeOf(schema.OutputMessageGetterV1{}), - configType: reflect.TypeOf(ConfigGetter{}), + configType: reflect.TypeOf(schema.ConfigGetterV1{}), + }, + { + pluginType: "postrenderer/v1", + inputType: reflect.TypeOf(schema.InputMessagePostRendererV1{}), + outputType: reflect.TypeOf(schema.OutputMessagePostRendererV1{}), + configType: reflect.TypeOf(schema.ConfigPostRendererV1{}), }, } diff --git a/internal/plugin/plugin_type_registry_test.go b/internal/plugin/plugin_type_registry_test.go index ee8a44bb6..22f26262d 100644 --- a/internal/plugin/plugin_type_registry_test.go +++ b/internal/plugin/plugin_type_registry_test.go @@ -34,5 +34,5 @@ func TestMakeOutputMessage(t *testing.T) { func 
TestMakeConfig(t *testing.T) { ptm := pluginTypesIndex["getter/v1"] config := reflect.New(ptm.configType).Interface().(Config) - assert.IsType(t, &ConfigGetter{}, config) + assert.IsType(t, &schema.ConfigGetterV1{}, config) } diff --git a/internal/plugin/runtime_subprocess_test.go b/internal/plugin/runtime_subprocess_test.go index dab372027..243f4ad7c 100644 --- a/internal/plugin/runtime_subprocess_test.go +++ b/internal/plugin/runtime_subprocess_test.go @@ -45,7 +45,7 @@ func mockSubprocessCLIPluginErrorExit(t *testing.T, pluginName string, exitCode Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", - Config: &ConfigCLI{ + Config: &schema.ConfigCLIV1{ Usage: "Mock plugin", ShortHelp: "Mock plugin", LongHelp: "Mock plugin for testing", diff --git a/internal/plugin/schema/cli.go b/internal/plugin/schema/cli.go index 3976d3737..702b27e45 100644 --- a/internal/plugin/schema/cli.go +++ b/internal/plugin/schema/cli.go @@ -27,3 +27,22 @@ type InputMessageCLIV1 struct { type OutputMessageCLIV1 struct { Data *bytes.Buffer `json:"data"` } + +// ConfigCLIV1 represents the configuration for CLI plugins +type ConfigCLIV1 struct { + // Usage is the single-line usage text shown in help + // For recommended syntax, see [spf13/cobra.command.Command] Use field comment: + // https://pkg.go.dev/github.com/spf13/cobra#Command + Usage string `yaml:"usage"` + // ShortHelp is the short description shown in the 'helm help' output + ShortHelp string `yaml:"shortHelp"` + // LongHelp is the long message shown in the 'helm help ' output + LongHelp string `yaml:"longHelp"` + // IgnoreFlags ignores any flags passed in from Helm + IgnoreFlags bool `yaml:"ignoreFlags"` +} + +func (c *ConfigCLIV1) Validate() error { + // Config validation for CLI plugins + return nil +} diff --git a/internal/plugin/schema/doc.go b/internal/plugin/schema/doc.go new file mode 100644 index 000000000..4b3fe5d49 --- /dev/null +++ b/internal/plugin/schema/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The Helm Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + + */ + +package schema diff --git a/internal/plugin/schema/getter.go b/internal/plugin/schema/getter.go index f9840008e..2c5e81df1 100644 --- a/internal/plugin/schema/getter.go +++ b/internal/plugin/schema/getter.go @@ -14,10 +14,11 @@ package schema import ( + "fmt" "time" ) -// TODO: can we generate these plugin input/outputs? +// TODO: can we generate these plugin input/output messages? 
type GetterOptionsV1 struct { URL string @@ -45,3 +46,21 @@ type InputMessageGetterV1 struct { type OutputMessageGetterV1 struct { Data []byte `json:"data"` } + +// ConfigGetterV1 represents the configuration for download plugins +type ConfigGetterV1 struct { + // Protocols are the list of URL schemes supported by this downloader + Protocols []string `yaml:"protocols"` +} + +func (c *ConfigGetterV1) Validate() error { + if len(c.Protocols) == 0 { + return fmt.Errorf("getter has no protocols") + } + for i, protocol := range c.Protocols { + if protocol == "" { + return fmt.Errorf("getter has empty protocol at index %d", i) + } + } + return nil +} diff --git a/internal/plugin/schema/postrenderer.go b/internal/plugin/schema/postrenderer.go index 82fd3059f..ef51a8a61 100644 --- a/internal/plugin/schema/postrenderer.go +++ b/internal/plugin/schema/postrenderer.go @@ -30,3 +30,9 @@ type InputMessagePostRendererV1 struct { type OutputMessagePostRendererV1 struct { Manifests *bytes.Buffer `json:"manifests"` } + +type ConfigPostRendererV1 struct{} + +func (c *ConfigPostRendererV1) Validate() error { + return nil +} diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index 75cfdc3cf..c0593f384 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -71,7 +71,7 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { for _, plug := range found { var use, short, long string var ignoreFlags bool - if cliConfig, ok := plug.Metadata().Config.(*plugin.ConfigCLI); ok { + if cliConfig, ok := plug.Metadata().Config.(*schema.ConfigCLIV1); ok { use = cliConfig.Usage short = cliConfig.ShortHelp long = cliConfig.LongHelp @@ -340,7 +340,7 @@ func pluginDynamicComp(plug plugin.Plugin, cmd *cobra.Command, args []string, to } var ignoreFlags bool - if cliConfig, ok := subprocessPlug.Metadata().Config.(*plugin.ConfigCLI); ok { + if cliConfig, ok := subprocessPlug.Metadata().Config.(*schema.ConfigCLIV1); ok { ignoreFlags = cliConfig.IgnoreFlags } diff --git a/pkg/cmd/plugin_list.go b/pkg/cmd/plugin_list.go index 9b2895441..74e969e04 100644 --- a/pkg/cmd/plugin_list.go +++ b/pkg/cmd/plugin_list.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/cobra" "helm.sh/helm/v4/internal/plugin" + "helm.sh/helm/v4/internal/plugin/schema" ) func newPluginListCmd(out io.Writer) *cobra.Command { @@ -106,7 +107,7 @@ func compListPlugins(_ string, ignoredPluginNames []string) []string { for _, p := range filteredPlugins { m := p.Metadata() var shortHelp string - if config, ok := m.Config.(*plugin.ConfigCLI); ok { + if config, ok := m.Config.(*schema.ConfigCLIV1); ok { shortHelp = config.ShortHelp } pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata().Name, shortHelp)) diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml index d4cd57a13..b6e8afa57 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml @@ -4,10 +4,6 @@ name: "postrenderer-v1" version: "1.2.3" type: postrenderer/v1 runtime: subprocess -config: - shortHelp: "echo test" - longHelp: "This echos test" - ignoreFlags: false runtimeConfig: platformCommand: - command: "${HELM_PLUGIN_DIR}/sed-test.sh" diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index b2dfb3e42..32dbc70c9 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -49,7 +49,7 @@ func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) 
{ } results := make([]Provider, 0, len(plgs)) for _, plg := range plgs { - if c, ok := plg.Metadata().Config.(*plugin.ConfigGetter); ok { + if c, ok := plg.Metadata().Config.(*schema.ConfigGetterV1); ok { results = append(results, Provider{ Schemes: c.Protocols, New: pluginConstructorBuilder(plg), diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 23cfc80f8..8faaf7329 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -110,7 +110,7 @@ func (t *testPlugin) Metadata() plugin.Metadata { Type: "cli/v1", APIVersion: "v1", Runtime: "subprocess", - Config: &plugin.ConfigCLI{}, + Config: &schema.ConfigCLIV1{}, RuntimeConfig: &plugin.RuntimeConfigSubprocess{ PlatformCommand: []plugin.PlatformCommand{ { From 38d1a7376ff77a609874b3263427f711da946e32 Mon Sep 17 00:00:00 2001 From: Kamil Swiechowski Date: Fri, 11 Jul 2025 16:52:58 +0200 Subject: [PATCH 83/88] fix: throw warning when chart version is not semverv2 Signed-off-by: Kamil Swiechowski --- pkg/chart/v2/lint/lint_test.go | 13 +++++-- pkg/chart/v2/lint/rules/chartfile.go | 11 ++++++ pkg/chart/v2/lint/rules/chartfile_test.go | 39 ++++++++++++++++++- pkg/chart/v2/metadata.go | 2 +- ...hart-with-bad-subcharts-with-subcharts.txt | 1 + 5 files changed, 59 insertions(+), 7 deletions(-) diff --git a/pkg/chart/v2/lint/lint_test.go b/pkg/chart/v2/lint/lint_test.go index 3c777e2bb..bd3ec1f1f 100644 --- a/pkg/chart/v2/lint/lint_test.go +++ b/pkg/chart/v2/lint/lint_test.go @@ -42,12 +42,12 @@ const invalidChartFileDir = "rules/testdata/invalidchartfile" func TestBadChart(t *testing.T) { m := RunAll(badChartDir, values, namespace).Messages - if len(m) != 8 { + if len(m) != 9 { t.Errorf("Number of errors %v", len(m)) t.Errorf("All didn't fail with expected errors, got %#v", m) } - // There should be one INFO, one WARNING, and 2 ERROR messages, check for them - var i, w, e, e2, e3, e4, e5, e6 bool + // There should be one INFO, 2 WARNING and 2 ERROR messages, check for them + var i, w, w2, e, e2, e3, e4, e5, e6 bool for _, msg := range m { if msg.Severity == support.InfoSev { if strings.Contains(msg.Err.Error(), "icon is recommended") { @@ -83,8 +83,13 @@ func TestBadChart(t *testing.T) { e6 = true } } + if msg.Severity == support.WarningSev { + if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") { + w2 = true + } + } } - if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w { + if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w || !w2 { t.Errorf("Didn't find all the expected errors, got %#v", m) } } diff --git a/pkg/chart/v2/lint/rules/chartfile.go b/pkg/chart/v2/lint/rules/chartfile.go index 185f524a4..806363477 100644 --- a/pkg/chart/v2/lint/rules/chartfile.go +++ b/pkg/chart/v2/lint/rules/chartfile.go @@ -67,6 +67,7 @@ func Chartfile(linter *support.Linter) { linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) + linter.RunLinterRule(support.WarningSev, chartFileName, validateChartVersionStrictSemVerV2(chartFile)) } func validateChartVersionType(data map[string]interface{}) error { @@ -158,6 +159,16 @@ func validateChartVersion(cf *chart.Metadata) error { return nil } +func validateChartVersionStrictSemVerV2(cf *chart.Metadata) error { + _, err := semver.StrictNewVersion(cf.Version) + + if err != nil { + return fmt.Errorf("version '%s' is not a valid 
SemVerV2", cf.Version) + } + + return nil +} + func validateChartMaintainer(cf *chart.Metadata) error { for _, maintainer := range cf.Maintainers { if maintainer == nil { diff --git a/pkg/chart/v2/lint/rules/chartfile_test.go b/pkg/chart/v2/lint/rules/chartfile_test.go index 5a1ad2f24..ddaa72510 100644 --- a/pkg/chart/v2/lint/rules/chartfile_test.go +++ b/pkg/chart/v2/lint/rules/chartfile_test.go @@ -108,6 +108,35 @@ func TestValidateChartVersion(t *testing.T) { } } +func TestValidateChartVersionStrictSemVerV2(t *testing.T) { + var failTest = []struct { + Version string + ErrorMsg string + }{ + {"", "version '' is not a valid SemVerV2"}, + {"1", "version '1' is not a valid SemVerV2"}, + {"1.1", "version '1.1' is not a valid SemVerV2"}, + } + + var successTest = []string{"1.1.1", "0.0.1+build", "0.0.1-beta"} + + for _, test := range failTest { + badChart.Version = test.Version + err := validateChartVersionStrictSemVerV2(badChart) + if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) { + t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg) + } + } + + for _, version := range successTest { + badChart.Version = version + err := validateChartVersionStrictSemVerV2(badChart) + if err != nil { + t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version) + } + } +} + func TestValidateChartMaintainer(t *testing.T) { var failTest = []struct { Name string @@ -226,7 +255,7 @@ func TestChartfile(t *testing.T) { linter := support.Linter{ChartDir: badChartDir} Chartfile(&linter) msgs := linter.Messages - expectedNumberOfErrorMessages := 6 + expectedNumberOfErrorMessages := 7 if len(msgs) != expectedNumberOfErrorMessages { t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs)) @@ -256,13 +285,16 @@ func TestChartfile(t *testing.T) { if !strings.Contains(msgs[5].Err.Error(), "dependencies are not valid in the Chart file with apiVersion") { t.Errorf("Unexpected message 5: %s", msgs[5].Err) } + if !strings.Contains(msgs[6].Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") { + t.Errorf("Unexpected message 6: %s", msgs[6].Err) + } }) t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) { linter := support.Linter{ChartDir: anotherBadChartDir} Chartfile(&linter) msgs := linter.Messages - expectedNumberOfErrorMessages := 3 + expectedNumberOfErrorMessages := 4 if len(msgs) != expectedNumberOfErrorMessages { t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs)) @@ -280,5 +312,8 @@ func TestChartfile(t *testing.T) { if !strings.Contains(msgs[2].Err.Error(), "appVersion should be of type string") { t.Errorf("Unexpected message 2: %s", msgs[2].Err) } + if !strings.Contains(msgs[3].Err.Error(), "version '7.2445e+06' is not a valid SemVerV2") { + t.Errorf("Unexpected message 3: %s", msgs[3].Err) + } }) } diff --git a/pkg/chart/v2/metadata.go b/pkg/chart/v2/metadata.go index d213a3491..c46007863 100644 --- a/pkg/chart/v2/metadata.go +++ b/pkg/chart/v2/metadata.go @@ -52,7 +52,7 @@ type Metadata struct { Home string `json:"home,omitempty"` // Source is the URL to the source code of this chart Sources []string `json:"sources,omitempty"` - // A SemVer 2 conformant version string of the chart. Required. + // A version string of the chart. Required. 
Version string `json:"version,omitempty"` // A one-sentence description of the chart Description string `json:"description,omitempty"` diff --git a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt index 7b445a69a..67ed58ec3 100644 --- a/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt +++ b/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt @@ -9,6 +9,7 @@ [ERROR] Chart.yaml: apiVersion is required. The value must be either "v1" or "v2" [ERROR] Chart.yaml: version is required [INFO] Chart.yaml: icon is recommended +[WARNING] Chart.yaml: version '' is not a valid SemVerV2 [WARNING] templates/: directory does not exist [ERROR] : unable to load chart validation: chart.metadata.name is required From cd76ae1c934aeaa22842ef30c898e5888022973b Mon Sep 17 00:00:00 2001 From: Kamil Swiechowski Date: Wed, 3 Sep 2025 08:20:52 +0200 Subject: [PATCH 84/88] feat:strict compliance with semverv2 for chart/v3/linter Signed-off-by: Kamil Swiechowski --- internal/chart/v3/lint/lint_test.go | 2 +- internal/chart/v3/lint/rules/chartfile.go | 4 ++-- internal/chart/v3/lint/rules/chartfile_test.go | 8 +++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/internal/chart/v3/lint/lint_test.go b/internal/chart/v3/lint/lint_test.go index af44cc58d..6f5912ae7 100644 --- a/internal/chart/v3/lint/lint_test.go +++ b/internal/chart/v3/lint/lint_test.go @@ -60,7 +60,7 @@ func TestBadChartV3(t *testing.T) { } } if msg.Severity == support.ErrorSev { - if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVer") { + if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") { e = true } if strings.Contains(msg.Err.Error(), "name is required") { diff --git a/internal/chart/v3/lint/rules/chartfile.go b/internal/chart/v3/lint/rules/chartfile.go index e72a0d3b2..fc246ba80 100644 --- a/internal/chart/v3/lint/rules/chartfile.go +++ b/internal/chart/v3/lint/rules/chartfile.go @@ -140,9 +140,9 @@ func validateChartVersion(cf *chart.Metadata) error { return errors.New("version is required") } - version, err := semver.NewVersion(cf.Version) + version, err := semver.StrictNewVersion(cf.Version) if err != nil { - return fmt.Errorf("version '%s' is not a valid SemVer", cf.Version) + return fmt.Errorf("version '%s' is not a valid SemVerV2", cf.Version) } c, err := semver.NewConstraint(">0.0.0-0") diff --git a/internal/chart/v3/lint/rules/chartfile_test.go b/internal/chart/v3/lint/rules/chartfile_test.go index 070cc244d..57893e151 100644 --- a/internal/chart/v3/lint/rules/chartfile_test.go +++ b/internal/chart/v3/lint/rules/chartfile_test.go @@ -84,9 +84,11 @@ func TestValidateChartVersion(t *testing.T) { ErrorMsg string }{ {"", "version is required"}, - {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"}, - {"waps", "'waps' is not a valid SemVer"}, - {"-3", "'-3' is not a valid SemVer"}, + {"1.2.3.4", "version '1.2.3.4' is not a valid SemVerV2"}, + {"waps", "'waps' is not a valid SemVerV2"}, + {"-3", "'-3' is not a valid SemVerV2"}, + {"1.1", "'1.1' is not a valid SemVerV2"}, + {"1", "'1' is not a valid SemVerV2"}, } var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"} From 031050675baae8bcf183cdb615d154f909db614b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 16:54:43 +0000 Subject: [PATCH 85/88] chore(deps): bump github.com/spf13/cobra from 1.9.1 to 
From 031050675baae8bcf183cdb615d154f909db614b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 3 Sep 2025 16:54:43 +0000
Subject: [PATCH 85/88] chore(deps): bump github.com/spf13/cobra from 1.9.1 to
 1.10.1

Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.9.1 to 1.10.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-version: 1.10.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 4 ++--
 go.sum | 9 ++++-----
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/go.mod b/go.mod
index ab8797e6f..bbd273413 100644
--- a/go.mod
+++ b/go.mod
@@ -30,8 +30,8 @@
 	github.com/opencontainers/image-spec v1.1.1
 	github.com/rubenv/sql-migrate v1.8.0
 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
-	github.com/spf13/cobra v1.9.1
-	github.com/spf13/pflag v1.0.7
+	github.com/spf13/cobra v1.10.1
+	github.com/spf13/pflag v1.0.9
 	github.com/stretchr/testify v1.11.1
 	github.com/tetratelabs/wazero v1.9.0
 	go.yaml.in/yaml/v3 v3.0.4
diff --git a/go.sum b/go.sum
index 076b6e5bd..cca32e249 100644
--- a/go.sum
+++ b/go.sum
@@ -300,11 +300,10 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
 github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
-github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
-github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=

From 3e97f216cc19bd26ad970a5298d6802ac5fa2ccb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 4 Sep 2025 21:06:24 +0000
Subject: [PATCH 86/88] chore(deps): bump actions/stale from 9.1.0 to 10.0.0

Bumps [actions/stale](https://github.com/actions/stale) from 9.1.0 to 10.0.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/5bef64f19d7facfb25b37b414482c7164d639639...3a9db7e6a41a89f618792c92c0e97cc736e1b13f)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: 10.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/stale.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
index 3417e1734..965410793 100644
--- a/.github/workflows/stale.yaml
+++ b/.github/workflows/stale.yaml
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+      - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: 'This issue has been marked as stale because it has been open for 90 days with no activity. This thread will be automatically closed in 30 days if no further activity occurs.'

From a645dfb7f8d1315b500535c1e0fa2b703f097c67 Mon Sep 17 00:00:00 2001
From: Kamil Swiechowski
Date: Fri, 5 Sep 2025 13:10:31 +0200
Subject: [PATCH 87/88] fix:semverv2 lint test error message

Signed-off-by: Kamil Swiechowski
---
 pkg/chart/v2/lint/rules/chartfile_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/chart/v2/lint/rules/chartfile_test.go b/pkg/chart/v2/lint/rules/chartfile_test.go
index ddaa72510..692358426 100644
--- a/pkg/chart/v2/lint/rules/chartfile_test.go
+++ b/pkg/chart/v2/lint/rules/chartfile_test.go
@@ -124,7 +124,7 @@ func TestValidateChartVersionStrictSemVerV2(t *testing.T) {
 		badChart.Version = test.Version
 		err := validateChartVersionStrictSemVerV2(badChart)
 		if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
-			t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+			t.Errorf("validateChartVersionStrictSemVerV2(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
 		}
 	}
 
@@ -132,7 +132,7 @@ func TestValidateChartVersionStrictSemVerV2(t *testing.T) {
 		badChart.Version = version
 		err := validateChartVersionStrictSemVerV2(badChart)
 		if err != nil {
-			t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version)
+			t.Errorf("validateChartVersionStrictSemVerV2(%s) to return no error, got a linter error", version)
 		}
 	}
 }

From 1904ef6ad87c43cb0190ec45e3cf1cd03c7bdea8 Mon Sep 17 00:00:00 2001
From: Stephanie Hohenberg
Date: Sun, 7 Sep 2025 11:01:16 -0400
Subject: [PATCH 88/88] Adapt test-coverage command to be able to run for a certain package

Signed-off-by: Stephanie Hohenberg
---
 Makefile            | 9 +++------
 scripts/coverage.sh | 3 ++-
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/Makefile b/Makefile
index 5e1bfc6c2..e3e6cb538 100644
--- a/Makefile
+++ b/Makefile
@@ -118,11 +118,12 @@ test-unit:
 	go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./internal/chart/v3/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
 
+# To run the coverage for a specific package use: make test-coverage PKG=./pkg/action
 .PHONY: test-coverage
 test-coverage:
 	@echo
-	@echo "==> Running unit tests with coverage <=="
-	@ ./scripts/coverage.sh
+	@echo "==> Running unit tests with coverage: $(PKG) <=="
+	@ ./scripts/coverage.sh $(PKG)
 
 .PHONY: test-style
 test-style:
@@ -148,10 +149,6 @@ test-acceptance: build build-cross
 test-acceptance-completion: ACCEPTANCE_RUN_TESTS = shells.robot
 test-acceptance-completion: test-acceptance
 
-.PHONY: coverage
-coverage:
-	@scripts/coverage.sh
-
 .PHONY: format
 format: $(GOIMPORTS)
 	go list -f '{{.Dir}}' ./... | xargs $(GOIMPORTS) -w -local helm.sh/helm
diff --git a/scripts/coverage.sh b/scripts/coverage.sh
index 2164d94da..487d4eeee 100755
--- a/scripts/coverage.sh
+++ b/scripts/coverage.sh
@@ -19,9 +19,10 @@ set -euo pipefail
 covermode=${COVERMODE:-atomic}
 coverdir=$(mktemp -d /tmp/coverage.XXXXXXXXXX)
 profile="${coverdir}/cover.out"
+target="${1:-./...}" # by default the whole repository is tested
 
 generate_cover_data() {
-  for d in $(go list ./...) ; do
+  for d in $(go list "$target"); do
     (
       local output="${coverdir}/${d//\//-}.cover"
       go test -coverprofile="${output}" -covermode="$covermode" "$d"
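For reference, the Makefile comment and the new positional argument in coverage.sh suggest two equivalent ways to scope a coverage run (the package path below is only an example):

    make test-coverage PKG=./pkg/action
    ./scripts/coverage.sh ./pkg/action

When PKG is not set, $(PKG) expands to nothing and the script falls back to its ./... default, so a plain make test-coverage run should still cover the whole repository.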