pull/30867/merge
commit 1596b8fba6 by Matthieu MOREL, committed via GitHub

@@ -20,6 +20,7 @@ linters:
   enable:
     - depguard
     - dupl
+    - gocritic
     - gomodguard
     - govet
     - ineffassign
@@ -58,6 +59,26 @@ linters:
     dupl:
       threshold: 400
+    gocritic:
+      disabled-checks:
+        - appendAssign
+        - badCall
+        - commentedOutCode
+        - deferInLoop
+        - exposedSyncMutex
+        - filepathJoin
+        - hugeParam
+        - importShadow
+        - nestingReduce
+        - ptrToRefParam
+        - rangeValCopy
+        - tooManyResultsChecker
+        - typeAssertChain
+        - unnamedResult
+        - unnecessaryDefer
+        - whyNoLint
+      enable-all: true
     gomodguard:
       blocked:
         modules:
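
The hunks below apply gocritic's suggestions across the Helm codebase: 0o-prefixed octal literals, direct empty-string comparisons, http.NoBody for bodiless requests, combined parameter types, and switch statements instead of long if/else chains. A minimal, self-contained Go sketch of the most common rewrites (the function name and URL here are invented for illustration, not taken from the Helm sources):

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    func prepare(dir string) error {
        // octalLiteral: 0o755 is the same value as the legacy literal 0755,
        // but the base is explicit.
        if err := os.MkdirAll(dir, 0o755); err != nil {
            return err
        }

        // emptyStringTest: compare against "" instead of checking len(s) > 0.
        if dir != "" {
            fmt.Println("prepared", dir)
        }

        // httpNoBody: pass http.NoBody rather than nil when a request has no body.
        req, err := http.NewRequest(http.MethodGet, "https://example.invalid/index.yaml", http.NoBody)
        if err != nil {
            return err
        }
        _ = req
        return nil
    }

    func main() {
        if err := prepare("testdir"); err != nil {
            fmt.Println("error:", err)
        }
    }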

@@ -113,7 +113,8 @@ func LoadDir(dir string) (*chart.Chart, error) {
         files = append(files, &BufferedFile{Name: n, Data: data})
         return nil
     }
-    if err = sympath.Walk(topdir, walk); err != nil {
+    err = sympath.Walk(topdir, walk)
+    if err != nil {
         return c, err
     }

@@ -363,7 +363,7 @@ func TestLoadInvalidArchive(t *testing.T) {
     h := &tar.Header{
         Name:    internalPath,
-        Mode:    0755,
+        Mode:    0o755,
         Size:    int64(len(body)),
         ModTime: time.Now(),
     }

@@ -58,7 +58,7 @@ func SaveChartfile(filename string, cf *chart.Metadata) error {
     if err != nil {
         return err
     }
-    return os.WriteFile(filename, out, 0644)
+    return os.WriteFile(filename, out, 0o644)
 }
 
 // IsChartDir validate a chart directory.

@@ -290,17 +290,19 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref
     // values.
     for key, val := range src {
         fullkey := concatPrefix(prefix, key)
-        if dv, ok := dst[key]; ok && !merge && dv == nil {
+        dv, ok := dst[key]
+        switch {
+        case ok && !merge && dv == nil:
             delete(dst, key)
-        } else if !ok {
+        case !ok:
             dst[key] = val
-        } else if istable(val) {
+        case istable(val):
             if istable(dv) {
                 coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge)
             } else {
                 printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val)
             }
-        } else if istable(dv) && val != nil {
+        case istable(dv) && val != nil:
             printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val)
         }
     }
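
gocritic's ifElseChain check prefers a tagless switch once an if/else-if chain grows past a couple of branches, which is what the rewrite above does (after hoisting the dv, ok := dst[key] lookup out of the if initializer). A standalone sketch of the same transformation with invented names:

    package main

    import "fmt"

    func classify(n int) string {
        // Before: if n < 0 { ... } else if n == 0 { ... } else { ... }
        // After: the tagless switch form suggested by ifElseChain.
        switch {
        case n < 0:
            return "negative"
        case n == 0:
            return "zero"
        default:
            return "positive"
        }
    }

    func main() {
        fmt.Println(classify(-3), classify(0), classify(7)) // negative zero positive
    }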

@@ -802,7 +802,7 @@ func Create(name, dir string) (string, error) {
         }
     }
     // Need to add the ChartsDir explicitly as it does not contain any file OOTB
-    if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0o755); err != nil {
         return cdir, err
     }
     return cdir, nil
@@ -815,10 +815,10 @@ func transform(src, replacement string) []byte {
 }
 
 func writeFile(name string, content []byte) error {
-    if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
         return err
     }
-    return os.WriteFile(name, content, 0644)
+    return os.WriteFile(name, content, 0o644)
 }
 
 func validateChartName(name string) error {

@@ -39,7 +39,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
     }
     for _, r := range reqs {
         for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
-            if len(c) > 0 {
+            if c != "" {
                 // retrieve value
                 vv, err := cvals.PathValue(cpath + c)
                 if err == nil {

@@ -71,11 +71,11 @@ func Expand(dir string, r io.Reader) error {
         // Make sure the necessary subdirs get created.
         basedir := filepath.Dir(outpath)
-        if err := os.MkdirAll(basedir, 0755); err != nil {
+        if err := os.MkdirAll(basedir, 0o755); err != nil {
             return err
         }
-        if err := os.WriteFile(outpath, file.Data, 0644); err != nil {
+        if err := os.WriteFile(outpath, file.Data, 0o644); err != nil {
             return err
         }
     }

@@ -48,7 +48,7 @@ func SaveDir(c *chart.Chart, dest string) error {
     if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
         return fmt.Errorf("file %s already exists and is not a directory", outdir)
     }
-    if err := os.MkdirAll(outdir, 0755); err != nil {
+    if err := os.MkdirAll(outdir, 0o755); err != nil {
         return err
     }
@@ -114,7 +114,7 @@ func Save(c *chart.Chart, outDir string) (string, error) {
     dir := filepath.Dir(filename)
     if stat, err := os.Stat(dir); err != nil {
         if errors.Is(err, fs.ErrNotExist) {
-            if err2 := os.MkdirAll(dir, 0755); err2 != nil {
+            if err2 := os.MkdirAll(dir, 0o755); err2 != nil {
                 return "", err2
             }
         } else {
@@ -229,7 +229,7 @@ func writeToTar(out *tar.Writer, name string, body []byte) error {
     // TODO: Do we need to create dummy parent directory names if none exist?
     h := &tar.Header{
         Name:    filepath.ToSlash(name),
-        Mode:    0644,
+        Mode:    0o644,
         Size:    int64(len(body)),
         ModTime: time.Now(),
     }

@@ -251,7 +251,7 @@ func TestSaveDir(t *testing.T) {
     tmp2 := t.TempDir()
     c.Metadata.Name = "../ahab"
     pth := filepath.Join(tmp2, "tmpcharts")
-    if err := os.MkdirAll(filepath.Join(pth), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Join(pth), 0o755); err != nil {
         t.Fatal(err)
     }

@@ -29,7 +29,7 @@ func TestAtomicWriteFile(t *testing.T) {
     testpath := filepath.Join(dir, "test")
     stringContent := "Test content"
     reader := bytes.NewReader([]byte(stringContent))
-    mode := os.FileMode(0644)
+    mode := os.FileMode(0o644)
     err := AtomicWriteFile(testpath, reader, mode)
     if err != nil {

@@ -114,7 +114,7 @@ func (c *Client) Search(term string) ([]SearchResult, error) {
     p.RawQuery = "q=" + url.QueryEscape(term)
     // Create request
-    req, err := http.NewRequest(http.MethodGet, p.String(), nil)
+    req, err := http.NewRequest(http.MethodGet, p.String(), http.NoBody)
     if err != nil {
         return nil, err
     }

@@ -92,7 +92,7 @@ func makeTree(t *testing.T) {
             fd.Close()
         }
     } else {
-        if err := os.Mkdir(path, 0770); err != nil {
+        if err := os.Mkdir(path, 0o770); err != nil {
             t.Fatalf("makeTree: %v", err)
         }
     }

@@ -49,7 +49,7 @@ func AssertGoldenString(t TestingT, actual, filename string) {
 }
 
 // AssertGoldenFile asserts that the content of the actual file matches the contents of the expected file
-func AssertGoldenFile(t TestingT, actualFileName string, expectedFilename string) {
+func AssertGoldenFile(t TestingT, actualFileName, expectedFilename string) {
     t.Helper()
     actual, err := os.ReadFile(actualFileName)
@@ -87,7 +87,7 @@ func update(filename string, in []byte) error {
     if !*updateGolden {
         return nil
     }
-    return os.WriteFile(filename, normalize(in), 0666)
+    return os.WriteFile(filename, normalize(in), 0o666)
 }
 
 func normalize(in []byte) []byte {

@@ -192,7 +192,7 @@ func copyFile(src, dst string) (err error) {
     }
     // Check for write errors on Close
-    if err = out.Close(); err != nil {
+    if err := out.Close(); err != nil {
         return err
     }

@@ -58,12 +58,12 @@ func TestRenameWithFallback(t *testing.T) {
     }
     srcpath = filepath.Join(dir, "a")
-    if err := os.MkdirAll(srcpath, 0777); err != nil {
+    if err := os.MkdirAll(srcpath, 0o777); err != nil {
         t.Fatal(err)
     }
     dstpath := filepath.Join(dir, "b")
-    if err := os.MkdirAll(dstpath, 0777); err != nil {
+    if err := os.MkdirAll(dstpath, 0o777); err != nil {
         t.Fatal(err)
     }
@@ -76,7 +76,7 @@ func TestCopyDir(t *testing.T) {
     dir := t.TempDir()
     srcdir := filepath.Join(dir, "src")
-    if err := os.MkdirAll(srcdir, 0755); err != nil {
+    if err := os.MkdirAll(srcdir, 0o755); err != nil {
         t.Fatal(err)
     }
@@ -93,7 +93,7 @@ func TestCopyDir(t *testing.T) {
     for i, file := range files {
         fn := filepath.Join(srcdir, file.path)
         dn := filepath.Dir(fn)
-        if err := os.MkdirAll(dn, 0755); err != nil {
+        if err := os.MkdirAll(dn, 0o755); err != nil {
             t.Fatal(err)
         }
@@ -102,7 +102,7 @@ func TestCopyDir(t *testing.T) {
             t.Fatal(err)
         }
-        if _, err = fh.Write([]byte(file.contents)); err != nil {
+        if _, err = fh.WriteString(file.contents); err != nil {
             t.Fatal(err)
         }
         fh.Close()
@@ -170,7 +170,7 @@ func TestCopyDirFail_SrcInaccessible(t *testing.T) {
     cleanup := setupInaccessibleDir(t, func(dir string) error {
         srcdir = filepath.Join(dir, "src")
-        return os.MkdirAll(srcdir, 0755)
+        return os.MkdirAll(srcdir, 0o755)
     })
     defer cleanup()
@@ -202,7 +202,7 @@ func TestCopyDirFail_DstInaccessible(t *testing.T) {
     dir := t.TempDir()
     srcdir = filepath.Join(dir, "src")
-    if err := os.MkdirAll(srcdir, 0755); err != nil {
+    if err := os.MkdirAll(srcdir, 0o755); err != nil {
         t.Fatal(err)
     }
@@ -247,12 +247,12 @@ func TestCopyDirFail_DstExists(t *testing.T) {
     dir := t.TempDir()
     srcdir = filepath.Join(dir, "src")
-    if err = os.MkdirAll(srcdir, 0755); err != nil {
+    if err = os.MkdirAll(srcdir, 0o755); err != nil {
         t.Fatal(err)
     }
     dstdir = filepath.Join(dir, "dst")
-    if err = os.MkdirAll(dstdir, 0755); err != nil {
+    if err = os.MkdirAll(dstdir, 0o755); err != nil {
         t.Fatal(err)
     }
@@ -288,7 +288,7 @@ func TestCopyDirFailOpen(t *testing.T) {
     dir := t.TempDir()
     srcdir = filepath.Join(dir, "src")
-    if err := os.MkdirAll(srcdir, 0755); err != nil {
+    if err := os.MkdirAll(srcdir, 0o755); err != nil {
         t.Fatal(err)
     }
@@ -300,7 +300,7 @@ func TestCopyDirFailOpen(t *testing.T) {
     srcf.Close()
     // setup source file so that it cannot be read
-    if err = os.Chmod(srcfn, 0222); err != nil {
+    if err = os.Chmod(srcfn, 0o222); err != nil {
         t.Fatal(err)
     }
@@ -320,7 +320,7 @@ func TestCopyFile(t *testing.T) {
     }
     want := "hello world"
-    if _, err := srcf.Write([]byte(want)); err != nil {
+    if _, err := srcf.WriteString(want); err != nil {
         t.Fatal(err)
     }
     srcf.Close()
@@ -433,7 +433,7 @@ func TestCopyFileFail(t *testing.T) {
     cleanup := setupInaccessibleDir(t, func(dir string) error {
         dstdir = filepath.Join(dir, "dir")
-        return os.Mkdir(dstdir, 0777)
+        return os.Mkdir(dstdir, 0o777)
     })
     defer cleanup()
@@ -463,12 +463,12 @@ func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() {
     subdir := filepath.Join(dir, "dir")
     cleanup := func() {
-        if err := os.Chmod(subdir, 0777); err != nil {
+        if err := os.Chmod(subdir, 0o777); err != nil {
             t.Error(err)
         }
     }
-    if err := os.Mkdir(subdir, 0777); err != nil {
+    if err := os.Mkdir(subdir, 0o777); err != nil {
         cleanup()
         t.Fatal(err)
         return nil
@@ -480,7 +480,7 @@ func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() {
         return nil
     }
-    if err := os.Chmod(subdir, 0666); err != nil {
+    if err := os.Chmod(subdir, 0o666); err != nil {
         cleanup()
         t.Fatal(err)
         return nil
@@ -507,7 +507,7 @@ func TestIsDir(t *testing.T) {
     cleanup := setupInaccessibleDir(t, func(dir string) error {
         dn = filepath.Join(dir, "dir")
-        return os.Mkdir(dn, 0777)
+        return os.Mkdir(dn, 0o777)
     })
     defer cleanup()
@@ -554,7 +554,7 @@ func TestIsSymlink(t *testing.T) {
     dir := t.TempDir()
     dirPath := filepath.Join(dir, "directory")
-    if err := os.MkdirAll(dirPath, 0777); err != nil {
+    if err := os.MkdirAll(dirPath, 0o777); err != nil {
         t.Fatal(err)
     }
@@ -582,9 +582,11 @@ func TestIsSymlink(t *testing.T) {
     cleanup := setupInaccessibleDir(t, func(dir string) error {
         inaccessibleFile = filepath.Join(dir, "file")
-        if fh, err := os.Create(inaccessibleFile); err != nil {
+        fh, err := os.Create(inaccessibleFile)
+        if err != nil {
             return err
-        } else if err = fh.Close(); err != nil {
+        }
+        if err := fh.Close(); err != nil {
             return err
         }

@@ -291,9 +291,9 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
     if includeCrds {
         for _, crd := range ch.CRDObjects() {
             if outputDir == "" {
-                fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Filename, string(crd.File.Data[:]))
+                fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Filename, string(crd.File.Data))
             } else {
-                err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Filename])
+                err = writeToFile(outputDir, crd.Filename, string(crd.File.Data), fileWritten[crd.Filename])
                 if err != nil {
                     return hs, b, "", err
                 }
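
Dropping the [:] matches gocritic's unslice check: re-slicing a whole slice yields an identical value, so string(crd.File.Data[:]) and string(crd.File.Data) are interchangeable. A tiny illustrative check:

    package main

    import "fmt"

    func main() {
        data := []byte("hello")
        // data[:] re-slices the entire slice, so both conversions produce the same string.
        fmt.Println(string(data[:]) == string(data)) // true
    }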

@@ -70,7 +70,7 @@ func TestDependencyStatus_Dashes(t *testing.T) {
     dir := t.TempDir()
     chartpath := filepath.Join(dir, "charts")
-    if err := os.MkdirAll(chartpath, 0700); err != nil {
+    if err := os.MkdirAll(chartpath, 0o700); err != nil {
         t.Fatal(err)
     }
@@ -104,7 +104,7 @@ func TestStatArchiveForStatus(t *testing.T) {
     dir := t.TempDir()
     chartpath := filepath.Join(dir, "charts")
-    if err := os.MkdirAll(chartpath, 0700); err != nil {
+    if err := os.MkdirAll(chartpath, 0o700); err != nil {
         t.Fatal(err)
     }

@@ -63,7 +63,7 @@ import (
 // since there can be filepath in front of it.
 const notesFileSuffix = "NOTES.txt"
 
-const defaultDirectoryPermission = 0755
+const defaultDirectoryPermission = 0o755
 
 // Install performs an installation operation.
 type Install struct {
@@ -427,7 +427,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
     return rel, err
 }
 
-func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, toBeAdopted, resources kube.ResourceList) (*release.Release, error) {
     type Msg struct {
         r *release.Release
         e error
@@ -455,7 +455,7 @@ func (i *Install) isDryRun() bool {
     return false
 }
 
-func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+func (i *Install) performInstall(rel *release.Release, toBeAdopted, resources kube.ResourceList) (*release.Release, error) {
     var err error
     // pre-install hooks
     if !i.DisableHooks {
@@ -500,7 +500,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
         }
     }
 
-    if len(i.Description) > 0 {
+    if i.Description != "" {
         rel.SetStatus(release.StatusDeployed, i.Description)
     } else {
         rel.SetStatus(release.StatusDeployed, "Install complete")
@@ -621,8 +621,8 @@ func (i *Install) replaceRelease(rel *release.Release) error {
 }
 
 // write the <data> to <output-dir>/<name>. <appendData> controls if the file is created or content will be appended
-func writeToFile(outputDir string, name string, data string, appendData bool) error {
-    outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+func writeToFile(outputDir, name, data string, appendData bool) error {
+    outfileName := outputDir + string(filepath.Separator) + name
 
     err := ensureDirectoryForFile(outfileName)
     if err != nil {
@@ -648,7 +648,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
 func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
     if appendData {
-        return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+        return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0o600)
     }
     return os.Create(filename)
 }
@@ -864,7 +864,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (
         dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
     }
 
-    if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil {
+    if err := os.MkdirAll(settings.RepositoryCache, 0o755); err != nil {
         return "", err
     }

@@ -173,7 +173,7 @@ func (l *List) Run() ([]*release.Release, error) {
     }
 
     if results == nil {
-        return results, nil
+        return nil, nil
     }
 
     // by definition, superseded releases are never shown if

@@ -148,7 +148,7 @@ func (p *Package) Clearsign(filename string) error {
         return err
     }
 
-    return os.WriteFile(filename+".prov", []byte(sig), 0644)
+    return os.WriteFile(filename+".prov", []byte(sig), 0o644)
 }
 
 // promptUser implements provenance.PassphraseFetcher

@@ -90,7 +90,7 @@ func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) {
     passphrase := "secret-from-stdin"
     go func() {
-        w.Write([]byte(passphrase + "\n"))
+        w.WriteString(passphrase + "\n")
     }()
 
     for i := 0; i < 4; i++ {

@@ -161,7 +161,7 @@ func (p *Pull) Run(chartRef string) (string, error) {
         }
 
         if _, err := os.Stat(udCheck); err != nil {
-            if err := os.MkdirAll(udCheck, 0755); err != nil {
+            if err := os.MkdirAll(udCheck, 0o755); err != nil {
                 return out.String(), fmt.Errorf("failed to untar (mkdir): %w", err)
             }
         } else {

@@ -90,7 +90,7 @@ func NewPushWithOpts(opts ...PushOpt) *Push {
 }
 
 // Run executes 'helm push' against the given chart archive.
-func (p *Push) Run(chartRef string, remote string) (string, error) {
+func (p *Push) Run(chartRef, remote string) (string, error) {
     var out strings.Builder
 
     c := uploader.ChartUploader{

@@ -82,7 +82,7 @@ func NewRegistryLogin(cfg *Configuration) *RegistryLogin {
 }
 
 // Run executes the registry login operation
-func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error {
+func (a *RegistryLogin) Run(_ io.Writer, hostname, username, password string, opts ...RegistryLoginOpt) error {
     for _, opt := range opts {
         if err := opt(a); err != nil {
             return err

@@ -76,7 +76,7 @@ func TestShowNoValues(t *testing.T) {
         t.Fatal(err)
     }
 
-    if len(output) != 0 {
+    if output != "" {
         t.Errorf("expected empty values buffer, got %s", output)
     }
 }

@@ -146,7 +146,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
     }
 
     rel.Info.Status = release.StatusUninstalled
-    if len(u.Description) > 0 {
+    if u.Description != "" {
         rel.Info.Description = u.Description
     } else {
         rel.Info.Description = "Uninstallation complete"

@@ -300,7 +300,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin
         Labels: mergeCustomLabels(lastRelease.Labels, u.Labels),
     }
 
-    if len(notesTxt) > 0 {
+    if notesTxt != "" {
         upgradedRelease.Info.Notes = notesTxt
     }
     err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
@@ -364,7 +364,7 @@ func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedR
     // Run if it is a dry run
     if u.isDryRun() {
         slog.Debug("dry run for release", "name", upgradedRelease.Name)
-        if len(u.Description) > 0 {
+        if u.Description != "" {
             upgradedRelease.Info.Description = u.Description
         } else {
             upgradedRelease.Info.Description = "Dry run complete"
@@ -414,7 +414,7 @@ func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c ch
         return
     }
 }
 
-func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) {
+func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current, target kube.ResourceList, originalRelease *release.Release) {
     // pre-upgrade hooks
     if !u.DisableHooks {
@@ -465,7 +465,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
     u.cfg.recordRelease(originalRelease)
 
     upgradedRelease.Info.Status = release.StatusDeployed
-    if len(u.Description) > 0 {
+    if u.Description != "" {
         upgradedRelease.Info.Description = u.Description
     } else {
         upgradedRelease.Info.Description = "Upgrade complete"

@@ -73,7 +73,7 @@ func newMissingDeployment(name, namespace string) *resource.Info {
     return info
 }
 
-func newDeploymentWithOwner(name, namespace string, labels map[string]string, annotations map[string]string) *resource.Info {
+func newDeploymentWithOwner(name, namespace string, labels, annotations map[string]string) *resource.Info {
     obj := &appsv1.Deployment{
         ObjectMeta: v1.ObjectMeta{
             Name: name,

@@ -113,7 +113,7 @@ func LoadDir(dir string) (*chart.Chart, error) {
         files = append(files, &BufferedFile{Name: n, Data: data})
         return nil
     }
 
-    if err = sympath.Walk(topdir, walk); err != nil {
+    if err := sympath.Walk(topdir, walk); err != nil {
         return c, err
     }

@@ -415,7 +415,7 @@ func TestLoadInvalidArchive(t *testing.T) {
     h := &tar.Header{
         Name:    internalPath,
-        Mode:    0755,
+        Mode:    0o755,
         Size:    int64(len(body)),
         ModTime: time.Now(),
     }

@@ -67,7 +67,7 @@ func SaveChartfile(filename string, cf *chart.Metadata) error {
     if err != nil {
         return err
     }
-    return os.WriteFile(filename, out, 0644)
+    return os.WriteFile(filename, out, 0o644)
 }
 
 // IsChartDir validate a chart directory.

@@ -290,17 +290,19 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref
     // values.
     for key, val := range src {
         fullkey := concatPrefix(prefix, key)
-        if dv, ok := dst[key]; ok && !merge && dv == nil {
+        dv, ok := dst[key]
+        switch {
+        case ok && !merge && dv == nil:
             delete(dst, key)
-        } else if !ok {
+        case !ok:
            dst[key] = val
-        } else if istable(val) {
+        case istable(val):
             if istable(dv) {
                 coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge)
             } else {
                 printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val)
             }
-        } else if istable(dv) && val != nil {
+        case istable(dv) && val != nil:
             printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val)
         }
     }

@@ -802,7 +802,7 @@ func Create(name, dir string) (string, error) {
         }
     }
     // Need to add the ChartsDir explicitly as it does not contain any file OOTB
-    if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0o755); err != nil {
         return cdir, err
     }
     return cdir, nil
@@ -815,10 +815,10 @@ func transform(src, replacement string) []byte {
 }
 
 func writeFile(name string, content []byte) error {
-    if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
         return err
     }
-    return os.WriteFile(name, content, 0644)
+    return os.WriteFile(name, content, 0o644)
 }
 
 func validateChartName(name string) error {

@@ -39,7 +39,7 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s
     }
     for _, r := range reqs {
         for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
-            if len(c) > 0 {
+            if c != "" {
                 // retrieve value
                 vv, err := cvals.PathValue(cpath + c)
                 if err == nil {

@@ -71,11 +71,11 @@ func Expand(dir string, r io.Reader) error {
         // Make sure the necessary subdirs get created.
         basedir := filepath.Dir(outpath)
-        if err := os.MkdirAll(basedir, 0755); err != nil {
+        if err := os.MkdirAll(basedir, 0o755); err != nil {
             return err
         }
-        if err := os.WriteFile(outpath, file.Data, 0644); err != nil {
+        if err := os.WriteFile(outpath, file.Data, 0o644); err != nil {
             return err
         }
     }

@@ -48,7 +48,7 @@ func SaveDir(c *chart.Chart, dest string) error {
     if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
         return fmt.Errorf("file %s already exists and is not a directory", outdir)
     }
-    if err := os.MkdirAll(outdir, 0755); err != nil {
+    if err := os.MkdirAll(outdir, 0o755); err != nil {
         return err
     }
@@ -114,7 +114,7 @@ func Save(c *chart.Chart, outDir string) (string, error) {
     dir := filepath.Dir(filename)
     if stat, err := os.Stat(dir); err != nil {
         if errors.Is(err, fs.ErrNotExist) {
-            if err2 := os.MkdirAll(dir, 0755); err2 != nil {
+            if err2 := os.MkdirAll(dir, 0o755); err2 != nil {
                 return "", err2
             }
         } else {
@@ -241,7 +241,7 @@ func writeToTar(out *tar.Writer, name string, body []byte) error {
     // TODO: Do we need to create dummy parent directory names if none exist?
     h := &tar.Header{
         Name:    filepath.ToSlash(name),
-        Mode:    0644,
+        Mode:    0o644,
         Size:    int64(len(body)),
         ModTime: time.Now(),
     }

@@ -254,7 +254,7 @@ func TestSaveDir(t *testing.T) {
     tmp2 := t.TempDir()
     c.Metadata.Name = "../ahab"
     pth := filepath.Join(tmp2, "tmpcharts")
-    if err := os.MkdirAll(filepath.Join(pth), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Join(pth), 0o755); err != nil {
         t.Fatal(err)
     }

@@ -62,7 +62,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, er
     // User specified a value via --set-json
     for _, value := range opts.JSONValues {
         trimmedValue := strings.TrimSpace(value)
-        if len(trimmedValue) > 0 && trimmedValue[0] == '{' {
+        if trimmedValue != "" && trimmedValue[0] == '{' {
             // If value is JSON object format, parse it as map
             var jsonMap map[string]interface{}
             if err := json.Unmarshal([]byte(trimmedValue), &jsonMap); err != nil {

@@ -91,7 +91,7 @@ func TestReadFile(t *testing.T) {
         tmpDir := t.TempDir()
         filePath := filepath.Join(tmpDir, "test.txt")
         content := []byte("local file content")
-        err := os.WriteFile(filePath, content, 0644)
+        err := os.WriteFile(filePath, content, 0o644)
         if err != nil {
             t.Fatal(err)
         }
@@ -155,7 +155,7 @@ func TestReadFile(t *testing.T) {
         fileName := "ftp_file.txt" // Valid filename for filesystem
         filePath := filepath.Join(tmpDir, fileName)
         content := []byte("local fallback content")
-        err := os.WriteFile(filePath, content, 0644)
+        err := os.WriteFile(filePath, content, 0o644)
         if err != nil {
             t.Fatal(err)
         }

@@ -86,7 +86,7 @@ func (o *docsOptions) run(_ io.Writer) error {
         base := filepath.Base(filename)
         name := strings.TrimSuffix(base, path.Ext(base))
         title := cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(name, "_", " "))
-        return fmt.Sprintf("---\ntitle: \"%s\"\n---\n\n", title)
+        return fmt.Sprintf("---\ntitle: %q\n---\n\n", title)
     }
 
     return doc.GenMarkdownTreeCustom(o.topCmd, o.dest, hdrFunc, standardLinks)

@@ -246,7 +246,7 @@ func (p *postRendererArgsSlice) GetSlice() []string {
     return p.options.args
 }
 
-func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirective) {
+func compVersionFlag(chartRef, _ string) ([]string, cobra.ShellCompDirective) {
     chartInfo := strings.Split(chartRef, "/")
     if len(chartInfo) != 2 {
         return nil, cobra.ShellCompDirectiveNoFileComp

@@ -50,7 +50,7 @@ func newGetNotesCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
             if err != nil {
                 return err
             }
-            if len(res.Info.Notes) > 0 {
+            if res.Info.Notes != "" {
                 fmt.Fprintf(out, "NOTES:\n%s\n", res.Info.Notes)
             }
             return nil

@@ -150,7 +150,7 @@ type releaseListWriter struct {
     noColor bool
 }
 
-func newReleaseListWriter(releases []*release.Release, timeFormat string, noHeaders bool, noColor bool) *releaseListWriter {
+func newReleaseListWriter(releases []*release.Release, timeFormat string, noHeaders, noColor bool) *releaseListWriter {
     // Initialize the array so no results returns an empty array instead of null
     elements := make([]releaseElement, 0, len(releases))
     for _, r := range releases {

@@ -121,7 +121,7 @@ func processParent(cmd *cobra.Command, args []string) ([]string, error) {
 // This function is used to setup the environment for the plugin and then
 // call the executable specified by the parameter 'main'
-func callPluginExecutable(pluginName string, main string, argv []string, out io.Writer) error {
+func callPluginExecutable(pluginName, main string, argv []string, out io.Writer) error {
     env := os.Environ()
     for k, v := range settings.EnvVars() {
         env = append(env, fmt.Sprintf("%s=%s", k, v))
@@ -203,8 +203,7 @@ type pluginCommand struct {
 // and add the dynamic completion hook to call the optional plugin.complete
 func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) {
     // Parse the yaml file providing the plugin's sub-commands and flags
-    cmds, err := loadFile(strings.Join(
-        []string{plugin.Dir, pluginStaticCompletionFile}, string(filepath.Separator)))
+    cmds, err := loadFile(plugin.Dir + string(filepath.Separator) + pluginStaticCompletionFile)
     if err != nil {
         // The file could be missing or invalid. No static completion for this plugin.
@@ -228,7 +227,7 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug
         return
     }
 
-    if len(cmds.Name) == 0 {
+    if cmds.Name == "" {
         // Missing name for a command
         if settings.Debug {
             log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath()))
@@ -329,7 +328,7 @@ func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, t
     }
 
     // We will call the dynamic completion script of the plugin
-    main := strings.Join([]string{plug.Dir, pluginDynamicCompletionExecutable}, string(filepath.Separator))
+    main := plug.Dir + string(filepath.Separator) + pluginDynamicCompletionExecutable
 
     // We must include all sub-commands passed on the command-line.
     // To do that, we pass-in the entire CommandPath, except the first two elements
@@ -352,7 +351,7 @@ func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, t
     var completions []string
     for comp := range strings.SplitSeq(buf.String(), "\n") {
         // Remove any empty lines
-        if len(comp) > 0 {
+        if comp != "" {
             completions = append(completions, comp)
         }
     }

@@ -119,7 +119,7 @@ func TestPackage(t *testing.T) {
         }
 
         // This is an unfortunate byproduct of the tmpdir
-        if v, ok := tt.flags["keyring"]; ok && len(v) > 0 {
+        if v, ok := tt.flags["keyring"]; ok && v != "" {
             tt.flags["keyring"] = filepath.Join(origDir, v)
         }
@@ -147,7 +147,7 @@ func TestPackage(t *testing.T) {
            t.Fatalf("%q: expected error %q, got %q", tt.name, tt.expect, err)
         }
 
-        if len(tt.hasfile) > 0 {
+        if tt.hasfile != "" {
            if fi, err := os.Stat(tt.hasfile); err != nil {
                t.Errorf("%q: expected file %q, got err %q", tt.name, tt.hasfile, err)
            } else if fi.Size() == 0 {

@@ -52,9 +52,9 @@ func runHook(p *plugin.Plugin, event string) error {
     cmds := p.Metadata.PlatformHooks[event]
     expandArgs := true
 
-    if len(cmds) == 0 && len(p.Metadata.Hooks) > 0 {
-        cmd := p.Metadata.Hooks[event]
-        if len(cmd) > 0 {
+    if len(cmds) == 0 && len(p.Metadata.Hooks) > 0 { //nolint:staticcheck
+        cmd := p.Metadata.Hooks[event] //nolint:staticcheck
+        if cmd != "" {
             cmds = []plugin.PlatformCommand{{Command: "sh", Args: []string{"-c", cmd}}}
             expandArgs = false
         }

@@ -305,7 +305,7 @@ func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompleti
         var pflags []string
         pp.LocalFlags().VisitAll(func(flag *pflag.Flag) {
             pflags = append(pflags, flag.Name)
-            if len(flag.Shorthand) > 0 && flag.Shorthand != flag.Name {
+            if flag.Shorthand != "" && flag.Shorthand != flag.Name {
                 pflags = append(pflags, flag.Shorthand)
             }
         })

@@ -232,7 +232,7 @@ func TestPullCmd(t *testing.T) {
         }
         if tt.existDir != "" {
             file := filepath.Join(outdir, tt.existDir)
-            err := os.Mkdir(file, 0755)
+            err := os.Mkdir(file, 0o755)
             if err != nil {
                 t.Fatal(err)
             }
@@ -343,7 +343,7 @@ func TestPullWithCredentialsCmd(t *testing.T) {
         }
         if tt.existDir != "" {
             file := filepath.Join(outdir, tt.existDir)
-            err := os.Mkdir(file, 0755)
+            err := os.Mkdir(file, 0o755)
             if err != nil {
                 t.Fatal(err)
             }

@@ -91,19 +91,20 @@ func newRegistryLoginCmd(cfg *action.Configuration, out io.Writer) *cobra.Comman
 }
 
 // Adapted from https://github.com/oras-project/oras
-func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStdinOpt bool) (string, string, error) {
+func getUsernamePassword(usernameOpt, passwordOpt string, passwordFromStdinOpt bool) (string, string, error) {
     var err error
     username := usernameOpt
     password := passwordOpt
 
-    if passwordFromStdinOpt {
+    switch {
+    case passwordFromStdinOpt:
         passwordFromStdin, err := io.ReadAll(os.Stdin)
         if err != nil {
             return "", "", err
         }
         password = strings.TrimSuffix(string(passwordFromStdin), "\n")
         password = strings.TrimSuffix(password, "\r")
-    } else if password == "" {
+    case password == "":
         if username == "" {
             username, err = readLine("Username: ", false)
             if err != nil {
@@ -126,7 +127,7 @@ func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStd
                 return "", "", errors.New("password required")
             }
         }
-    } else {
+    default:
         slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
     }

@@ -121,7 +121,7 @@ func (o *repoAddOptions) run(out io.Writer) error {
     // Acquire a file lock for process synchronization
     repoFileExt := filepath.Ext(o.repoFile)
     var lockPath string
-    if len(repoFileExt) > 0 && len(repoFileExt) < len(o.repoFile) {
+    if repoFileExt != "" && len(repoFileExt) < len(o.repoFile) {
         lockPath = strings.TrimSuffix(o.repoFile, repoFileExt) + ".lock"
     } else {
         lockPath = o.repoFile + ".lock"

@@ -270,7 +270,7 @@ func TestRepoAddWithPasswordFromStdin(t *testing.T) {
         t.Errorf("unexpected error, got '%v'", err)
     }
 
-    if !strings.Contains(result, fmt.Sprintf("\"%s\" has been added to your repositories", testName)) {
+    if !strings.Contains(result, fmt.Sprintf("%q has been added to your repositories", testName)) {
         t.Errorf("Repo was not successfully added. Output: %s", result)
     }
 }

@@ -68,7 +68,7 @@ func (o *repoRemoveOptions) run(out io.Writer) error {
         if !r.Remove(name) {
             return fmt.Errorf("no repo named %q found", name)
         }
-        if err := r.WriteFile(o.repoFile, 0600); err != nil {
+        if err := r.WriteFile(o.repoFile, 0o600); err != nil {
             return err
         }

@@ -140,7 +140,7 @@ func TestRepoRemove(t *testing.T) {
     }
 }
 
-func createCacheFiles(rootDir string, repoName string) (cacheIndexFile string, cacheChartsFile string) {
+func createCacheFiles(rootDir, repoName string) (cacheIndexFile, cacheChartsFile string) {
     cacheIndexFile = filepath.Join(rootDir, helmpath.CacheIndexFile(repoName))
     mf, _ := os.Create(cacheIndexFile)
     mf.Close()
@@ -152,7 +152,7 @@ func createCacheFiles(rootDir string, repoName string) (cacheIndexFile string, c
     return cacheIndexFile, cacheChartsFile
 }
 
-func testCacheFiles(t *testing.T, cacheIndexFile string, cacheChartsFile string, repoName string) {
+func testCacheFiles(t *testing.T, cacheIndexFile, cacheChartsFile, repoName string) {
     t.Helper()
     if _, err := os.Stat(cacheIndexFile); err == nil {
         t.Errorf("Error cache index file was not removed for repository %s", repoName)

@@ -227,7 +227,7 @@ func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, arg
             cobra.CompDebugln("About to get the different kube-contexts", settings.Debug)
 
             loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
-            if len(settings.KubeConfig) > 0 {
+            if settings.KubeConfig != "" {
                 loadingRules = &clientcmd.ClientConfigLoadingRules{ExplicitPath: settings.KubeConfig}
             }
             if config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(

@@ -194,7 +194,7 @@ func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
             continue
         }
 
-        i.AddRepo(n, ind, o.versions || len(o.version) > 0)
+        i.AddRepo(n, ind, o.versions || o.version != "")
     }
     return i, nil
 }
@@ -268,7 +268,7 @@ func (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) e
 }
 
 // Provides the list of charts that are part of the specified repo, and that starts with 'prefix'.
-func compListChartsOfRepo(repoName string, prefix string) []string {
+func compListChartsOfRepo(repoName, prefix string) []string {
     var charts []string
 
     path := filepath.Join(settings.RepositoryCache, helmpath.CacheChartsFile(repoName))
@@ -361,7 +361,7 @@ func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.Shell
     // 2- If there is some input from the user (or else we will end up
     // listing the entire content of the current directory which will
     // be too many choices for the user to find the real repos)
-    if includeFiles && len(completions) > 0 && len(toComplete) > 0 {
+    if includeFiles && len(completions) > 0 && toComplete != "" {
         if files, err := os.ReadDir("."); err == nil {
             for _, file := range files {
                 if strings.HasPrefix(file.Name(), toComplete) {
@@ -375,22 +375,22 @@ func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.Shell
     // If the user didn't provide any input to completion,
     // we provide a hint that a path can also be used
-    if includeFiles && len(toComplete) == 0 {
+    if includeFiles && toComplete == "" {
         completions = append(completions, "./\tRelative path prefix to local chart", "/\tAbsolute path prefix to local chart")
     }
 
     cobra.CompDebugln(fmt.Sprintf("Completions after checking empty input: %v", completions), settings.Debug)
 
     directive := cobra.ShellCompDirectiveDefault
     if noFile {
-        directive = directive | cobra.ShellCompDirectiveNoFileComp
+        directive |= cobra.ShellCompDirectiveNoFileComp
     }
     if noSpace {
-        directive = directive | cobra.ShellCompDirectiveNoSpace
+        directive |= cobra.ShellCompDirectiveNoSpace
     }
     if !includeFiles {
         // If we should not include files in the completions,
         // we should disable file completion
-        directive = directive | cobra.ShellCompDirectiveNoFileComp
+        directive |= cobra.ShellCompDirectiveNoFileComp
     }
     return completions, directive
 }
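
The directive = directive | … lines are collapsed to |= per gocritic's assignOp check; the two spellings are equivalent. An illustrative sketch with invented flag names:

    package main

    import "fmt"

    const (
        flagA = 1 << iota // 0b01
        flagB             // 0b10
    )

    func main() {
        var flags int
        flags = flags | flagA // spelled-out form
        flags |= flagB        // compound assignment preferred by assignOp
        fmt.Printf("%02b\n", flags) // 11
    }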

@@ -73,7 +73,6 @@ func TestShowPreReleaseChart(t *testing.T) {
             filepath.Join(outdir, "repositories.yaml"),
             outdir,
         )
-        //_, out, err := executeActionCommand(cmd)
         _, _, err := executeActionCommand(cmd)
         if err != nil {
             if tt.fail {

@@ -220,7 +220,7 @@ func (s statusPrinter) WriteTable(out io.Writer) error {
     }
 
     // Hide notes from output - option in install and upgrades
-    if !s.hideNotes && len(s.release.Info.Notes) > 0 {
+    if !s.hideNotes && s.release.Info.Notes != "" {
         _, _ = fmt.Fprintf(out, "NOTES:\n%s\n", strings.TrimSpace(s.release.Info.Notes))
     }
     return nil

@@ -217,8 +217,8 @@ func isTestHook(h *release.Hook) bool {
 // bug introduced by #8156. As part of the todo to refactor renderResources
 // this duplicate code should be removed. It is added here so that the API
 // surface area is as minimally impacted as possible in fixing the issue.
-func writeToFile(outputDir string, name string, data string, appendData bool) error {
-    outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+func writeToFile(outputDir, name, data string, appendData bool) error {
+    outfileName := outputDir + string(filepath.Separator) + name
 
     err := ensureDirectoryForFile(outfileName)
     if err != nil {
@@ -244,7 +244,7 @@ func writeToFile(outputDir string, name string, data string, appendData bool) er
 func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
     if appendData {
-        return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+        return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0o600)
     }
     return os.Create(filename)
 }
@@ -256,5 +256,5 @@ func ensureDirectoryForFile(file string) error {
         return err
     }
 
-    return os.MkdirAll(baseDir, 0755)
+    return os.MkdirAll(baseDir, 0o755)
 }

@@ -110,7 +110,7 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
     }
 
     destfile := filepath.Join(dest, name)
-    if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil {
+    if err := fileutil.AtomicWriteFile(destfile, data, 0o644); err != nil {
         return destfile, nil, err
     }
@@ -126,7 +126,7 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven
             return destfile, ver, nil
         }
 
         provfile := destfile + ".prov"
-        if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil {
+        if err := fileutil.AtomicWriteFile(provfile, body, 0o644); err != nil {
             return destfile, nil, err
         }
@@ -176,7 +176,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er
         return u, err
     }
 
-    if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
+    if u.IsAbs() && u.Host != "" && u.Path != "" {
         // In this case, we have to find the parent repo that contains this chart
         // URL. And this is an unfortunate problem, as it requires actually going
         // through each repo cache file and finding a matching URL. But basically

@@ -161,7 +161,7 @@ func TestVerifyChart(t *testing.T) {
     }
     // The verification is tested at length in the provenance package. Here,
     // we just want a quick sanity check that the v is not empty.
-    if len(v.FileHash) == 0 {
+    if v.FileHash == "" {
         t.Error("Digest missing")
     }
 }

@@ -254,7 +254,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
             return fmt.Errorf("%q is not a directory", destPath)
         }
     } else if errors.Is(err, stdfs.ErrNotExist) {
-        if err := os.MkdirAll(destPath, 0755); err != nil {
+        if err := os.MkdirAll(destPath, 0o755); err != nil {
             return err
         }
     } else {
@@ -262,7 +262,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
     }
 
     // Prepare tmpPath
-    if err := os.MkdirAll(tmpPath, 0755); err != nil {
+    if err := os.MkdirAll(tmpPath, 0o755); err != nil {
         return err
     }
     defer os.RemoveAll(tmpPath)
@@ -374,7 +374,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error {
 }
 
 func parseOCIRef(chartRef string) (string, string, error) {
-    refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
+    refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:\d{1,5})?[^:]+):(.*)$`)
     caps := refTagRegexp.FindStringSubmatch(chartRef)
     if len(caps) != 4 {
         return "", "", fmt.Errorf("improperly formatted oci chart reference: %s", chartRef)
@@ -761,7 +761,7 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*
     }
     url, err = repo.FindChartInRepoURL(repoURL, name, m.Getters, repo.WithChartVersion(version), repo.WithClientTLS(certFile, keyFile, caFile))
     if err == nil {
-        return url, username, password, false, false, "", "", "", err
+        return url, username, password, false, false, "", "", "", nil
     }
     err = fmt.Errorf("chart %s not found in %s: %w", name, repoURL, err)
     return url, username, password, false, false, "", "", "", err
@@ -865,7 +865,7 @@ func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error {
         }
     }
 
-    return os.WriteFile(dest, data, 0644)
+    return os.WriteFile(dest, data, 0o644)
 }
 
 // archive a dep chart from local directory and save it into destPath
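
The [0-9] to \d change is the kind of rewrite gocritic's regexpSimplify suggests; in Go's RE2 syntax \d matches exactly the ASCII digits, so the pattern's behaviour is unchanged. A quick illustrative check with an invented pattern:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        legacy := regexp.MustCompile(`^[0-9]{1,5}$`)
        simplified := regexp.MustCompile(`^\d{1,5}$`)
        for _, s := range []string{"5000", "65535", "port"} {
            // Both patterns accept and reject exactly the same inputs.
            fmt.Println(s, legacy.MatchString(s), simplified.MatchString(s))
        }
    }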

@ -264,7 +264,7 @@ func TestDownloadAll(t *testing.T) {
} }
// create a 'tmpcharts' directory to test #5567 // create a 'tmpcharts' directory to test #5567
if err := os.MkdirAll(filepath.Join(chartPath, "tmpcharts"), 0755); err != nil { if err := os.MkdirAll(filepath.Join(chartPath, "tmpcharts"), 0o755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := m.downloadAll([]*chart.Dependency{signDep, localDep}); err != nil { if err := m.downloadAll([]*chart.Dependency{signDep, localDep}); err != nil {
@ -282,10 +282,10 @@ func TestDownloadAll(t *testing.T) {
description: A Helm chart for Kubernetes description: A Helm chart for Kubernetes
name: ../bad-local-subchart name: ../bad-local-subchart
version: 0.1.0` version: 0.1.0`
if err := os.MkdirAll(filepath.Join(chartPath, "testdata", "bad-local-subchart"), 0755); err != nil { if err := os.MkdirAll(filepath.Join(chartPath, "testdata", "bad-local-subchart"), 0o755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
err = os.WriteFile(filepath.Join(chartPath, "testdata", "bad-local-subchart", "Chart.yaml"), []byte(badchartyaml), 0644) err = os.WriteFile(filepath.Join(chartPath, "testdata", "bad-local-subchart", "Chart.yaml"), []byte(badchartyaml), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -733,7 +733,7 @@ func TestWriteLock(t *testing.T) {
t.Run("overwrite existing lock file", func(t *testing.T) { t.Run("overwrite existing lock file", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
lockfilePath := filepath.Join(dir, "Chart.lock") lockfilePath := filepath.Join(dir, "Chart.lock")
assert.NoError(t, os.WriteFile(lockfilePath, []byte("old content"), 0644)) assert.NoError(t, os.WriteFile(lockfilePath, []byte("old content"), 0o644))
err = writeLock(dir, lock, false) err = writeLock(dir, lock, false)
assert.NoError(t, err) assert.NoError(t, err)
@ -746,7 +746,7 @@ func TestWriteLock(t *testing.T) {
t.Run("lock file is a symlink", func(t *testing.T) { t.Run("lock file is a symlink", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
dummyFile := filepath.Join(dir, "dummy.txt") dummyFile := filepath.Join(dir, "dummy.txt")
assert.NoError(t, os.WriteFile(dummyFile, []byte("dummy"), 0644)) assert.NoError(t, os.WriteFile(dummyFile, []byte("dummy"), 0o644))
lockfilePath := filepath.Join(dir, "Chart.lock") lockfilePath := filepath.Join(dir, "Chart.lock")
assert.NoError(t, os.Symlink(dummyFile, lockfilePath)) assert.NoError(t, os.Symlink(dummyFile, lockfilePath))
@ -759,7 +759,7 @@ func TestWriteLock(t *testing.T) {
t.Run("chart path is not a directory", func(t *testing.T) { t.Run("chart path is not a directory", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
filePath := filepath.Join(dir, "not-a-dir") filePath := filepath.Join(dir, "not-a-dir")
assert.NoError(t, os.WriteFile(filePath, []byte("file"), 0644)) assert.NoError(t, os.WriteFile(filePath, []byte("file"), 0o644))
err = writeLock(filePath, lock, false) err = writeLock(filePath, lock, false)
assert.Error(t, err) assert.Error(t, err)

@ -447,7 +447,7 @@ func (p byPathLen) Less(i, j int) bool {
a, b := p[i], p[j] a, b := p[i], p[j]
ca, cb := strings.Count(a, "/"), strings.Count(b, "/") ca, cb := strings.Count(a, "/"), strings.Count(b, "/")
if ca == cb { if ca == cb {
return strings.Compare(a, b) == -1 return a < b
} }
return ca < cb return ca < cb
} }
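The byPathLen change swaps strings.Compare(a, b) == -1 for the equivalent a < b. A minimal sketch of the same ordering rule, using a throwaway slice of paths:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	paths := []string{"b/x", "a", "a/x", "b"} // illustrative values
	// Fewer path separators sort first; ties fall back to plain
	// lexicographic comparison, which a < b expresses directly.
	sort.Slice(paths, func(i, j int) bool {
		ci, cj := strings.Count(paths[i], "/"), strings.Count(paths[j], "/")
		if ci == cj {
			return paths[i] < paths[j]
		}
		return ci < cj
	})
	fmt.Println(paths) // [a b a/x b/x]
}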

@ -94,7 +94,7 @@ func newLookupFunction(clientProvider ClientProvider) lookupFunc {
} }
// getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced. // getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced.
func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) { func getDynamicClientOnKind(apiversion, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) {
gvk := schema.FromAPIVersionAndKind(apiversion, kind) gvk := schema.FromAPIVersionAndKind(apiversion, kind)
apiRes, err := getAPIResourceForGVK(gvk, config) apiRes, err := getAPIResourceForGVK(gvk, config)
if err != nil { if err != nil {

@ -46,7 +46,7 @@ func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error)
func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) {
// Set a helm specific user agent so that a repo server and metrics can // Set a helm specific user agent so that a repo server and metrics can
// separate helm calls from other tools interacting with repos. // separate helm calls from other tools interacting with repos.
req, err := http.NewRequest(http.MethodGet, href, nil) req, err := http.NewRequest(http.MethodGet, href, http.NoBody)
if err != nil { if err != nil {
return nil, err return nil, err
} }
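http.NoBody is a non-nil io.ReadCloser that always reports EOF, so passing it instead of nil keeps the request body non-nil while still sending nothing. A minimal sketch; example.com is just a placeholder URL:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.NoBody reads as empty but is not nil, which makes the
	// "no request body" intent explicit to readers and linters alike.
	req, err := http.NewRequest(http.MethodGet, "https://example.com", http.NoBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Body != nil, req.ContentLength) // true 0
}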

@ -225,7 +225,7 @@ func TestDownload(t *testing.T) {
// A different host is provided for the WithURL from the one used for Get // A different host is provided for the WithURL from the one used for Get
u2, _ := url.ParseRequestURI(crossAuthSrv.URL) u2, _ := url.ParseRequestURI(crossAuthSrv.URL)
host := strings.Split(u2.Host, ":") host := strings.Split(u2.Host, ":")
host[0] = host[0] + "a" host[0] += "a"
u2.Host = strings.Join(host, ":") u2.Host = strings.Join(host, ":")
httpgetter, err = NewHTTPGetter( httpgetter, err = NewHTTPGetter(
WithURL(u2.String()), WithURL(u2.String()),
@ -260,7 +260,7 @@ func TestDownload(t *testing.T) {
// A different host is provided for the WithURL from the one used for Get // A different host is provided for the WithURL from the one used for Get
u2, _ = url.ParseRequestURI(crossAuthSrv.URL) u2, _ = url.ParseRequestURI(crossAuthSrv.URL)
host = strings.Split(u2.Host, ":") host = strings.Split(u2.Host, ":")
host[0] = host[0] + "a" host[0] += "a"
u2.Host = strings.Join(host, ":") u2.Host = strings.Join(host, ":")
httpgetter, err = NewHTTPGetter( httpgetter, err = NewHTTPGetter(
WithURL(u2.String()), WithURL(u2.String()),
@ -570,7 +570,7 @@ func TestHttpClientInsecureSkipVerify(t *testing.T) {
g.opts.keyFile = "testdata/client.key" g.opts.keyFile = "testdata/client.key"
g.opts.insecureSkipVerifyTLS = true g.opts.insecureSkipVerifyTLS = true
transport := verifyInsecureSkipVerify(t, &g, "HTTPGetter with 2 way ssl", true) transport := verifyInsecureSkipVerify(t, &g, "HTTPGetter with 2 way ssl", true)
if len(transport.TLSClientConfig.Certificates) <= 0 { if len(transport.TLSClientConfig.Certificates) == 0 {
t.Fatal("transport.TLSClientConfig.Certificates is not present") t.Fatal("transport.TLSClientConfig.Certificates is not present")
} }
} }

@ -62,9 +62,11 @@ type pluginGetter struct {
} }
func (p *pluginGetter) setupOptionsEnv(env []string) []string { func (p *pluginGetter) setupOptionsEnv(env []string) []string {
env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username)) env = append(env,
env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password)) fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username),
env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll)) fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password),
fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll),
)
return env return env
} }
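Consecutive single-element appends to the same slice can be folded into one variadic call, which is all the setupOptionsEnv hunk does. A small sketch with made-up values:

package main

import "fmt"

func main() {
	env := []string{"HOME=/home/helm"} // illustrative starting environment
	// One variadic append replaces three back-to-back append calls;
	// the resulting slice is identical either way.
	env = append(env,
		"HELM_PLUGIN_USERNAME=user",
		"HELM_PLUGIN_PASSWORD=secret",
		"HELM_PLUGIN_PASS_CREDENTIALS_ALL=true",
	)
	fmt.Println(len(env)) // 4
}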

@ -558,7 +558,7 @@ func getManagedFieldsManager() string {
} }
// When no manager is set and no calling application can be found it is unknown // When no manager is set and no calling application can be found it is unknown
if len(os.Args[0]) == 0 { if os.Args[0] == "" {
return "unknown" return "unknown"
} }

@ -1176,11 +1176,9 @@ func TestIsReachable(t *testing.T) {
t.Errorf("expected error message to contain '%s', got: %v", tt.errorContains, err) t.Errorf("expected error message to contain '%s', got: %v", tt.errorContains, err)
} }
} else { } else if err != nil {
if err != nil {
t.Errorf("expected no error but got: %v", err) t.Errorf("expected no error but got: %v", err)
} }
}
}) })
} }
} }
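Several test hunks in this commit collapse an else branch whose only statement is another if into a flat else if. The two forms behave identically; a minimal sketch of the pattern (checkErr is a made-up helper, not Helm code):

package main

import "fmt"

// checkErr mirrors the shape of the collapsed branches above: wantErr says
// whether an error was expected, err is what actually came back.
func checkErr(wantErr bool, err error) string {
	if wantErr {
		if err == nil {
			return "expected an error, got none"
		}
	} else if err != nil {
		return fmt.Sprintf("unexpected error: %v", err)
	}
	return "ok"
}

func main() {
	fmt.Println(checkErr(false, nil)) // ok
	fmt.Println(checkErr(true, nil))  // expected an error, got none
}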

@ -1469,7 +1469,7 @@ func newReplicationController(name string, generationInSync bool) *corev1.Replic
} }
} }
func newReplicaSet(name string, replicas int, readyReplicas int, generationInSync bool) *appsv1.ReplicaSet { func newReplicaSet(name string, replicas, readyReplicas int, generationInSync bool) *appsv1.ReplicaSet {
d := newDeployment(name, replicas, 0, 0, generationInSync) d := newDeployment(name, replicas, 0, 0, generationInSync)
return &appsv1.ReplicaSet{ return &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -1528,7 +1528,7 @@ func newPersistentVolumeClaim(name string, phase corev1.PersistentVolumeClaimPha
} }
} }
func newJob(name string, backoffLimit int, completions *int32, succeeded int, failed int) *batchv1.Job { func newJob(name string, backoffLimit int, completions *int32, succeeded, failed int) *batchv1.Job {
return &batchv1.Job{ return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
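Grouping adjacent parameters of the same type, as in newReplicaSet and newJob above, is purely a signature shorthand. A tiny sketch:

package main

import "fmt"

// The two declarations below are identical to the compiler; the second just
// lets adjacent parameters of the same type share one type name.
func sumVerbose(a int, b int) int { return a + b }
func sum(a, b int) int            { return a + b }

func main() {
	fmt.Println(sumVerbose(2, 3), sum(2, 3)) // 5 5
}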

@ -134,7 +134,7 @@ func TestRetryingRoundTripper_RoundTrip(t *testing.T) {
rt := RetryingRoundTripper{ rt := RetryingRoundTripper{
Wrapped: fakeRT, Wrapped: fakeRT,
} }
req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody)
resp, err := rt.RoundTrip(req) resp, err := rt.RoundTrip(req)
if tt.expectedErr != "" { if tt.expectedErr != "" {

@ -141,8 +141,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w
defer cancel() defer cancel()
resources := []object.ObjMetadata{} resources := []object.ObjMetadata{}
for _, resource := range resourceList { for _, resource := range resourceList {
switch value := AsVersioned(resource).(type) { if value, ok := AsVersioned(resource).(*appsv1.Deployment); ok {
case *appsv1.Deployment:
if value.Spec.Paused { if value.Spec.Paused {
continue continue
} }
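A type switch with a single case is equivalent to a comma-ok type assertion, which is what the statusWaiter hunk uses instead. A minimal sketch with stand-in types, not the Helm ones:

package main

import "fmt"

type deployment struct{ paused bool }

// skipPaused shows the reduced form: a comma-ok assertion instead of a
// one-case type switch, with identical behaviour for non-matching types.
func skipPaused(obj interface{}) bool {
	if d, ok := obj.(*deployment); ok {
		return d.paused
	}
	return false
}

func main() {
	fmt.Println(skipPaused(&deployment{paused: true})) // true
	fmt.Println(skipPaused("something else"))          // false
}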

@ -160,11 +160,12 @@ func validateChartVersion(cf *chart.Metadata) error {
func validateChartMaintainer(cf *chart.Metadata) error { func validateChartMaintainer(cf *chart.Metadata) error {
for _, maintainer := range cf.Maintainers { for _, maintainer := range cf.Maintainers {
if maintainer.Name == "" { switch {
case maintainer.Name == "":
return errors.New("each maintainer requires a name") return errors.New("each maintainer requires a name")
} else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { case maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email):
return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name) return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
} else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) { case maintainer.URL != "" && !govalidator.IsURL(maintainer.URL):
return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name) return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
} }
} }
@ -202,7 +203,7 @@ func validateChartDependencies(cf *chart.Metadata) error {
} }
func validateChartType(cf *chart.Metadata) error { func validateChartType(cf *chart.Metadata) error {
if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 { if cf.Type != "" && cf.APIVersion != chart.APIVersionV2 {
return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2)
} }
return nil return nil
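The validateChartMaintainer hunk rewrites a chain of mutually exclusive if / else if conditions as a tagless switch, which reads top to bottom without nesting. A rough sketch of the same shape, using a made-up validator rather than the Helm one:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateContact is illustrative only: the first matching case wins,
// exactly as the earlier if / else if chain did.
func validateContact(name, email string) error {
	switch {
	case name == "":
		return errors.New("a name is required")
	case email != "" && !strings.Contains(email, "@"):
		return fmt.Errorf("invalid email %q for %q", email, name)
	}
	return nil
}

func main() {
	fmt.Println(validateContact("", ""))                   // a name is required
	fmt.Println(validateContact("dev", "not-an-email"))    // invalid email ...
	fmt.Println(validateContact("dev", "dev@example.com")) // <nil>
}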

@ -162,7 +162,7 @@ func TestValidateValuesFile(t *testing.T) {
func createTestingSchema(t *testing.T, dir string) string { func createTestingSchema(t *testing.T, dir string) string {
t.Helper() t.Helper()
schemafile := filepath.Join(dir, "values.schema.json") schemafile := filepath.Join(dir, "values.schema.json")
if err := os.WriteFile(schemafile, []byte(testSchema), 0700); err != nil { if err := os.WriteFile(schemafile, []byte(testSchema), 0o700); err != nil {
t.Fatalf("Failed to write schema to tmpdir: %s", err) t.Fatalf("Failed to write schema to tmpdir: %s", err)
} }
return schemafile return schemafile

@ -120,7 +120,7 @@ func stripPluginName(name string) string {
break break
} }
} }
re := regexp.MustCompile(`(.*)-[0-9]+\..*`) re := regexp.MustCompile(`(.*)-\d+\..*`)
return re.ReplaceAllString(strippedName, `$1`) return re.ReplaceAllString(strippedName, `$1`)
} }
@ -227,7 +227,7 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error {
return err return err
} }
if err := os.MkdirAll(targetDir, 0755); err != nil { if err := os.MkdirAll(targetDir, 0o755); err != nil {
return err return err
} }
@ -248,7 +248,7 @@ func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error {
switch header.Typeflag { switch header.Typeflag {
case tar.TypeDir: case tar.TypeDir:
if err := os.Mkdir(path, 0755); err != nil { if err := os.Mkdir(path, 0o755); err != nil {
return err return err
} }
case tar.TypeReg: case tar.TypeReg:

@ -85,7 +85,7 @@ func TestHTTPInstaller(t *testing.T) {
defer srv.Close() defer srv.Close()
source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz" source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil { if err := os.MkdirAll(helmpath.DataPath("plugins"), 0o755); err != nil {
t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err) t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
} }
@ -133,7 +133,7 @@ func TestHTTPInstallerNonExistentVersion(t *testing.T) {
defer srv.Close() defer srv.Close()
source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz" source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil { if err := os.MkdirAll(helmpath.DataPath("plugins"), 0o755); err != nil {
t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err) t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
} }
@ -166,7 +166,7 @@ func TestHTTPInstallerUpdate(t *testing.T) {
source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz" source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
ensure.HelmHome(t) ensure.HelmHome(t)
if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil { if err := os.MkdirAll(helmpath.DataPath("plugins"), 0o755); err != nil {
t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err) t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
} }
@ -211,7 +211,7 @@ func TestExtract(t *testing.T) {
tempDir := t.TempDir() tempDir := t.TempDir()
// Set the umask to default open permissions so we can actually test // Set the umask to default open permissions so we can actually test
oldmask := syscall.Umask(0000) oldmask := syscall.Umask(0o000)
defer func() { defer func() {
syscall.Umask(oldmask) syscall.Umask(oldmask)
}() }()
@ -223,8 +223,8 @@ func TestExtract(t *testing.T) {
Name, Body string Name, Body string
Mode int64 Mode int64
}{ }{
{"plugin.yaml", "plugin metadata", 0600}, {"plugin.yaml", "plugin metadata", 0o600},
{"README.md", "some text", 0777}, {"README.md", "some text", 0o777},
} }
for _, file := range files { for _, file := range files {
hdr := &tar.Header{ hdr := &tar.Header{
@ -280,7 +280,7 @@ func TestExtract(t *testing.T) {
t.Fatalf("Expected %s to exist but doesn't", pluginYAMLFullPath) t.Fatalf("Expected %s to exist but doesn't", pluginYAMLFullPath)
} }
t.Fatal(err) t.Fatal(err)
} else if info.Mode().Perm() != 0600 { } else if info.Mode().Perm() != 0o600 {
t.Fatalf("Expected %s to have 0600 mode it but has %o", pluginYAMLFullPath, info.Mode().Perm()) t.Fatalf("Expected %s to have 0600 mode it but has %o", pluginYAMLFullPath, info.Mode().Perm())
} }
@ -290,7 +290,7 @@ func TestExtract(t *testing.T) {
t.Fatalf("Expected %s to exist but doesn't", readmeFullPath) t.Fatalf("Expected %s to exist but doesn't", readmeFullPath)
} }
t.Fatal(err) t.Fatal(err)
} else if info.Mode().Perm() != 0777 { } else if info.Mode().Perm() != 0o777 {
t.Fatalf("Expected %s to have 0777 mode it but has %o", readmeFullPath, info.Mode().Perm()) t.Fatalf("Expected %s to have 0777 mode it but has %o", readmeFullPath, info.Mode().Perm())
} }
@ -343,7 +343,7 @@ func TestMediaTypeToExtension(t *testing.T) {
if shouldPass && ext == "" { if shouldPass && ext == "" {
t.Errorf("Expected an extension but got empty string") t.Errorf("Expected an extension but got empty string")
} }
if !shouldPass && len(ext) != 0 { if !shouldPass && ext != "" {
t.Error("Expected extension to be empty for unrecognized type") t.Error("Expected extension to be empty for unrecognized type")
} }
} }

@ -43,7 +43,7 @@ type Installer interface {
// Install installs a plugin. // Install installs a plugin.
func Install(i Installer) error { func Install(i Installer) error {
if err := os.MkdirAll(filepath.Dir(i.Path()), 0755); err != nil { if err := os.MkdirAll(filepath.Dir(i.Path()), 0o755); err != nil {
return err return err
} }
if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) { if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) {

@ -30,7 +30,7 @@ func TestLocalInstaller(t *testing.T) {
ensure.HelmHome(t) ensure.HelmHome(t)
// Make a temp dir // Make a temp dir
tdir := t.TempDir() tdir := t.TempDir()
if err := os.WriteFile(filepath.Join(tdir, "plugin.yaml"), []byte{}, 0644); err != nil { if err := os.WriteFile(filepath.Join(tdir, "plugin.yaml"), []byte{}, 0o644); err != nil {
t.Fatal(err) t.Fatal(err)
} }

@ -52,7 +52,7 @@ func (r *testRepo) UpdateVersion(version string) error {
func TestVCSInstaller(t *testing.T) { func TestVCSInstaller(t *testing.T) {
ensure.HelmHome(t) ensure.HelmHome(t)
if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil { if err := os.MkdirAll(helmpath.DataPath("plugins"), 0o755); err != nil {
t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err) t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
} }

@ -92,7 +92,7 @@ type Metadata struct {
// Note that command is not executed in a shell. To do so, we suggest // Note that command is not executed in a shell. To do so, we suggest
// pointing the command to a shell script. // pointing the command to a shell script.
// //
// DEPRECATED: Use PlatformCommand instead. Remove in Helm 4. // Deprecated: Use PlatformCommand instead. Remove in Helm 4.
Command string `json:"command"` Command string `json:"command"`
// IgnoreFlags ignores any flags passed in from Helm // IgnoreFlags ignores any flags passed in from Helm
@ -126,7 +126,7 @@ type Metadata struct {
// //
// Note that the command is executed in the sh shell. // Note that the command is executed in the sh shell.
// //
// DEPRECATED: Use PlatformHooks instead. Remove in Helm 4. // Deprecated: Use PlatformHooks instead. Remove in Helm 4.
Hooks Hooks Hooks Hooks
// Downloaders field is used if the plugin supply downloader mechanism // Downloaders field is used if the plugin supply downloader mechanism
@ -160,12 +160,12 @@ func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) {
return strings.Split(c.Command, " "), c.Args return strings.Split(c.Command, " "), c.Args
} }
if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 { if (c.OperatingSystem != "" && !eq(c.OperatingSystem, runtime.GOOS)) || c.Architecture != "" {
// Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match
continue continue
} }
if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) { if !foundOs && c.OperatingSystem != "" && eq(c.OperatingSystem, runtime.GOOS) {
// First OS match with empty arch, can only be overridden by a direct match // First OS match with empty arch, can only be overridden by a direct match
command = strings.Split(c.Command, " ") command = strings.Split(c.Command, " ")
args = c.Args args = c.Args
@ -235,7 +235,7 @@ func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) {
} }
cmds := p.Metadata.PlatformCommand cmds := p.Metadata.PlatformCommand
if len(cmds) == 0 && len(p.Metadata.Command) > 0 { if len(cmds) == 0 && p.Metadata.Command != "" {
cmds = []PlatformCommand{{Command: p.Metadata.Command}} cmds = []PlatformCommand{{Command: p.Metadata.Command}}
} }
@ -258,7 +258,7 @@ func validatePluginData(plug *Plugin, filepath string) error {
} }
plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage) plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 { if len(plug.Metadata.PlatformCommand) > 0 && plug.Metadata.Command != "" {
return fmt.Errorf("both platformCommand and command are set in %q", filepath) return fmt.Errorf("both platformCommand and command are set in %q", filepath)
} }
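The doc-comment hunks above switch DEPRECATED: to Deprecated:, the capitalised, colon-terminated form that godoc and most Go tooling recognise as a deprecation marker. A minimal sketch of the convention; OldGreet and NewGreet are made-up names:

package main

import "fmt"

// OldGreet prints a greeting.
//
// Deprecated: Use NewGreet instead. Tools such as staticcheck and gopls key
// off the exact "Deprecated:" prefix; an all-caps "DEPRECATED:" is not
// treated as a deprecation marker.
func OldGreet() { fmt.Println("hello") }

// NewGreet prints a greeting with a name.
func NewGreet(name string) { fmt.Printf("hello, %s\n", name) }

func main() {
	NewGreet("helm")
}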

@ -309,13 +309,15 @@ func TestVerify(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if ver, err := signer.Verify(testChartfile, testSigBlock); err != nil { ver, err := signer.Verify(testChartfile, testSigBlock)
switch {
case err != nil:
t.Errorf("Failed to pass verify. Err: %s", err) t.Errorf("Failed to pass verify. Err: %s", err)
} else if len(ver.FileHash) == 0 { case ver.FileHash == "":
t.Error("Verification is missing hash.") t.Error("Verification is missing hash.")
} else if ver.SignedBy == nil { case ver.SignedBy == nil:
t.Error("No SignedBy field") t.Error("No SignedBy field")
} else if ver.FileName != filepath.Base(testChartfile) { case ver.FileName != filepath.Base(testChartfile):
t.Errorf("FileName is unexpectedly %q", ver.FileName) t.Errorf("FileName is unexpectedly %q", ver.FileName)
} }

@ -301,12 +301,12 @@ func TestOCIPusher_Push_ChartOperations(t *testing.T) {
dst.Close() dst.Close()
// Make the file unreadable // Make the file unreadable
if err := os.Chmod(tempChart, 0000); err != nil { if err := os.Chmod(tempChart, 0o000); err != nil {
t.Fatal(err) t.Fatal(err)
} }
return tempChart, func() { return tempChart, func() {
os.Chmod(tempChart, 0644) // Restore permissions for cleanup os.Chmod(tempChart, 0o644) // Restore permissions for cleanup
} }
}, },
href: "oci://localhost:5000/test", href: "oci://localhost:5000/test",
@ -342,7 +342,7 @@ func TestOCIPusher_Push_ChartOperations(t *testing.T) {
dst.Close() dst.Close()
// Create provenance file // Create provenance file
if err := os.WriteFile(tempProv, []byte("test provenance data"), 0644); err != nil { if err := os.WriteFile(tempProv, []byte("test provenance data"), 0o644); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -384,11 +384,9 @@ func TestOCIPusher_Push_ChartOperations(t *testing.T) {
if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) { if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error()) t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error())
} }
} else { } else if err != nil {
if err != nil {
t.Fatalf("Unexpected error: %v", err) t.Fatalf("Unexpected error: %v", err)
} }
}
}) })
} }
} }

@ -253,7 +253,7 @@ func (c *Client) Login(host string, options ...LoginOption) error {
} }
// LoginOptBasicAuth returns a function that sets the username/password settings on login // LoginOptBasicAuth returns a function that sets the username/password settings on login
func LoginOptBasicAuth(username string, password string) LoginOption { func LoginOptBasicAuth(username, password string) LoginOption {
return func(o *loginOperation) { return func(o *loginOperation) {
o.client.username = username o.client.username = username
o.client.password = password o.client.password = password
@ -279,8 +279,7 @@ func ensureTLSConfig(client *auth.Client, setConfig *tls.Config) (*tls.Config, e
case *http.Transport: case *http.Transport:
transport = t transport = t
case *LoggingTransport: case *LoggingTransport:
switch t := t.RoundTripper.(type) { if t, ok := t.RoundTripper.(*http.Transport); ok {
case *http.Transport:
transport = t transport = t
} }
} }
@ -834,11 +833,9 @@ func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, e
if version == "" { if version == "" {
// Use OCI URI tag as default // Use OCI URI tag as default
version = registryReference.Tag version = registryReference.Tag
} else { } else if registryReference.Tag != "" && registryReference.Tag != version {
if registryReference.Tag != "" && registryReference.Tag != version {
return nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag) return nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag)
} }
}
if registryReference.Digest != "" { if registryReference.Digest != "" {
if version == "" { if version == "" {

@ -83,11 +83,12 @@ func (t *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, er
slog.Debug(req.Method, "id", id, "url", req.URL, "header", logHeader(req.Header)) slog.Debug(req.Method, "id", id, "url", req.URL, "header", logHeader(req.Header))
resp, err = t.RoundTripper.RoundTrip(req) resp, err = t.RoundTripper.RoundTrip(req)
if err != nil { switch {
case err != nil:
slog.Debug("Response"[:len(req.Method)], "id", id, "error", err) slog.Debug("Response"[:len(req.Method)], "id", id, "error", err)
} else if resp != nil { case resp != nil:
slog.Debug("Response"[:len(req.Method)], "id", id, "status", resp.Status, "header", logHeader(resp.Header), "body", logResponseBody(resp)) slog.Debug("Response"[:len(req.Method)], "id", id, "status", resp.Status, "header", logHeader(resp.Header), "body", logResponseBody(resp))
} else { default:
slog.Debug("Response"[:len(req.Method)], "id", id, "response", "nil") slog.Debug("Response"[:len(req.Method)], "id", id, "response", "nil")
} }
@ -142,7 +143,7 @@ func logResponseBody(resp *http.Response) string {
} }
readBody := buf.String() readBody := buf.String()
if len(readBody) == 0 { if readBody == "" {
return " Response body is empty" return " Response body is empty"
} }
if containsCredentials(readBody) { if containsCredentials(readBody) {

@ -156,7 +156,7 @@ func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[
chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationVersion, meta.Version) chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationVersion, meta.Version)
chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationURL, meta.Home) chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationURL, meta.Home)
if len(creationTime) == 0 { if creationTime == "" {
creationTime = helmtime.Now().UTC().Format(time.RFC3339) creationTime = helmtime.Now().UTC().Format(time.RFC3339)
} }
@ -171,11 +171,11 @@ func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[
for maintainerIdx, maintainer := range meta.Maintainers { for maintainerIdx, maintainer := range meta.Maintainers {
if len(maintainer.Name) > 0 { if maintainer.Name != "" {
maintainerSb.WriteString(maintainer.Name) maintainerSb.WriteString(maintainer.Name)
} }
if len(maintainer.Email) > 0 { if maintainer.Email != "" {
maintainerSb.WriteString(" (") maintainerSb.WriteString(" (")
maintainerSb.WriteString(maintainer.Email) maintainerSb.WriteString(maintainer.Email)
maintainerSb.WriteString(")") maintainerSb.WriteString(")")
@ -195,10 +195,10 @@ func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[
} }
// addToMap takes an existing map and adds an item if the value is not empty // addToMap takes an existing map and adds an item if the value is not empty
func addToMap(inputMap map[string]string, newKey string, newValue string) map[string]string { func addToMap(inputMap map[string]string, newKey, newValue string) map[string]string {
// Add item to map if its // Add item to map if its
if len(strings.TrimSpace(newValue)) > 0 { if strings.TrimSpace(newValue) != "" {
inputMap[newKey] = newValue inputMap[newKey] = newValue
} }
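Comparing against the empty string, as the addToMap hunk does, reads the same as len(s) > 0 for strings and is the form gocritic's empty-string checks prefer. A small sketch with hypothetical annotation keys:

package main

import (
	"fmt"
	"strings"
)

// addIfSet adds the entry only when the trimmed value is non-empty,
// mirroring the shape of the addToMap change above.
func addIfSet(m map[string]string, key, value string) map[string]string {
	if strings.TrimSpace(value) != "" {
		m[key] = value
	}
	return m
}

func main() {
	m := map[string]string{}
	m = addIfSet(m, "org.opencontainers.image.title", "nginx")
	m = addIfSet(m, "org.opencontainers.image.url", "   ") // whitespace only, skipped
	fmt.Println(m) // map[org.opencontainers.image.title:nginx]
}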

@ -73,7 +73,7 @@ type TestSuite struct {
func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry { func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry {
suite.WorkspaceDir = testWorkspaceDir suite.WorkspaceDir = testWorkspaceDir
os.RemoveAll(suite.WorkspaceDir) os.RemoveAll(suite.WorkspaceDir)
os.Mkdir(suite.WorkspaceDir, 0700) os.Mkdir(suite.WorkspaceDir, 0o700)
var ( var (
out bytes.Buffer out bytes.Buffer
@ -121,7 +121,7 @@ func setup(suite *TestSuite, tlsEnabled, insecure bool) *registry.Registry {
pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost) pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost)
suite.Nil(err, "no error generating bcrypt password for test htpasswd file") suite.Nil(err, "no error generating bcrypt password for test htpasswd file")
htpasswdPath := filepath.Join(suite.WorkspaceDir, testHtpasswdFileBasename) htpasswdPath := filepath.Join(suite.WorkspaceDir, testHtpasswdFileBasename)
err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0644) err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testUsername, string(pwBytes))), 0o644)
suite.Nil(err, "no error creating test htpasswd file") suite.Nil(err, "no error creating test htpasswd file")
// Registry config // Registry config
@ -185,7 +185,8 @@ func teardown(suite *TestSuite) {
func initCompromisedRegistryTestServer() string { func initCompromisedRegistryTestServer() string {
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "manifests") { switch {
case strings.Contains(r.URL.Path, "manifests"):
w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json") w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
@ -202,17 +203,17 @@ func initCompromisedRegistryTestServer() string {
} }
] ]
}`, ConfigMediaType, ChartLayerMediaType) }`, ConfigMediaType, ChartLayerMediaType)
} else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133" { case r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133":
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Write([]byte("{\"name\":\"mychart\",\"version\":\"0.1.0\",\"description\":\"A Helm chart for Kubernetes\\n" + w.Write([]byte("{\"name\":\"mychart\",\"version\":\"0.1.0\",\"description\":\"A Helm chart for Kubernetes\\n" +
"an 'application' or a 'library' chart.\",\"apiVersion\":\"v2\",\"appVersion\":\"1.16.0\",\"type\":" + "an 'application' or a 'library' chart.\",\"apiVersion\":\"v2\",\"appVersion\":\"1.16.0\",\"type\":" +
"\"application\"}")) "\"application\"}"))
} else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb" { case r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb":
w.Header().Set("Content-Type", ChartLayerMediaType) w.Header().Set("Content-Type", ChartLayerMediaType)
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Write([]byte("b")) w.Write([]byte("b"))
} else { default:
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
} }
})) }))

@ -137,7 +137,7 @@ func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.H
return h return h
} }
func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { func lessByKind(_, _ interface{}, kindA, kindB string, o KindSortOrder) bool {
ordering := make(map[string]int, len(o)) ordering := make(map[string]int, len(o))
for v, k := range o { for v, k := range o {
ordering[k] = v ordering[k] = v

@ -54,7 +54,7 @@ func SplitManifests(bigFile string) map[string]string {
d = strings.TrimSpace(d) d = strings.TrimSpace(d)
res[fmt.Sprintf(tpl, count)] = d res[fmt.Sprintf(tpl, count)] = d
count = count + 1 count++
} }
return res return res
} }

@ -107,13 +107,13 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) {
fmt.Fprintln(&charts, name) fmt.Fprintln(&charts, name)
} }
chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)) chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))
os.MkdirAll(filepath.Dir(chartsFile), 0755) os.MkdirAll(filepath.Dir(chartsFile), 0o755)
os.WriteFile(chartsFile, []byte(charts.String()), 0644) os.WriteFile(chartsFile, []byte(charts.String()), 0o644)
// Create the index file in the cache directory // Create the index file in the cache directory
fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name)) fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))
os.MkdirAll(filepath.Dir(fname), 0755) os.MkdirAll(filepath.Dir(fname), 0o755)
return fname, os.WriteFile(fname, index, 0644) return fname, os.WriteFile(fname, index, 0o644)
} }
type findChartInRepoURLOptions struct { type findChartInRepoURLOptions struct {
@ -169,7 +169,7 @@ func WithInsecureSkipTLSverify(insecureSkipTLSverify bool) FindChartInRepoURLOpt
// FindChartInRepoURL finds chart in chart repository pointed by repoURL // FindChartInRepoURL finds chart in chart repository pointed by repoURL
// without adding repo to repositories // without adding repo to repositories
func FindChartInRepoURL(repoURL string, chartName string, getters getter.Providers, options ...FindChartInRepoURLOption) (string, error) { func FindChartInRepoURL(repoURL, chartName string, getters getter.Providers, options ...FindChartInRepoURLOption) (string, error) {
opts := findChartInRepoURLOptions{} opts := findChartInRepoURLOptions{}
for _, option := range options { for _, option := range options {

@ -200,7 +200,7 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
} }
// when customer inputs specific version, check whether there's an exact match first // when customer inputs specific version, check whether there's an exact match first
if len(version) != 0 { if version != "" {
for _, ver := range vs { for _, ver := range vs {
if version == ver.Version { if version == ver.Version {
return ver, nil return ver, nil
@ -365,7 +365,8 @@ func loadIndex(data []byte, source string) (*IndexFile, error) {
if cvs[idx].APIVersion == "" { if cvs[idx].APIVersion == "" {
cvs[idx].APIVersion = chart.APIVersionV1 cvs[idx].APIVersion = chart.APIVersionV1
} }
if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil { err := cvs[idx].Validate()
if ignoreSkippableChartValidationError(err) != nil {
slog.Warn("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err) slog.Warn("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)
cvs = append(cvs[:idx], cvs[idx+1:]...) cvs = append(cvs[:idx], cvs[idx+1:]...)
} }

@ -550,7 +550,7 @@ func TestIndexWrite(t *testing.T) {
} }
dir := t.TempDir() dir := t.TempDir()
testpath := filepath.Join(dir, "test") testpath := filepath.Join(dir, "test")
i.WriteFile(testpath, 0600) i.WriteFile(testpath, 0o600)
got, err := os.ReadFile(testpath) got, err := os.ReadFile(testpath)
if err != nil { if err != nil {
@ -568,7 +568,7 @@ func TestIndexJSONWrite(t *testing.T) {
} }
dir := t.TempDir() dir := t.TempDir()
testpath := filepath.Join(dir, "test") testpath := filepath.Join(dir, "test")
i.WriteJSONFile(testpath, 0600) i.WriteJSONFile(testpath, 0o600)
got, err := os.ReadFile(testpath) got, err := os.ReadFile(testpath)
if err != nil { if err != nil {

@ -118,7 +118,7 @@ func (r *File) WriteFile(path string, perm os.FileMode) error {
if err != nil { if err != nil {
return err return err
} }
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err return err
} }
return os.WriteFile(path, data, perm) return os.WriteFile(path, data, perm)

@ -202,7 +202,7 @@ func TestWriteFile(t *testing.T) {
t.Errorf("failed to create test-file (%v)", err) t.Errorf("failed to create test-file (%v)", err)
} }
defer os.Remove(file.Name()) defer os.Remove(file.Name())
if err := sampleRepository.WriteFile(file.Name(), 0600); err != nil { if err := sampleRepository.WriteFile(file.Name(), 0o600); err != nil {
t.Errorf("failed to write file (%v)", err) t.Errorf("failed to write file (%v)", err)
} }

@ -35,7 +35,6 @@ func MakeTestTLSConfig(t *testing.T, path string) *tls.Config {
tlsutil.WithCertKeyPairFiles(pub, priv), tlsutil.WithCertKeyPairFiles(pub, priv),
tlsutil.WithCAFile(ca), tlsutil.WithCAFile(ca),
) )
//require.Nil(t, err, err.Error())
require.Nil(t, err) require.Nil(t, err)
tlsConf.ServerName = "helm.sh" tlsConf.ServerName = "helm.sh"

@ -211,7 +211,7 @@ func (cfgmaps *ConfigMaps) Delete(key string) (rls *rspb.Release, err error) {
return nil, err return nil, err
} }
// delete the release // delete the release
if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil { if err := cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil {
return rls, err return rls, err
} }
return rls, nil return rls, nil

@ -276,7 +276,7 @@ type SQLReleaseCustomLabelWrapper struct {
} }
// NewSQL initializes a new sql driver. // NewSQL initializes a new sql driver.
func NewSQL(connectionString string, namespace string) (*SQL, error) { func NewSQL(connectionString, namespace string) (*SQL, error) {
db, err := sqlx.Connect(postgreSQLDialect, connectionString) db, err := sqlx.Connect(postgreSQLDialect, connectionString)
if err != nil { if err != nil {
return nil, err return nil, err
@ -659,7 +659,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) {
} }
// Get release custom labels from database // Get release custom labels from database
func (s *SQL) getReleaseCustomLabels(key string, _ string) (map[string]string, error) { func (s *SQL) getReleaseCustomLabels(key, _ string) (map[string]string, error) {
query, args, err := s.statementBuilder. query, args, err := s.statementBuilder.
Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn). Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn).
From(sqlCustomLabelsTableName). From(sqlCustomLabelsTableName).

@ -81,12 +81,14 @@ func TestSQLGet(t *testing.T) {
func TestSQLList(t *testing.T) { func TestSQLList(t *testing.T) {
releases := []*rspb.Release{} releases := []*rspb.Release{}
releases = append(releases, releaseStub("key-1", 1, "default", rspb.StatusUninstalled)) releases = append(releases,
releases = append(releases, releaseStub("key-2", 1, "default", rspb.StatusUninstalled)) releaseStub("key-1", 1, "default", rspb.StatusUninstalled),
releases = append(releases, releaseStub("key-3", 1, "default", rspb.StatusDeployed)) releaseStub("key-2", 1, "default", rspb.StatusUninstalled),
releases = append(releases, releaseStub("key-4", 1, "default", rspb.StatusDeployed)) releaseStub("key-3", 1, "default", rspb.StatusDeployed),
releases = append(releases, releaseStub("key-5", 1, "default", rspb.StatusSuperseded)) releaseStub("key-4", 1, "default", rspb.StatusDeployed),
releases = append(releases, releaseStub("key-6", 1, "default", rspb.StatusSuperseded)) releaseStub("key-5", 1, "default", rspb.StatusSuperseded),
releaseStub("key-6", 1, "default", rspb.StatusSuperseded),
)
sqlDriver, mock := newTestFixtureSQL(t) sqlDriver, mock := newTestFixtureSQL(t)
@ -520,7 +522,7 @@ func TestSqlDelete(t *testing.T) {
} }
} }
func mockGetReleaseCustomLabels(mock sqlmock.Sqlmock, key string, namespace string, labels map[string]string) { func mockGetReleaseCustomLabels(mock sqlmock.Sqlmock, key, namespace string, labels map[string]string) {
query := fmt.Sprintf( query := fmt.Sprintf(
regexp.QuoteMeta("SELECT %s, %s FROM %s WHERE %s = $1 AND %s = $2"), regexp.QuoteMeta("SELECT %s, %s FROM %s WHERE %s = $1 AND %s = $2"),
sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableKeyColumn,

@ -309,7 +309,6 @@ func (d *MaxHistoryMockDriver) Name() string {
} }
func TestMaxHistoryErrorHandling(t *testing.T) { func TestMaxHistoryErrorHandling(t *testing.T) {
//func TestStorageRemoveLeastRecentWithError(t *testing.T) {
storage := Init(NewMaxHistoryMockDriver(driver.NewMemory())) storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
storage.MaxHistory = 1 storage.MaxHistory = 1

@ -16,6 +16,7 @@ limitations under the License.
package strvals package strvals
import ( import (
"bytes"
"fmt" "fmt"
"testing" "testing"
@ -296,7 +297,7 @@ func TestParseLiteral(t *testing.T) {
t.Fatalf("Error serializing parsed value: %s", err) t.Fatalf("Error serializing parsed value: %s", err)
} }
if string(y1) != string(y2) { if !bytes.Equal(y1, y2) {
t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2) t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
} }
} }
@ -409,7 +410,7 @@ func TestParseLiteralInto(t *testing.T) {
t.Fatalf("Error serializing parsed value: %s", err) t.Fatalf("Error serializing parsed value: %s", err)
} }
if string(y1) != string(y2) { if !bytes.Equal(y1, y2) {
t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.input, y1, y2) t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.input, y1, y2)
} }
} }
@ -421,7 +422,7 @@ func TestParseLiteralNestedLevels(t *testing.T) {
for i := 1; i <= MaxNestedNameLevel+2; i++ { for i := 1; i <= MaxNestedNameLevel+2; i++ {
tmpStr := fmt.Sprintf("name%d", i) tmpStr := fmt.Sprintf("name%d", i)
if i <= MaxNestedNameLevel+1 { if i <= MaxNestedNameLevel+1 {
tmpStr = tmpStr + "." tmpStr += "."
} }
keyMultipleNestedLevels += tmpStr keyMultipleNestedLevels += tmpStr
} }
@ -473,7 +474,7 @@ func TestParseLiteralNestedLevels(t *testing.T) {
t.Fatalf("Error serializing parsed value: %s", err) t.Fatalf("Error serializing parsed value: %s", err)
} }
if string(y1) != string(y2) { if !bytes.Equal(y1, y2) {
t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2) t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
} }
} }
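Comparing the serialized YAML with bytes.Equal instead of converting both sides to string is a readability change; recent Go compilers usually elide such conversions anyway. A minimal sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	y1 := []byte("name: value\n")
	y2 := []byte("name: value\n")
	// bytes.Equal compares the byte slices directly, with no string
	// conversion in the source even where the compiler could optimise one away.
	fmt.Println(bytes.Equal(y1, y2))                // true
	fmt.Println(bytes.Equal(y1, []byte("other\n"))) // false
}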
