Merge branch 'master' into helm-ls-modification

pull/3728/head
Derek Bassett 8 years ago committed by GitHub
commit ae5d13c5e1

@ -0,0 +1,9 @@
<!-- If you need help or think you have found a bug, please help us with your issue by entering the following information (otherwise you can delete this text): -->
Output of `helm version`:
Output of `kubectl version`:
Cloud Provider/Platform (AKS, GKE, Minikube etc.):

@ -120,7 +120,6 @@ coverage:
HAS_GLIDE := $(shell command -v glide;) HAS_GLIDE := $(shell command -v glide;)
HAS_GOX := $(shell command -v gox;) HAS_GOX := $(shell command -v gox;)
HAS_GIT := $(shell command -v git;) HAS_GIT := $(shell command -v git;)
HAS_HG := $(shell command -v hg;)
.PHONY: bootstrap .PHONY: bootstrap
bootstrap: bootstrap:
@ -133,9 +132,6 @@ endif
ifndef HAS_GIT ifndef HAS_GIT
$(error You must install Git) $(error You must install Git)
endif
ifndef HAS_HG
$(error You must install Mercurial)
endif endif
glide install --strip-vendor glide install --strip-vendor
go build -o bin/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go go build -o bin/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go

@ -40,7 +40,11 @@ Binary downloads of the Helm client can be found at the following links:
- [Windows](https://kubernetes-helm.storage.googleapis.com/helm-v2.8.2-windows-amd64.tar.gz) - [Windows](https://kubernetes-helm.storage.googleapis.com/helm-v2.8.2-windows-amd64.tar.gz)
Unpack the `helm` binary and add it to your PATH and you are good to go! Unpack the `helm` binary and add it to your PATH and you are good to go!
macOS/[homebrew](https://brew.sh/) users can also use `brew install kubernetes-helm`.
If you want to use a package manager:
- macOS/[homebrew](https://brew.sh/) users can use `brew install kubernetes-helm`.
- Windows/[chocolatey](https://chocolatey.org/) users can use `choco install kubernetes-helm`.
To rapidly get Helm up and running, start with the [Quick Start Guide](https://docs.helm.sh/using_helm/#quickstart-guide). To rapidly get Helm up and running, start with the [Quick Start Guide](https://docs.helm.sh/using_helm/#quickstart-guide).

@ -38,6 +38,7 @@ message Hook {
enum DeletePolicy { enum DeletePolicy {
SUCCEEDED = 0; SUCCEEDED = 0;
FAILED = 1; FAILED = 1;
BEFORE_HOOK_CREATION = 2;
} }
string name = 1; string name = 1;
// Kind is the Kubernetes kind. // Kind is the Kubernetes kind.

@ -46,7 +46,7 @@ func TestCreateCmd(t *testing.T) {
defer os.Chdir(pwd) defer os.Chdir(pwd)
// Run a create // Run a create
cmd := newCreateCmd(os.Stdout) cmd := newCreateCmd(ioutil.Discard)
if err := cmd.RunE(cmd, []string{cname}); err != nil { if err := cmd.RunE(cmd, []string{cname}); err != nil {
t.Errorf("Failed to run create: %s", err) t.Errorf("Failed to run create: %s", err)
return return
@ -117,7 +117,7 @@ func TestCreateStarterCmd(t *testing.T) {
defer os.Chdir(pwd) defer os.Chdir(pwd)
// Run a create // Run a create
cmd := newCreateCmd(os.Stdout) cmd := newCreateCmd(ioutil.Discard)
cmd.ParseFlags([]string{"--starter", "starterchart"}) cmd.ParseFlags([]string{"--starter", "starterchart"})
if err := cmd.RunE(cmd, []string{cname}); err != nil { if err := cmd.RunE(cmd, []string{cname}); err != nil {
t.Errorf("Failed to run create: %s", err) t.Errorf("Failed to run create: %s", err)

@ -55,9 +55,7 @@ func newGetManifestCmd(client helm.Interface, out io.Writer) *cobra.Command {
return errReleaseRequired return errReleaseRequired
} }
get.release = args[0] get.release = args[0]
if get.client == nil { get.client = ensureHelmClient(get.client)
get.client = helm.NewClient(helm.Host(settings.TillerHost))
}
return get.run() return get.run()
}, },
} }

@ -30,6 +30,8 @@ import (
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
// Import to initialize client auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm"
helm_env "k8s.io/helm/pkg/helm/environment" helm_env "k8s.io/helm/pkg/helm/environment"
"k8s.io/helm/pkg/helm/portforwarder" "k8s.io/helm/pkg/helm/portforwarder"
@ -214,7 +216,7 @@ func prettyError(err error) error {
} }
// If it's grpc's error, make it more user-friendly. // If it's grpc's error, make it more user-friendly.
if s, ok := status.FromError(err); ok { if s, ok := status.FromError(err); ok {
return s.Err() return fmt.Errorf(s.Message())
} }
// Else return the original error. // Else return the original error.
return err return err

@ -43,7 +43,10 @@ func runReleaseCases(t *testing.T, tests []releaseCase, rcmd releaseCmd) {
var buf bytes.Buffer var buf bytes.Buffer
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &helm.FakeClient{Rels: tt.rels} c := &helm.FakeClient{
Rels: tt.rels,
Responses: tt.responses,
}
cmd := rcmd(c, &buf) cmd := rcmd(c, &buf)
cmd.ParseFlags(tt.flags) cmd.ParseFlags(tt.flags)
err := cmd.RunE(cmd, tt.args) err := cmd.RunE(cmd, tt.args)
@ -70,6 +73,7 @@ type releaseCase struct {
resp *release.Release resp *release.Release
// Rels are the available releases at the start of the test. // Rels are the available releases at the start of the test.
rels []*release.Release rels []*release.Release
responses map[string]release.TestRun_Status
} }
// tempHelmHome sets up a Helm Home in a temp dir. // tempHelmHome sets up a Helm Home in a temp dir.

@ -313,11 +313,6 @@ func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[st
dest[k] = v dest[k] = v
continue continue
} }
// If the key doesn't exist already, then just set the key to that value
if _, exists := dest[k]; !exists {
dest[k] = nextMap
continue
}
// Edge case: If the key exists in the destination, but isn't a map // Edge case: If the key exists in the destination, but isn't a map
destMap, isMap := dest[k].(map[string]interface{}) destMap, isMap := dest[k].(map[string]interface{})
// If the source map has a map for this key, prefer it // If the source map has a map for this key, prefer it
@ -463,7 +458,7 @@ func locateChartPath(repoURL, username, password, name, version string, verify b
return filename, err return filename, err
} }
return filename, fmt.Errorf("failed to download %q", name) return filename, fmt.Errorf("failed to download %q (hint: running `helm repo update` may help)", name)
} }
func generateName(nameTemplate string) (string, error) { func generateName(nameTemplate string) (string, error) {

@ -176,6 +176,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
return nil, err return nil, err
} }
} }
automountServiceAccountToken := opts.ServiceAccount != ""
d := &v1beta1.Deployment{ d := &v1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: opts.Namespace, Namespace: opts.Namespace,
@ -190,6 +191,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
ServiceAccountName: opts.ServiceAccount, ServiceAccountName: opts.ServiceAccount,
AutomountServiceAccountToken: &automountServiceAccountToken,
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: "tiller", Name: "tiller",

@ -96,6 +96,9 @@ func TestDeploymentManifestForServiceAccount(t *testing.T) {
if got := d.Spec.Template.Spec.ServiceAccountName; got != tt.serviceAccount { if got := d.Spec.Template.Spec.ServiceAccountName; got != tt.serviceAccount {
t.Errorf("%s: expected service account value %q, got %q", tt.name, tt.serviceAccount, got) t.Errorf("%s: expected service account value %q, got %q", tt.name, tt.serviceAccount, got)
} }
if got := *d.Spec.Template.Spec.AutomountServiceAccountToken; got != (tt.serviceAccount != "") {
t.Errorf("%s: unexpected automountServiceAccountToken = %t for serviceAccount %q", tt.name, got, tt.serviceAccount)
}
} }
} }

@ -101,6 +101,9 @@ func (l *lintCmd) run() error {
if linter, err := lintChart(path, rvals, l.namespace, l.strict); err != nil { if linter, err := lintChart(path, rvals, l.namespace, l.strict); err != nil {
fmt.Println("==> Skipping", path) fmt.Println("==> Skipping", path)
fmt.Println(err) fmt.Println(err)
if err == errLintNoChart {
failures = failures + 1
}
} else { } else {
fmt.Println("==> Linting", path) fmt.Println("==> Linting", path)

@ -28,6 +28,7 @@ var (
archivedChartPathWithHyphens = "testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz" archivedChartPathWithHyphens = "testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz"
invalidArchivedChartPath = "testdata/testcharts/invalidcompressedchart0.1.0.tgz" invalidArchivedChartPath = "testdata/testcharts/invalidcompressedchart0.1.0.tgz"
chartDirPath = "testdata/testcharts/decompressedchart/" chartDirPath = "testdata/testcharts/decompressedchart/"
chartMissingManifest = "testdata/testcharts/chart-missing-manifest"
) )
func TestLintChart(t *testing.T) { func TestLintChart(t *testing.T) {
@ -46,4 +47,8 @@ func TestLintChart(t *testing.T) {
if _, err := lintChart(invalidArchivedChartPath, values, namespace, strict); err == nil { if _, err := lintChart(invalidArchivedChartPath, values, namespace, strict); err == nil {
t.Errorf("Expected a chart parsing error") t.Errorf("Expected a chart parsing error")
} }
if _, err := lintChart(chartMissingManifest, values, namespace, strict); err == nil {
t.Errorf("Expected a chart parsing error")
}
} }

@ -148,7 +148,7 @@ func (l *listCmd) run() error {
return prettyError(err) return prettyError(err)
} }
if len(res.Releases) == 0 { if len(res.GetReleases()) == 0 {
return nil return nil
} }
@ -239,13 +239,17 @@ func formatList(rels []*release.Release, colWidth uint) string {
table.MaxColWidth = colWidth table.MaxColWidth = colWidth
table.AddRow("NAME", "REVISION", "UPDATED", "STATUS", "CHART", "APP VERSION", "NAMESPACE") table.AddRow("NAME", "REVISION", "UPDATED", "STATUS", "CHART", "APP VERSION", "NAMESPACE")
for _, r := range rels { for _, r := range rels {
c := fmt.Sprintf("%s-%s", r.Chart.Metadata.Name, r.Chart.Metadata.Version) md := r.GetChart().GetMetadata()
t := timeconv.String(r.Info.LastDeployed) c := fmt.Sprintf("%s-%s", md.GetName(), md.GetVersion())
s := r.Info.Status.Code.String() t := "-"
v := r.Version if tspb := r.GetInfo().GetLastDeployed(); tspb != nil {
a := r.Chart.Metadata.AppVersion t = timeconv.String(tspb)
n := r.Namespace }
table.AddRow(r.Name, v, t, s, c, a, n) s := r.GetInfo().GetStatus().GetCode().String()
v := r.GetVersion()
a := md.GetAppVersion()
n := r.GetNamespace()
table.AddRow(r.GetName(), v, t, s, c, a, n)
} }
return table.String() return table.String()
} }

@ -53,6 +53,9 @@ type packageCmd struct {
save bool save bool
sign bool sign bool
path string path string
valueFiles valueFiles
values []string
stringValues []string
key string key string
keyring string keyring string
version string version string
@ -95,6 +98,9 @@ func newPackageCmd(out io.Writer) *cobra.Command {
} }
f := cmd.Flags() f := cmd.Flags()
f.VarP(&pkg.valueFiles, "values", "f", "specify values in a YAML file or a URL(can specify multiple)")
f.StringArrayVar(&pkg.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.StringArrayVar(&pkg.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.BoolVar(&pkg.save, "save", true, "save packaged chart to local chart repository") f.BoolVar(&pkg.save, "save", true, "save packaged chart to local chart repository")
f.BoolVar(&pkg.sign, "sign", false, "use a PGP private key to sign this package") f.BoolVar(&pkg.sign, "sign", false, "use a PGP private key to sign this package")
f.StringVar(&pkg.key, "key", "", "name of the key to use when signing. Used if --sign is true") f.StringVar(&pkg.key, "key", "", "name of the key to use when signing. Used if --sign is true")
@ -133,6 +139,20 @@ func (p *packageCmd) run() error {
return err return err
} }
overrideVals, err := vals(p.valueFiles, p.values, p.stringValues)
if err != nil {
return err
}
combinedVals, err := chartutil.CoalesceValues(ch, &chart.Config{Raw: string(overrideVals)})
if err != nil {
return err
}
newVals, err := combinedVals.YAML()
if err != nil {
return err
}
ch.Values = &chart.Config{Raw: newVals}
// If version is set, modify the version. // If version is set, modify the version.
if len(p.version) != 0 { if len(p.version) != 0 {
if err := setVersion(ch, p.version); err != nil { if err := setVersion(ch, p.version); err != nil {

@ -21,6 +21,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"strings"
"testing" "testing"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -122,6 +123,13 @@ func TestPackage(t *testing.T) {
hasfile: "chart-missing-deps-0.1.0.tgz", hasfile: "chart-missing-deps-0.1.0.tgz",
err: true, err: true,
}, },
{
name: "package --values does-not-exist",
args: []string{"testdata/testcharts/alpine"},
flags: map[string]string{"values": "does-not-exist"},
expect: "does-not-exist: no such file or directory",
err: true,
},
} }
// Because these tests are destructive, we run them in a tempdir. // Because these tests are destructive, we run them in a tempdir.
@ -245,6 +253,150 @@ func TestSetAppVersion(t *testing.T) {
} }
} }
func TestPackageValues(t *testing.T) {
testCases := []struct {
desc string
args []string
valuefilesContents []string
flags map[string]string
expected []string
}{
{
desc: "helm package, single values file",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo"},
expected: []string{"Name: chart-name-foo"},
},
{
desc: "helm package, multiple values files",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo", "foo: bar"},
expected: []string{"Name: chart-name-foo", "foo: bar"},
},
{
desc: "helm package, with set option",
args: []string{"testdata/testcharts/alpine"},
flags: map[string]string{"set": "Name=chart-name-foo"},
expected: []string{"Name: chart-name-foo"},
},
{
desc: "helm package, set takes precedence over value file",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo"},
flags: map[string]string{"set": "Name=chart-name-bar"},
expected: []string{"Name: chart-name-bar"},
},
}
thome, err := tempHelmHome(t)
if err != nil {
t.Fatal(err)
}
cleanup := resetEnv()
defer func() {
os.RemoveAll(thome.String())
cleanup()
}()
settings.Home = thome
for _, tc := range testCases {
var files []string
for _, contents := range tc.valuefilesContents {
f, err := createValuesFile(contents)
if err != nil {
t.Errorf("%q unexpected error creating temporary values file: %q", tc.desc, err)
}
defer os.RemoveAll(filepath.Dir(f))
files = append(files, f)
}
valueFiles := strings.Join(files, ",")
expected, err := chartutil.ReadValues([]byte(strings.Join(tc.expected, "\n")))
if err != nil {
t.Errorf("unexpected error parsing values: %q", err)
}
runAndVerifyPackageCommandValues(t, tc.args, tc.flags, valueFiles, expected)
}
}
func runAndVerifyPackageCommandValues(t *testing.T, args []string, flags map[string]string, valueFiles string, expected chartutil.Values) {
outputDir, err := ioutil.TempDir("", "helm-package")
if err != nil {
t.Errorf("unexpected error creating temporary output directory: %q", err)
}
defer os.RemoveAll(outputDir)
if len(flags) == 0 {
flags = make(map[string]string)
}
flags["destination"] = outputDir
if len(valueFiles) > 0 {
flags["values"] = valueFiles
}
cmd := newPackageCmd(&bytes.Buffer{})
setFlags(cmd, flags)
err = cmd.RunE(cmd, args)
if err != nil {
t.Errorf("unexpected error: %q", err)
}
outputFile := filepath.Join(outputDir, "alpine-0.1.0.tgz")
verifyOutputChartExists(t, outputFile)
var actual chartutil.Values
actual, err = getChartValues(outputFile)
if err != nil {
t.Errorf("unexpected error extracting chart values: %q", err)
}
verifyValues(t, actual, expected)
}
func createValuesFile(data string) (string, error) {
outputDir, err := ioutil.TempDir("", "values-file")
if err != nil {
return "", err
}
outputFile := filepath.Join(outputDir, "values.yaml")
if err = ioutil.WriteFile(outputFile, []byte(data), 0755); err != nil {
os.RemoveAll(outputFile)
return "", err
}
return outputFile, nil
}
func getChartValues(chartPath string) (chartutil.Values, error) {
chart, err := chartutil.Load(chartPath)
if err != nil {
return nil, err
}
return chartutil.ReadValues([]byte(chart.Values.Raw))
}
func verifyValues(t *testing.T, actual, expected chartutil.Values) {
for key, value := range expected.AsMap() {
if got := actual[key]; got != value {
t.Errorf("Expected %q, got %q (%v)", value, got, actual)
}
}
}
func verifyOutputChartExists(t *testing.T, chartPath string) {
if chartFile, err := os.Stat(chartPath); err != nil {
t.Errorf("expected file %q, got err %q", chartPath, err)
} else if chartFile.Size() == 0 {
t.Errorf("file %q has zero bytes.", chartPath)
}
}
func setFlags(cmd *cobra.Command, flags map[string]string) { func setFlags(cmd *cobra.Command, flags map[string]string) {
dest := cmd.Flags() dest := cmd.Flags()
for f, v := range flags { for f, v := range flags {

@ -17,55 +17,50 @@ limitations under the License.
package main package main
import ( import (
"bytes" "io"
"testing" "testing"
"github.com/spf13/cobra"
"k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/release"
) )
func TestReleaseTesting(t *testing.T) { func TestReleaseTesting(t *testing.T) {
tests := []struct { tests := []releaseCase{
name string
args []string
flags []string
responses map[string]release.TestRun_Status
fail bool
}{
{ {
name: "basic test", name: "basic test",
args: []string{"example-release"}, args: []string{"example-release"},
flags: []string{}, flags: []string{},
responses: map[string]release.TestRun_Status{"PASSED: green lights everywhere": release.TestRun_SUCCESS}, responses: map[string]release.TestRun_Status{"PASSED: green lights everywhere": release.TestRun_SUCCESS},
fail: false, err: false,
}, },
{ {
name: "test failure", name: "test failure",
args: []string{"example-fail"}, args: []string{"example-fail"},
flags: []string{}, flags: []string{},
responses: map[string]release.TestRun_Status{"FAILURE: red lights everywhere": release.TestRun_FAILURE}, responses: map[string]release.TestRun_Status{"FAILURE: red lights everywhere": release.TestRun_FAILURE},
fail: true, err: true,
}, },
{ {
name: "test unknown", name: "test unknown",
args: []string{"example-unknown"}, args: []string{"example-unknown"},
flags: []string{}, flags: []string{},
responses: map[string]release.TestRun_Status{"UNKNOWN: yellow lights everywhere": release.TestRun_UNKNOWN}, responses: map[string]release.TestRun_Status{"UNKNOWN: yellow lights everywhere": release.TestRun_UNKNOWN},
fail: false, err: false,
}, },
{ {
name: "test error", name: "test error",
args: []string{"example-error"}, args: []string{"example-error"},
flags: []string{}, flags: []string{},
responses: map[string]release.TestRun_Status{"ERROR: yellow lights everywhere": release.TestRun_FAILURE}, responses: map[string]release.TestRun_Status{"ERROR: yellow lights everywhere": release.TestRun_FAILURE},
fail: true, err: true,
}, },
{ {
name: "test running", name: "test running",
args: []string{"example-running"}, args: []string{"example-running"},
flags: []string{}, flags: []string{},
responses: map[string]release.TestRun_Status{"RUNNING: things are happpeningggg": release.TestRun_RUNNING}, responses: map[string]release.TestRun_Status{"RUNNING: things are happpeningggg": release.TestRun_RUNNING},
fail: false, err: false,
}, },
{ {
name: "multiple tests example", name: "multiple tests example",
@ -78,29 +73,11 @@ func TestReleaseTesting(t *testing.T) {
"FAILURE: good thing u checked :)": release.TestRun_FAILURE, "FAILURE: good thing u checked :)": release.TestRun_FAILURE,
"RUNNING: things are happpeningggg yet again": release.TestRun_RUNNING, "RUNNING: things are happpeningggg yet again": release.TestRun_RUNNING,
"PASSED: feel free to party again": release.TestRun_SUCCESS}, "PASSED: feel free to party again": release.TestRun_SUCCESS},
fail: true, err: true,
}, },
} }
for _, tt := range tests { runReleaseCases(t, tests, func(c *helm.FakeClient, out io.Writer) *cobra.Command {
c := &helm.FakeClient{Responses: tt.responses} return newReleaseTestCmd(c, out)
})
buf := bytes.NewBuffer(nil)
cmd := newReleaseTestCmd(c, buf)
cmd.ParseFlags(tt.flags)
err := cmd.RunE(cmd, tt.args)
if err == nil && tt.fail {
t.Errorf("%q did not fail but should have failed", tt.name)
}
if err != nil {
if tt.fail {
continue
} else {
t.Errorf("%q reported error: %s", tt.name, err)
}
}
}
} }

@ -19,6 +19,7 @@ package main
import ( import (
"fmt" "fmt"
"io" "io"
"os"
"path/filepath" "path/filepath"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -86,10 +87,17 @@ func index(dir, url, mergeTo string) error {
return err return err
} }
if mergeTo != "" { if mergeTo != "" {
i2, err := repo.LoadIndexFile(mergeTo) // if index.yaml is missing then create an empty one to merge into
var i2 *repo.IndexFile
if _, err := os.Stat(mergeTo); os.IsNotExist(err) {
i2 = repo.NewIndexFile()
i2.WriteFile(mergeTo, 0755)
} else {
i2, err = repo.LoadIndexFile(mergeTo)
if err != nil { if err != nil {
return fmt.Errorf("Merge failed: %s", err) return fmt.Errorf("Merge failed: %s", err)
} }
}
i.Merge(i2) i.Merge(i2)
} }
i.SortEntries() i.SortEntries()

@ -112,6 +112,36 @@ func TestRepoIndexCmd(t *testing.T) {
if vs[0].Version != expectedVersion { if vs[0].Version != expectedVersion {
t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version) t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
} }
// test that index.yaml gets generated on merge even when it doesn't exist
if err := os.Remove(destIndex); err != nil {
t.Fatal(err)
}
c.ParseFlags([]string{"--merge", destIndex})
if err := c.RunE(c, []string{dir}); err != nil {
t.Error(err)
}
_, err = repo.LoadIndexFile(destIndex)
if err != nil {
t.Fatal(err)
}
// verify it didn't create an empty index.yaml and the merged happened
if len(index.Entries) != 2 {
t.Errorf("expected 2 entries, got %d: %#v", len(index.Entries), index.Entries)
}
vs = index.Entries["compressedchart"]
if len(vs) != 3 {
t.Errorf("expected 3 versions, got %d: %#v", len(vs), vs)
}
expectedVersion = "0.3.0"
if vs[0].Version != expectedVersion {
t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
}
} }
func linkOrCopy(old, new string) error { func linkOrCopy(old, new string) error {

@ -31,106 +31,58 @@ import (
"k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/release"
) )
func TestResetCmd(t *testing.T) { type resetCase struct {
home, err := ioutil.TempDir("", "helm_home") name string
if err != nil { err bool
t.Fatal(err) resp []*release.Release
removeHelmHome bool
force bool
expectedActions int
expectedOutput string
} }
defer os.Remove(home)
var buf bytes.Buffer func TestResetCmd(t *testing.T) {
c := &helm.FakeClient{}
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
if err := cmd.run(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fc.Actions()
if len(actions) != 3 {
t.Errorf("Expected 3 actions, got %d", len(actions))
}
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster."
if !strings.Contains(buf.String(), expected) {
t.Errorf("expected %q, got %q", expected, buf.String())
}
if _, err := os.Stat(home); err != nil {
t.Errorf("Helm home directory %s does not exists", home)
}
}
func TestResetCmd_removeHelmHome(t *testing.T) { verifyResetCmd(t, resetCase{
home, err := ioutil.TempDir("", "helm_home") name: "test reset command",
if err != nil { expectedActions: 3,
t.Fatal(err) expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
})
} }
defer os.Remove(home)
var buf bytes.Buffer func TestResetCmd_removeHelmHome(t *testing.T) {
c := &helm.FakeClient{} verifyResetCmd(t, resetCase{
fc := fake.NewSimpleClientset() name: "test reset command - remove helm home",
cmd := &resetCmd{
removeHelmHome: true, removeHelmHome: true,
out: &buf, expectedActions: 3,
home: helmpath.Home(home), expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
client: c, })
kubeClient: fc,
namespace: core.NamespaceDefault,
}
if err := cmd.run(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fc.Actions()
if len(actions) != 3 {
t.Errorf("Expected 3 actions, got %d", len(actions))
}
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster."
if !strings.Contains(buf.String(), expected) {
t.Errorf("expected %q, got %q", expected, buf.String())
}
if _, err := os.Stat(home); err == nil {
t.Errorf("Helm home directory %s already exists", home)
}
} }
func TestReset_deployedReleases(t *testing.T) { func TestReset_deployedReleases(t *testing.T) {
home, err := ioutil.TempDir("", "helm_home") verifyResetCmd(t, resetCase{
if err != nil { name: "test reset command - deployed releases",
t.Fatal(err) resp: []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
},
err: true,
expectedOutput: "there are still 1 deployed releases (Tip: use --force to remove Tiller. Releases will not be deleted.)",
})
} }
defer os.Remove(home)
var buf bytes.Buffer func TestReset_forceFlag(t *testing.T) {
resp := []*release.Release{ verifyResetCmd(t, resetCase{
name: "test reset command - force flag",
force: true,
resp: []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}), helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
} },
c := &helm.FakeClient{ expectedActions: 3,
Rels: resp, expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
} })
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
err = cmd.run()
expected := "there are still 1 deployed releases (Tip: use --force to remove Tiller. Releases will not be deleted.)"
if !strings.Contains(err.Error(), expected) {
t.Errorf("unexpected error: %v", err)
}
if _, err := os.Stat(home); err != nil {
t.Errorf("Helm home directory %s does not exists", home)
}
} }
func TestReset_forceFlag(t *testing.T) { func verifyResetCmd(t *testing.T, tc resetCase) {
home, err := ioutil.TempDir("", "helm_home") home, err := ioutil.TempDir("", "helm_home")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -138,33 +90,42 @@ func TestReset_forceFlag(t *testing.T) {
defer os.Remove(home) defer os.Remove(home)
var buf bytes.Buffer var buf bytes.Buffer
resp := []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
}
c := &helm.FakeClient{ c := &helm.FakeClient{
Rels: resp, Rels: tc.resp,
} }
fc := fake.NewSimpleClientset() fc := fake.NewSimpleClientset()
cmd := &resetCmd{ cmd := &resetCmd{
force: true, removeHelmHome: tc.removeHelmHome,
force: tc.force,
out: &buf, out: &buf,
home: helmpath.Home(home), home: helmpath.Home(home),
client: c, client: c,
kubeClient: fc, kubeClient: fc,
namespace: core.NamespaceDefault, namespace: core.NamespaceDefault,
} }
if err := cmd.run(); err != nil {
err = cmd.run()
if !tc.err && err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
got := buf.String()
if tc.err {
got = err.Error()
}
actions := fc.Actions() actions := fc.Actions()
if len(actions) != 3 { if tc.expectedActions > 0 && len(actions) != tc.expectedActions {
t.Errorf("Expected 3 actions, got %d", len(actions)) t.Errorf("Expected %d actions, got %d", tc.expectedActions, len(actions))
} }
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster." if !strings.Contains(got, tc.expectedOutput) {
if !strings.Contains(buf.String(), expected) { t.Errorf("expected %q, got %q", tc.expectedOutput, got)
t.Errorf("expected %q, got %q", expected, buf.String())
} }
if _, err := os.Stat(home); err != nil { _, err = os.Stat(home)
if !tc.removeHelmHome && err != nil {
t.Errorf("Helm home directory %s does not exists", home) t.Errorf("Helm home directory %s does not exists", home)
} }
if tc.removeHelmHome && err == nil {
t.Errorf("Helm home directory %s exists", home)
}
} }

@ -117,31 +117,10 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
} else { } else {
return err return err
} }
// verify specified templates exist relative to chart
rf := []string{}
var af string
var err error
if len(t.renderFiles) > 0 {
for _, f := range t.renderFiles {
if !filepath.IsAbs(f) {
af, err = filepath.Abs(filepath.Join(t.chartPath, f))
if err != nil {
return fmt.Errorf("could not resolve template path: %s", err)
}
} else {
af = f
}
rf = append(rf, af)
if _, err := os.Stat(af); err != nil {
return fmt.Errorf("could not resolve template path: %s", err)
}
}
}
// verify that output-dir exists if provided // verify that output-dir exists if provided
if t.outputDir != "" { if t.outputDir != "" {
_, err = os.Stat(t.outputDir) _, err := os.Stat(t.outputDir)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fmt.Errorf("output-dir '%s' does not exist", t.outputDir) return fmt.Errorf("output-dir '%s' does not exist", t.outputDir)
} }
@ -232,19 +211,7 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
m := tiller.Manifest{Name: k, Content: v, Head: &util.SimpleHead{Kind: h}} m := tiller.Manifest{Name: k, Content: v, Head: &util.SimpleHead{Kind: h}}
listManifests = append(listManifests, m) listManifests = append(listManifests, m)
} }
in := func(needle string, haystack []string) bool {
// make needle path absolute
d := strings.Split(needle, string(os.PathSeparator))
dd := d[1:]
an := filepath.Join(t.chartPath, strings.Join(dd, string(os.PathSeparator)))
for _, h := range haystack {
if h == an {
return true
}
}
return false
}
if settings.Debug { if settings.Debug {
rel := &release.Release{ rel := &release.Release{
Name: t.releaseName, Name: t.releaseName,
@ -257,10 +224,45 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
printRelease(os.Stdout, rel) printRelease(os.Stdout, rel)
} }
for _, m := range tiller.SortByKind(listManifests) { var manifestsToRender []tiller.Manifest
if len(t.renderFiles) > 0 && !in(m.Name, rf) {
continue // if we have a list of files to render, then check that each of the
// provided files exists in the chart.
if len(t.renderFiles) > 0 {
for _, f := range t.renderFiles {
missing := true
if !filepath.IsAbs(f) {
newF, err := filepath.Abs(filepath.Join(t.chartPath, f))
if err != nil {
return fmt.Errorf("could not turn template path %s into absolute path: %s", f, err)
}
f = newF
} }
for _, manifest := range listManifests {
manifestPathSplit := strings.Split(manifest.Name, string(filepath.Separator))
// remove the chart name from the path
manifestPathSplit = manifestPathSplit[1:]
toJoin := append([]string{t.chartPath}, manifestPathSplit...)
manifestPath := filepath.Join(toJoin...)
// if the filepath provided matches a manifest path in the
// chart, render that manifest
if f == manifestPath {
manifestsToRender = append(manifestsToRender, manifest)
missing = false
}
}
if missing {
return fmt.Errorf("could not find template %s in chart", f)
}
}
} else {
// no renderFiles provided, render all manifests in the chart
manifestsToRender = listManifests
}
for _, m := range tiller.SortByKind(manifestsToRender) {
data := m.Content data := m.Content
b := filepath.Base(m.Name) b := filepath.Base(m.Name)
if !t.showNotes && b == "NOTES.txt" { if !t.showNotes && b == "NOTES.txt" {

@ -27,10 +27,13 @@ import (
"testing" "testing"
) )
var chartPath = "./../../pkg/chartutil/testdata/subpop/charts/subchart1" var (
subchart1ChartPath = "./../../pkg/chartutil/testdata/subpop/charts/subchart1"
frobnitzChartPath = "./../../pkg/chartutil/testdata/frobnitz"
)
func TestTemplateCmd(t *testing.T) { func TestTemplateCmd(t *testing.T) {
absChartPath, err := filepath.Abs(chartPath) subchart1AbsChartPath, err := filepath.Abs(subchart1ChartPath)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -40,74 +43,95 @@ func TestTemplateCmd(t *testing.T) {
args []string args []string
expectKey string expectKey string
expectValue string expectValue string
expectError string
}{ }{
{ {
name: "check_name", name: "check_name",
desc: "check for a known name in chart", desc: "check for a known name in chart",
args: []string{chartPath}, args: []string{subchart1ChartPath},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: nginx", expectValue: "protocol: TCP\n name: nginx",
}, },
{ {
name: "check_set_name", name: "check_set_name",
desc: "verify --set values exist", desc: "verify --set values exist",
args: []string{chartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"}, args: []string{subchart1ChartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache", expectValue: "protocol: TCP\n name: apache",
}, },
{ {
name: "check_execute", name: "check_execute",
desc: "verify --execute single template", desc: "verify --execute single template",
args: []string{chartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"}, args: []string{subchart1ChartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache", expectValue: "protocol: TCP\n name: apache",
}, },
{
name: "check_execute_non_existent",
desc: "verify --execute fails on a template that doesnt exist",
args: []string{subchart1ChartPath, "-x", "templates/thisdoesntexist.yaml"},
expectError: "could not find template",
},
{ {
name: "check_execute_absolute", name: "check_execute_absolute",
desc: "verify --execute single template", desc: "verify --execute single template",
args: []string{chartPath, "-x", absChartPath + "/" + "templates/service.yaml", "--set", "service.name=apache"}, args: []string{subchart1ChartPath, "-x", subchart1AbsChartPath + "/" + "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache", expectValue: "protocol: TCP\n name: apache",
}, },
{
name: "check_execute_subchart_template",
desc: "verify --execute single template on a subchart template",
args: []string{subchart1ChartPath, "-x", "charts/subcharta/templates/service.yaml", "--set", "subcharta.service.name=foobar"},
expectKey: "subchart1/charts/subcharta/templates/service.yaml",
expectValue: "protocol: TCP\n name: foobar",
},
{
name: "check_execute_subchart_template_for_tgz_subchart",
desc: "verify --execute single template on a subchart template where the subchart is a .tgz in the chart directory",
args: []string{frobnitzChartPath, "-x", "charts/mariner/templates/placeholder.tpl", "--set", "mariner.name=moon"},
expectKey: "frobnitz/charts/mariner/templates/placeholder.tpl",
expectValue: "Goodbye moon",
},
{ {
name: "check_namespace", name: "check_namespace",
desc: "verify --namespace", desc: "verify --namespace",
args: []string{chartPath, "--namespace", "test"}, args: []string{subchart1ChartPath, "--namespace", "test"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "namespace: \"test\"", expectValue: "namespace: \"test\"",
}, },
{ {
name: "check_release_name", name: "check_release_name",
desc: "verify --release exists", desc: "verify --release exists",
args: []string{chartPath, "--name", "test"}, args: []string{subchart1ChartPath, "--name", "test"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "release-name: \"test\"", expectValue: "release-name: \"test\"",
}, },
{ {
name: "check_notes", name: "check_notes",
desc: "verify --notes shows notes", desc: "verify --notes shows notes",
args: []string{chartPath, "--notes", "true"}, args: []string{subchart1ChartPath, "--notes", "true"},
expectKey: "subchart1/templates/NOTES.txt", expectKey: "subchart1/templates/NOTES.txt",
expectValue: "Sample notes for subchart1", expectValue: "Sample notes for subchart1",
}, },
{ {
name: "check_values_files", name: "check_values_files",
desc: "verify --values files values exist", desc: "verify --values files values exist",
args: []string{chartPath, "--values", chartPath + "/charts/subchartA/values.yaml"}, args: []string{subchart1ChartPath, "--values", subchart1ChartPath + "/charts/subchartA/values.yaml"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "name: apache", expectValue: "name: apache",
}, },
{ {
name: "check_name_template", name: "check_name_template",
desc: "verify --name-template result exists", desc: "verify --name-template result exists",
args: []string{chartPath, "--name-template", "foobar-{{ b64enc \"abc\" }}-baz"}, args: []string{subchart1ChartPath, "--name-template", "foobar-{{ b64enc \"abc\" }}-baz"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "release-name: \"foobar-YWJj-baz\"", expectValue: "release-name: \"foobar-YWJj-baz\"",
}, },
{ {
name: "check_kube_version", name: "check_kube_version",
desc: "verify --kube-version overrides the kubernetes version", desc: "verify --kube-version overrides the kubernetes version",
args: []string{chartPath, "--kube-version", "1.6"}, args: []string{subchart1ChartPath, "--kube-version", "1.6"},
expectKey: "subchart1/templates/service.yaml", expectKey: "subchart1/templates/service.yaml",
expectValue: "kube-version/major: \"1\"\n kube-version/minor: \"6\"\n kube-version/gitversion: \"v1.6.0\"", expectValue: "kube-version/major: \"1\"\n kube-version/minor: \"6\"\n kube-version/gitversion: \"v1.6.0\"",
}, },
@ -115,7 +139,8 @@ func TestTemplateCmd(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(T *testing.T) { tt := tt
t.Run(tt.name, func(t *testing.T) {
// capture stdout // capture stdout
old := os.Stdout old := os.Stdout
r, w, _ := os.Pipe() r, w, _ := os.Pipe()
@ -125,8 +150,20 @@ func TestTemplateCmd(t *testing.T) {
cmd := newTemplateCmd(out) cmd := newTemplateCmd(out)
cmd.SetArgs(tt.args) cmd.SetArgs(tt.args)
err := cmd.Execute() err := cmd.Execute()
if err != nil {
t.Errorf("expected: %v, got %v", tt.expectValue, err) if tt.expectError != "" {
if err == nil {
t.Errorf("expected err: %s, but no error occurred", tt.expectError)
}
// non nil error, check if it contains the expected error
if strings.Contains(err.Error(), tt.expectError) {
// had the error we were looking for, this test case is
// done
return
}
t.Fatalf("expected err: %q, got: %q", tt.expectError, err)
} else if err != nil {
t.Errorf("expected no error, got %v", err)
} }
// restore stdout // restore stdout
w.Close() w.Close()

@ -130,7 +130,7 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command {
f.StringVar(&upgrade.version, "version", "", "specify the exact chart version to use. If this is not specified, the latest version is used") f.StringVar(&upgrade.version, "version", "", "specify the exact chart version to use. If this is not specified, the latest version is used")
f.Int64Var(&upgrade.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") f.Int64Var(&upgrade.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&upgrade.resetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart") f.BoolVar(&upgrade.resetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "when upgrading, reuse the last release's values, and merge in any new values. If '--reset-values' is specified, this is ignored.") f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.")
f.BoolVar(&upgrade.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&upgrade.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.StringVar(&upgrade.repoURL, "repo", "", "chart repository url where to locate the requested chart") f.StringVar(&upgrade.repoURL, "repo", "", "chart repository url where to locate the requested chart")
f.StringVar(&upgrade.username, "username", "", "chart repository username where to locate the requested chart") f.StringVar(&upgrade.username, "username", "", "chart repository username where to locate the requested chart")

@ -0,0 +1,37 @@
# Custom Resource Definitions
This section of the Best Practices Guide deals with creating and using Custom Resource Definition
objects.
When working with Custom Resource Definitions (CRDs), it is important to distinguish
two different pieces:
- There is a declaration of a CRD. This is the YAML file that has the kind `CustomResourceDefinition`
- Then there are resources that _use_ the CRD. Say a CRD defines `foo.example.com/v1`. Any resource
that has `apiVersion: example.com/v1` and kind `Foo` is a resource that uses the CRD.
## Install a CRD Declaration Before Using the Resource
Helm is optimized to load as many resources into Kubernetes as fast as possible.
By design, Kubernetes can take an entire set of manifests and bring them all
online (this is called the reconciliation loop).
But there's a difference with CRDs.
For a CRD, the declaration must be registered before any resources of that CRD's
kind(s) can be used. And the registration process sometimes takes a few seconds.
### Method 1: Separate Charts
One way to do this is to put the CRD definition in one chart, and then put any
resources that use that CRD in _another_ chart.
In this method, each chart must be installed separately.
### Method 2: Pre-install Hooks
To package the two together, add a `pre-install` hook to the CRD definition so
that it is fully installed before the rest of the chart is executed.
Note that if you create the CRD with a `pre-install` hook, that CRD definition
will not be deleted when `helm delete` is run.
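As a minimal sketch of Method 2 (the group, kind, and resource names below are placeholders, not part of this guide), a CRD declaration carrying a `pre-install` hook could look like this:
```yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  # Placeholder name; for a CRD this must be <plural>.<group>
  name: foos.example.com
  annotations:
    # Run this manifest as a pre-install hook so the CRD is registered
    # before the rest of the chart's resources are loaded
    "helm.sh/hook": pre-install
spec:
  group: example.com
  version: v1
  scope: Namespaced
  names:
    plural: foos
    singular: foo
    kind: Foo
```
Resources with `apiVersion: example.com/v1` and kind `Foo` can then live in the same chart's regular templates, since the hook is fully installed before they are sent to Kubernetes.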

@ -155,7 +155,7 @@ Template comments should be used when documenting features of a template, such a
```yaml ```yaml
{{- /* {{- /*
mychart.shortname provides a 6 char truncated version of the release name. mychart.shortname provides a 6 char truncated version of the release name.
*/ }} */ -}}
{{ define "mychart.shortname" -}} {{ define "mychart.shortname" -}}
{{ .Release.Name | trunc 6 }} {{ .Release.Name | trunc 6 }}
{{- end -}} {{- end -}}

@ -1,38 +0,0 @@
# Third Party Resources
This section of the Best Practices Guide deals with creating and using Third Party Resource
objects.
When working with Third Party Resources (TPRs), it is important to distinguish
two different pieces:
- There is a declaration of a TPR. This is the YAML file that has the kind `ThirdPartyResource`
- Then there are resources that _use_ the TPR. Say a TPR defines `foo.example.com/v1`. Any resource
that has `apiVersion: example.com/v1` and kind `Foo` is a resource that uses the
TPR.
## Install a TPR Declaration Before Using the Resource
Helm is optimized to load as many resources into Kubernetes as fast as possible.
By design, Kubernetes can take an entire set of manifests and bring them all
online (this is called the reconciliation loop).
But there's a difference with TPRs.
For a TPR, the declaration must be registered before any resources of that TPRs
kind(s) can be used. And the registration process sometimes takes a few seconds.
### Method 1: Separate Charts
One way to do this is to put the TPR definition in one chart, and then put any
resources that use that TPR in _another_ chart.
In this method, each chart must be installed separately.
### Method 2: Pre-install Hooks
To package the two together, add a `pre-install` hook to the TPR definition so
that it is fully installed before the rest of the chart is executed.
Note that if you create the TPR with a `pre-install` hook, that TPR definition
will not be deleted when `helm delete` is run.

@ -275,7 +275,7 @@ fantastic-charts https://fantastic-charts.storage.googleapis.com
If the charts are backed by HTTP basic authentication, you can also supply the If the charts are backed by HTTP basic authentication, you can also supply the
username and password here: username and password here:
``console ```console
$ helm repo add fantastic-charts https://fantastic-charts.storage.googleapis.com --username my-username --password my-password $ helm repo add fantastic-charts https://fantastic-charts.storage.googleapis.com --username my-username --password my-password
$ helm repo list $ helm repo list
fantastic-charts https://fantastic-charts.storage.googleapis.com fantastic-charts https://fantastic-charts.storage.googleapis.com

@ -3,7 +3,7 @@
## Prerequisites ## Prerequisites
* Install the [gsutil](https://cloud.google.com/storage/docs/gsutil) tool. *We rely heavily on the gsutil rsync functionality* * Install the [gsutil](https://cloud.google.com/storage/docs/gsutil) tool. *We rely heavily on the gsutil rsync functionality*
* Be sure to have access to the helm binary * Be sure to have access to the Helm binary
* _Optional: We recommend you set [object versioning](https://cloud.google.com/storage/docs/gsutil/addlhelp/ObjectVersioningandConcurrencyControl#top_of_page) on your GCS bucket in case you accidentally delete something._ * _Optional: We recommend you set [object versioning](https://cloud.google.com/storage/docs/gsutil/addlhelp/ObjectVersioningandConcurrencyControl#top_of_page) on your GCS bucket in case you accidentally delete something._
## Set up a local chart repository directory ## Set up a local chart repository directory
@ -16,7 +16,7 @@ $ mv alpine-0.1.0.tgz fantastic-charts/
``` ```
## Generate an updated index.yaml ## Generate an updated index.yaml
Use helm to generate an updated index.yaml file by passing in the directory path and the url of the remote repository to the `helm repo index` command like this: Use Helm to generate an updated index.yaml file by passing in the directory path and the url of the remote repository to the `helm repo index` command like this:
```console ```console
$ helm repo index fantastic-charts/ --url https://fantastic-charts.storage.googleapis.com $ helm repo index fantastic-charts/ --url https://fantastic-charts.storage.googleapis.com

@ -105,7 +105,7 @@ data:
{{- end }} {{- end }}
``` ```
As mentioned above, **template names are global**. As a result of this, if two templates are declared with the same name the last occurance will be the one that is used. Since templates in subcharts are compiled together with top-level templates, it is best to name your templates with _chart specific names_. A popular naming convention is to prefix each defined template with the name of the chart: `{{ define "mychart.labels" }}`. As mentioned above, **template names are global**. As a result of this, if two templates are declared with the same name the last occurrence will be the one that is used. Since templates in subcharts are compiled together with top-level templates, it is best to name your templates with _chart specific names_. A popular naming convention is to prefix each defined template with the name of the chart: `{{ define "mychart.labels" }}`.
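For illustration only (the label keys shown are arbitrary), a chart-prefixed named template and a call site might look like this:
```yaml
{{- /* mychart.labels defines labels shared by this chart's resources */ -}}
{{- define "mychart.labels" }}
labels:
  generator: helm
  chart: {{ .Chart.Name }}
{{- end }}
```
Any template in the chart (or in a subchart) can then refer to it unambiguously with `{{ include "mychart.labels" . }}`.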
## Setting the scope of a template ## Setting the scope of a template

@ -27,8 +27,8 @@ wordpress/
README.md # OPTIONAL: A human-readable README file README.md # OPTIONAL: A human-readable README file
requirements.yaml # OPTIONAL: A YAML file listing dependencies for the chart requirements.yaml # OPTIONAL: A YAML file listing dependencies for the chart
values.yaml # The default configuration values for this chart values.yaml # The default configuration values for this chart
charts/ # OPTIONAL: A directory containing any charts upon which this chart depends. charts/ # A directory containing any charts upon which this chart depends.
templates/ # OPTIONAL: A directory of templates that, when combined with values, templates/ # A directory of templates that, when combined with values,
# will generate valid Kubernetes manifest files. # will generate valid Kubernetes manifest files.
templates/NOTES.txt # OPTIONAL: A plain text file containing short usage notes templates/NOTES.txt # OPTIONAL: A plain text file containing short usage notes
``` ```
@ -36,13 +36,12 @@ wordpress/
Helm reserves use of the `charts/` and `templates/` directories, and of Helm reserves use of the `charts/` and `templates/` directories, and of
the listed file names. Other files will be left as they are. the listed file names. Other files will be left as they are.
While the `charts` and `templates` directories are optional, there must be at least one chart dependency or template file for the chart to be valid.
## The Chart.yaml File ## The Chart.yaml File
The `Chart.yaml` file is required for a chart. It contains the following fields: The `Chart.yaml` file is required for a chart. It contains the following fields:
```yaml ```yaml
apiVersion: The chart API version, always "v1" (required)
name: The name of the chart (required) name: The name of the chart (required)
version: A SemVer 2 version (required) version: A SemVer 2 version (required)
kubeVersion: A SemVer range of compatible Kubernetes versions (optional) kubeVersion: A SemVer range of compatible Kubernetes versions (optional)
@ -850,7 +849,7 @@ considerations in mind:
- The `Chart.yaml` will be overwritten by the generator. - The `Chart.yaml` will be overwritten by the generator.
- Users will expect to modify such a chart's contents, so documentation - Users will expect to modify such a chart's contents, so documentation
should indicate how users can do so. should indicate how users can do so.
- All occurences of `<CHARTNAME>` will be replaced with the specified chart - All occurrences of `<CHARTNAME>` will be replaced with the specified chart
name so that starter charts can be used as templates. name so that starter charts can be used as templates.
Currently the only way to add a chart to `$HELM_HOME/starters` is to manually Currently the only way to add a chart to `$HELM_HOME/starters` is to manually

@ -180,4 +180,19 @@ It is also possible to define policies that determine when to delete correspondi
"helm.sh/hook-delete-policy": hook-succeeded "helm.sh/hook-delete-policy": hook-succeeded
``` ```
When using `"helm.sh/hook-delete-policy"` annotation, you can choose its value from `"hook-succeeded"` and `"hook-failed"`. The value `"hook-succeeded"` specifies Tiller should delete the hook after the hook is successfully executed, while the value `"hook-failed"`specifies Tiller should delete the hook if the hook failed during execution. You can choose one or more defined annotation values:
* `"hook-succeeded"` specifies Tiller should delete the hook after the hook is successfully executed.
* `"hook-failed"` specifies Tiller should delete the hook if the hook failed during execution.
* `"before-hook-creation"` specifies Tiller should delete the previous hook before the new hook is launched.
### Automatically delete hook from previous release
When a Helm release is being updated, it is possible that a hook resource already exists in the cluster. By default, Helm will try to create the resource again and fail with a `"... already exists"` error.
One might choose `"helm.sh/hook-delete-policy": "before-hook-creation"` over `"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"` because:
* It is convenient to keep a failed hook job resource in Kubernetes, for example for manual debugging.
* It may be necessary to keep a succeeded hook resource in Kubernetes for some reason.
* At the same time, it is not desirable to have to delete the hook resource manually before each Helm release upgrade.
The `"helm.sh/hook-delete-policy": "before-hook-creation"` annotation on a hook causes Tiller to remove the hook from the previous release, if there is one, before the new hook is launched, and it can be combined with the other policies.
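As a sketch (the Job name, image, and command are placeholders), a hook that should be replaced cleanly on every upgrade could be annotated like this:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  # Placeholder name for an upgrade hook Job
  name: example-db-migrate
  annotations:
    "helm.sh/hook": pre-upgrade
    # Delete any leftover hook Job from the previous release before creating this one
    "helm.sh/hook-delete-policy": before-hook-creation
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: migrate
          # Placeholder image and command
          image: alpine:3.7
          command: ["/bin/true"]
```
On the next `helm upgrade`, Tiller removes the existing Job before launching the new hook, which avoids the `"... already exists"` error described above.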

@ -10,7 +10,6 @@ Helm and Tiller.
- A Kubernetes cluster w/ kubectl (optional) - A Kubernetes cluster w/ kubectl (optional)
- The gRPC toolchain - The gRPC toolchain
- Git - Git
- Mercurial
## Building Helm/Tiller ## Building Helm/Tiller

@ -29,7 +29,10 @@ helm package [flags] [CHART_PATH] [...]
--key string name of the key to use when signing. Used if --sign is true --key string name of the key to use when signing. Used if --sign is true
--keyring string location of a public keyring (default "~/.gnupg/pubring.gpg") --keyring string location of a public keyring (default "~/.gnupg/pubring.gpg")
--save save packaged chart to local chart repository (default true) --save save packaged chart to local chart repository (default true)
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--sign use a PGP private key to sign this package --sign use a PGP private key to sign this package
-f, --values valueFiles specify values in a YAML file or a URL(can specify multiple) (default [])
--version string set the version on the chart to this semver version --version string set the version on the chart to this semver version
``` ```
@ -47,4 +50,4 @@ helm package [flags] [CHART_PATH] [...]
### SEE ALSO ### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes. * [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 8-Mar-2018 ###### Auto generated by spf13/cobra on 6-Apr-2018

@ -51,7 +51,7 @@ helm upgrade [RELEASE] [CHART]
--recreate-pods performs pods restart for the resource if applicable --recreate-pods performs pods restart for the resource if applicable
--repo string chart repository url where to locate the requested chart --repo string chart repository url where to locate the requested chart
--reset-values when upgrading, reset the values to the ones built into the chart --reset-values when upgrading, reset the values to the ones built into the chart
--reuse-values when upgrading, reuse the last release's values, and merge in any new values. If '--reset-values' is specified, this is ignored. --reuse-values when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) --set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300)
@ -81,4 +81,4 @@ helm upgrade [RELEASE] [CHART]
### SEE ALSO ### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes. * [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 20-Mar-2018 ###### Auto generated by spf13/cobra on 4-Apr-2018

@ -36,6 +36,15 @@ brew install kubernetes-helm
(Note: There is also a formula for emacs-helm, which is a different (Note: There is also a formula for emacs-helm, which is a different
project.) project.)
### From Chocolatey (Windows)
Members of the Kubernetes community have contributed a [Helm package](https://chocolatey.org/packages/kubernetes-helm) build to
[Chocolatey](https://chocolatey.org/). This package is generally up to date.
```
choco install kubernetes-helm
```
## From Script ## From Script
Helm now has an installer script that will automatically grab the latest version Helm now has an installer script that will automatically grab the latest version
@ -72,7 +81,7 @@ Building Helm from source is slightly more work, but is the best way to
go if you want to test the latest (pre-release) Helm version. go if you want to test the latest (pre-release) Helm version.
You must have a working Go environment with You must have a working Go environment with
[glide](https://github.com/Masterminds/glide) and Mercurial installed. [glide](https://github.com/Masterminds/glide) installed.
```console ```console
$ cd $GOPATH $ cd $GOPATH

@ -224,7 +224,7 @@ I am trying to remove stuff.
**Q: When I delete the Tiller deployment, how come all the releases are still there?** **Q: When I delete the Tiller deployment, how come all the releases are still there?**
Releases are stored in ConfigMaps inside of the `kube-system` namespace. You will Releases are stored in ConfigMaps inside of the `kube-system` namespace. You will
have to manually delete them to get rid of the record. have to manually delete them to get rid of the record, or use `helm delete --purge`.
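A hedged illustration of both options (the release name is made up; in Helm 2's default ConfigMap storage backend, Tiller labels its release records with `OWNER=TILLER`):

```console
$ helm delete --purge my-release                                  # remove the release and its stored record
$ kubectl get configmaps --namespace kube-system -l OWNER=TILLER  # inspect any remaining release records
```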
**Q: I want to delete my local Helm. Where are all its files?** **Q: I want to delete my local Helm. Where are all its files?**

@ -43,3 +43,9 @@ Helm works straightforward on OpenShift Online, OpenShift Dedicated, OpenShift C
## Platform9 ## Platform9
Helm Client and Helm Server (Tiller) are pre-installed with [Platform9 Managed Kubernetes](https://platform9.com/managed-kubernetes/?utm_source=helm_distro_notes). Platform9 provides access to all official Helm charts through the App Catalog UI and native Kubernetes CLI. Additional repositories can be manually added. Further details are available in this [Platform9 App Catalog article](https://platform9.com/support/deploying-kubernetes-apps-platform9-managed-kubernetes/?utm_source=helm_distro_notes). Helm Client and Helm Server (Tiller) are pre-installed with [Platform9 Managed Kubernetes](https://platform9.com/managed-kubernetes/?utm_source=helm_distro_notes). Platform9 provides access to all official Helm charts through the App Catalog UI and native Kubernetes CLI. Additional repositories can be manually added. Further details are available in this [Platform9 App Catalog article](https://platform9.com/support/deploying-kubernetes-apps-platform9-managed-kubernetes/?utm_source=helm_distro_notes).
## DC/OS
Helm (both client and server) has been tested and is working on Mesosphere's DC/OS 1.11 Kubernetes platform, and requires
no additional configuration.

@ -134,7 +134,7 @@ downloaders:
If such a plugin is installed, Helm can interact with the repository using the specified If such a plugin is installed, Helm can interact with the repository using the specified
protocol scheme by invoking the `command`. The special repository shall be added protocol scheme by invoking the `command`. The special repository shall be added
similarily to the regular ones: `helm repo add favorite myprotocol://example.com/` similarly to the regular ones: `helm repo add favorite myprotocol://example.com/`
The rules for the special repos are the same as for the regular ones: Helm must be able The rules for the special repos are the same as for the regular ones: Helm must be able
to download the `index.yaml` file in order to discover and cache the list of to download the `index.yaml` file in order to discover and cache the list of
available Charts. available Charts.
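A rough end-to-end sketch from the user's side; the plugin URL, protocol scheme, and chart name are hypothetical:

```console
$ helm plugin install https://example.com/helm-myprotocol   # hypothetical downloader plugin
$ helm repo add favorite myprotocol://example.com/charts
$ helm fetch favorite/mychart                                # Helm invokes the plugin's command to download
```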

@ -4,13 +4,6 @@ Helm has provenance tools which help chart users verify the integrity and origin
of a package. Using industry-standard tools based on PKI, GnuPG, and well-respected of a package. Using industry-standard tools based on PKI, GnuPG, and well-respected
package managers, Helm can generate and verify signature files. package managers, Helm can generate and verify signature files.
**Note:**
Version 2.0.0-alpha.4 introduced a system for verifying the authenticity of charts.
While we do not anticipate that any major changes will be made to the file formats
or provenancing algorithms, this portion of Helm is not considered _frozen_ until
2.0.0-RC1 is released. The original plan for this feature can be found
[at issue 983](https://github.com/kubernetes/helm/issues/983).
## Overview ## Overview
Integrity is established by comparing a chart to a provenance record. Provenance Integrity is established by comparing a chart to a provenance record. Provenance

@ -4,11 +4,11 @@ In Kubernetes, granting a role to an application-specific service account is a b
Bitnami also has a fantastic guide for [configuring RBAC in your cluster](https://docs.bitnami.com/kubernetes/how-to/configure-rbac-in-your-kubernetes-cluster/) that takes you through RBAC basics. Bitnami also has a fantastic guide for [configuring RBAC in your cluster](https://docs.bitnami.com/kubernetes/how-to/configure-rbac-in-your-kubernetes-cluster/) that takes you through RBAC basics.
This guide is for users who want to restrict tiller's capabilities to install resources to certain namespaces, or to grant a helm client running access to a tiller instance. This guide is for users who want to restrict Tiller's capabilities to install resources to certain namespaces, or to grant a Helm client running access to a Tiller instance.
## Tiller and Role-based Access Control ## Tiller and Role-based Access Control
You can add a service account to Tiller using the `--service-account <NAME>` flag while you're configuring helm. As a prerequisite, you'll have to create a role binding which specifies a [role](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) and a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) name that have been set up in advance. You can add a service account to Tiller using the `--service-account <NAME>` flag while you're configuring Helm. As a prerequisite, you'll have to create a role binding which specifies a [role](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) and a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) name that have been set up in advance.
Once you have satisfied the prerequisite and have a service account with the correct permissions, you'll run a command like this: `helm init --service-account <NAME>` Once you have satisfied the prerequisite and have a service account with the correct permissions, you'll run a command like this: `helm init --service-account <NAME>`
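A compressed sketch of that flow, assuming the permissive cluster-admin binding used in the first example of this guide:

```console
$ kubectl create serviceaccount tiller --namespace kube-system
$ kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
$ helm init --service-account tiller
```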
@ -51,7 +51,7 @@ clusterrolebinding "tiller" created
$ helm init --service-account tiller $ helm init --service-account tiller
``` ```
### Example: Deploy tiller in a namespace, restricted to deploying resources only in that namespace ### Example: Deploy Tiller in a namespace, restricted to deploying resources only in that namespace
In the example above, we gave Tiller admin access to the entire cluster. You are not at all required to give Tiller cluster-admin access for it to work. Instead of specifying a ClusterRole or a ClusterRoleBinding, you can specify a Role and RoleBinding to limit Tiller's scope to a particular namespace. In the example above, we gave Tiller admin access to the entire cluster. You are not at all required to give Tiller cluster-admin access for it to work. Instead of specifying a ClusterRole or a ClusterRoleBinding, you can specify a Role and RoleBinding to limit Tiller's scope to a particular namespace.
@ -62,7 +62,7 @@ $ kubectl create serviceaccount tiller --namespace tiller-world
serviceaccount "tiller" created serviceaccount "tiller" created
``` ```
Define a Role that allows tiller to manage all resources in `tiller-world` like in `role-tiller.yaml`: Define a Role that allows Tiller to manage all resources in `tiller-world` like in `role-tiller.yaml`:
```yaml ```yaml
kind: Role kind: Role
@ -104,13 +104,13 @@ $ kubectl create -f rolebinding-tiller.yaml
rolebinding "tiller-binding" created rolebinding "tiller-binding" created
``` ```
Afterwards you can run `helm init` to install tiller in the `tiller-world` namespace. Afterwards you can run `helm init` to install Tiller in the `tiller-world` namespace.
```console ```console
$ helm init --service-account tiller --tiller-namespace tiller-world $ helm init --service-account tiller --tiller-namespace tiller-world
$HELM_HOME has been configured at /Users/awesome-user/.helm. $HELM_HOME has been configured at /Users/awesome-user/.helm.
Tiller (the helm server side component) has been installed into your Kubernetes Cluster. Tiller (the Helm server side component) has been installed into your Kubernetes Cluster.
Happy Helming! Happy Helming!
$ helm install nginx --tiller-namespace tiller-world --namespace tiller-world $ helm install nginx --tiller-namespace tiller-world --namespace tiller-world
@ -125,11 +125,11 @@ NAME READY STATUS RESTARTS AGE
wayfaring-yak-alpine 0/1 ContainerCreating 0 0s wayfaring-yak-alpine 0/1 ContainerCreating 0 0s
``` ```
### Example: Deploy tiller in a namespace, restricted to deploying resources in another namespace ### Example: Deploy Tiller in a namespace, restricted to deploying resources in another namespace
In the example above, we gave Tiller admin access to the namespace it was deployed inside. Now, let's limit Tiller's scope to deploy resources in a different namespace! In the example above, we gave Tiller admin access to the namespace it was deployed inside. Now, let's limit Tiller's scope to deploy resources in a different namespace!
For example, let's install tiller in the namespace `myorg-system` and allow tiller to deploy resources in the namespace `myorg-users`. For example, let's install Tiller in the namespace `myorg-system` and allow Tiller to deploy resources in the namespace `myorg-users`.
```console ```console
$ kubectl create namespace myorg-system $ kubectl create namespace myorg-system
@ -138,7 +138,7 @@ $ kubectl create serviceaccount tiller --namespace myorg-system
serviceaccount "tiller" created serviceaccount "tiller" created
``` ```
Define a Role that allows tiller to manage all resources in `myorg-users` like in `role-tiller.yaml`: Define a Role that allows Tiller to manage all resources in `myorg-users` like in `role-tiller.yaml`:
```yaml ```yaml
kind: Role kind: Role
@ -180,7 +180,7 @@ $ kubectl create -f rolebinding-tiller.yaml
rolebinding "tiller-binding" created rolebinding "tiller-binding" created
``` ```
We'll also need to grant tiller access to read configmaps in myorg-system so it can store release information. In `role-tiller-myorg-system.yaml`: We'll also need to grant Tiller access to read configmaps in myorg-system so it can store release information. In `role-tiller-myorg-system.yaml`:
```yaml ```yaml
kind: Role kind: Role
@ -224,11 +224,11 @@ rolebinding "tiller-binding" created
## Helm and Role-based Access Control ## Helm and Role-based Access Control
When running a helm client in a pod, in order for the helm client to talk to a tiller instance, it will need certain privileges to be granted. Specifically, the helm client will need to be able to create pods, forward ports and be able to list pods in the namespace where tiller is running (so it can find tiller). When running a Helm client in a pod, in order for the Helm client to talk to a Tiller instance, it will need certain privileges to be granted. Specifically, the Helm client will need to be able to create pods, forward ports and be able to list pods in the namespace where Tiller is running (so it can find Tiller).
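Roughly, those privileges amount to a Role like the hedged sketch below (kubectl's slash syntax for the `pods/portforward` subresource is assumed); the `helm-user.yaml` manifest referenced next is the authoritative form:

```console
$ kubectl create role helm-user --namespace tiller-world \
    --verb=list --verb=create \
    --resource=pods --resource=pods/portforward
```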
### Example: Deploy Helm in a namespace, talking to Tiller in another namespace ### Example: Deploy Helm in a namespace, talking to Tiller in another namespace
In this example, we will assume tiller is running in a namespace called `tiller-world` and that the helm client is running in a namespace called `helm-world`. By default, tiller is running in the `kube-system` namespace. In this example, we will assume Tiller is running in a namespace called `tiller-world` and that the Helm client is running in a namespace called `helm-world`. By default, Tiller is running in the `kube-system` namespace.
In `helm-user.yaml`: In `helm-user.yaml`:

@ -78,6 +78,7 @@ Platforms, distributions, and services that include Helm support.
- [Cabin](http://www.skippbox.com/cabin/) - Mobile App for Managing Kubernetes - [Cabin](http://www.skippbox.com/cabin/) - Mobile App for Managing Kubernetes
- [Qstack](https://qstack.com) - [Qstack](https://qstack.com)
- [Fabric8](https://fabric8.io) - Integrated development platform for Kubernetes - [Fabric8](https://fabric8.io) - Integrated development platform for Kubernetes
- [Jenkins X](http://jenkins-x.io/) - open source automated CI/CD for Kubernetes which uses Helm for [promoting](http://jenkins-x.io/about/features/#promotion) applications through [environments via GitOps](http://jenkins-x.io/about/features/#environments)
## Misc ## Misc

@ -197,7 +197,7 @@ From here on just repeat this process, continuously testing until you're happy w
## 6. Finalize the Release ## 6. Finalize the Release
When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure eveything is in order, then finally push the release tag. When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure everything is in order, then finally push the release tag.
```shell ```shell
git checkout $RELEASE_BRANCH_NAME git checkout $RELEASE_BRANCH_NAME

@ -71,7 +71,7 @@ Enabling this feature currently requires setting the `--storage=secret` flag in
Because of the relative longevity of Helm, the Helm chart ecosystem evolved without the immediate concern for cluster-wide control, and especially in the developer space this makes complete sense. However, charts are a kind of package that not only installs containers you may or may not have validated yourself, but may also install into more than one namespace. Because of the relative longevity of Helm, the Helm chart ecosystem evolved without the immediate concern for cluster-wide control, and especially in the developer space this makes complete sense. However, charts are a kind of package that not only installs containers you may or may not have validated yourself, but may also install into more than one namespace.
As with all shared software, in a controlled or shared environment you must validate all software you install yourself _before_ you install it. If you have secured Tiller with TLS and have installed it with permissions to only one or a subset of namespaces, some charts may fail to install -- but in these environments, that is exactly what you want. If you need to use the chart, you may have to work with the creator or modify it yourself in order to use it securely in a mulitenant cluster with proper RBAC rules applied. The `helm template` command renders the chart locally and displays the output. As with all shared software, in a controlled or shared environment you must validate all software you install yourself _before_ you install it. If you have secured Tiller with TLS and have installed it with permissions to only one or a subset of namespaces, some charts may fail to install -- but in these environments, that is exactly what you want. If you need to use the chart, you may have to work with the creator or modify it yourself in order to use it securely in a multitenant cluster with proper RBAC rules applied. The `helm template` command renders the chart locally and displays the output.
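For example (the chart name is illustrative), rendering locally lets you inspect every manifest a chart would create before Tiller ever sees it:

```console
$ helm fetch stable/mychart --untar
$ helm template ./mychart | less
```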
Once vetted, you can use Helm's provenance tools to [ensure the provenance and integrity of charts](provenance.md) that you use. Once vetted, you can use Helm's provenance tools to [ensure the provenance and integrity of charts](provenance.md) that you use.

@ -251,7 +251,7 @@ This configuration sends our client-side certificate to establish identity, uses
the client key for encryption, and uses the CA certificate to validate the remote the client key for encryption, and uses the CA certificate to validate the remote
Tiller's identity. Tiller's identity.
Typing a line that that is cumbersome, though. The shortcut is to move the key, Typing a line like that is cumbersome, though. The shortcut is to move the key,
cert, and CA into `$HELM_HOME`: cert, and CA into `$HELM_HOME`:
```console ```console

148
glide.lock generated

@ -1,58 +1,21 @@
hash: d93f565214b112cf8560e9cd2da2f3ab7852a1f19544569fc112bd4fb2d1d506 hash: 6837936360d447b64aa7a09d3c89c18ac5540b009a57fc4d3227af299bf40268
updated: 2018-03-08T14:06:06.497394911-08:00 updated: 2018-04-03T08:17:14.801847688-07:00
imports: imports:
- name: cloud.google.com/go - name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821 version: 3b1ae45394a234c385be014e9a488f2bb6eef821
repo: https://github.com/GoogleCloudPlatform/google-cloud-go.git
subpackages: subpackages:
- compute
- compute/metadata - compute/metadata
- internal - internal
- name: github.com/aokoli/goutils - name: github.com/aokoli/goutils
version: 9c37978a95bd5c709a15883b6242714ea6709e64 version: 9c37978a95bd5c709a15883b6242714ea6709e64
- name: github.com/asaskevich/govalidator - name: github.com/asaskevich/govalidator
version: 7664702784775e51966f0885f5cd27435916517b version: 7664702784775e51966f0885f5cd27435916517b
- name: github.com/aws/aws-sdk-go
version: 760741802ad40f49ae9fc4a69ef6706d2527d62e
subpackages:
- aws
- aws/awserr
- aws/awsutil
- aws/client
- aws/client/metadata
- aws/corehandlers
- aws/credentials
- aws/credentials/ec2rolecreds
- aws/credentials/endpointcreds
- aws/credentials/stscreds
- aws/defaults
- aws/ec2metadata
- aws/endpoints
- aws/request
- aws/session
- aws/signer/v4
- internal/shareddefaults
- private/protocol
- private/protocol/ec2query
- private/protocol/json/jsonutil
- private/protocol/jsonrpc
- private/protocol/query
- private/protocol/query/queryutil
- private/protocol/rest
- private/protocol/xml/xmlutil
- service/autoscaling
- service/ec2
- service/ecr
- service/elb
- service/elbv2
- service/kms
- service/sts
- name: github.com/Azure/go-ansiterm - name: github.com/Azure/go-ansiterm
version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
subpackages: subpackages:
- winterm - winterm
- name: github.com/Azure/go-autorest - name: github.com/Azure/go-autorest
version: e14a70c556c8e0db173358d1a903dca345a8e75e version: d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab
subpackages: subpackages:
- autorest - autorest
- autorest/adal - autorest/adal
@ -64,8 +27,6 @@ imports:
- quantile - quantile
- name: github.com/BurntSushi/toml - name: github.com/BurntSushi/toml
version: b26d9c308763d68093482582cea63d69be07a0f0 version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/chai2010/gettext-go
version: bf70f2a70fb1b1f36d90d671a72795984eab0fcb
- name: github.com/cpuguy83/go-md2man - name: github.com/cpuguy83/go-md2man
version: 71acacd42f85e5e82f70a55327789582a5200a90 version: 71acacd42f85e5e82f70a55327789582a5200a90
subpackages: subpackages:
@ -106,8 +67,9 @@ imports:
- pkg/jsonmessage - pkg/jsonmessage
- pkg/longpath - pkg/longpath
- pkg/mount - pkg/mount
- pkg/parsers
- pkg/stdcopy - pkg/stdcopy
- pkg/symlink - pkg/sysinfo
- pkg/system - pkg/system
- pkg/term - pkg/term
- pkg/term/windows - pkg/term/windows
@ -124,10 +86,6 @@ imports:
version: 449fdfce4d962303d702fec724ef0ad181c92528 version: 449fdfce4d962303d702fec724ef0ad181c92528
subpackages: subpackages:
- spdy - spdy
- name: github.com/emicklei/go-restful
version: ff4f55a206334ef123e4f79bbf348980da81ca46
subpackages:
- log
- name: github.com/evanphx/json-patch - name: github.com/evanphx/json-patch
version: 944e07253867aacae43c04b2e6a239005443f33a version: 944e07253867aacae43c04b2e6a239005443f33a
- name: github.com/exponent-io/jsonpath - name: github.com/exponent-io/jsonpath
@ -136,14 +94,12 @@ imports:
version: f6a740d52f961c60348ebb109adde9f4635d7540 version: f6a740d52f961c60348ebb109adde9f4635d7540
- name: github.com/ghodss/yaml - name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-ini/ini
version: 300e940a926eb277d3901b20bdfcc54928ad3642
- name: github.com/go-openapi/jsonpointer - name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- name: github.com/go-openapi/jsonreference - name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- name: github.com/go-openapi/spec - name: github.com/go-openapi/spec
version: 7abd5745472fff5eb3685386d5fb8bf38683154d version: 1de3e0542de65ad8d75452a595886fdd0befb363
- name: github.com/go-openapi/swag - name: github.com/go-openapi/swag
version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 version: f3f9494671f93fcff853e3c6e9e948b3eb71e590
- name: github.com/gobwas/glob - name: github.com/gobwas/glob
@ -179,6 +135,8 @@ imports:
version: 7d79101e329e5a3adf994758c578dab82b90c017 version: 7d79101e329e5a3adf994758c578dab82b90c017
- name: github.com/google/gofuzz - name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/google/uuid
version: 064e2069ce9c359c118179501254f67d7d37ba24
- name: github.com/googleapis/gnostic - name: github.com/googleapis/gnostic
version: 0c5108395e2debce0d731cf0287ddf7242066aba version: 0c5108395e2debce0d731cf0287ddf7242066aba
subpackages: subpackages:
@ -186,7 +144,7 @@ imports:
- compiler - compiler
- extensions - extensions
- name: github.com/gophercloud/gophercloud - name: github.com/gophercloud/gophercloud
version: 8183543f90d1aef267a5ecc209f2e0715b355acb version: 6da026c32e2d622cc242d32984259c77237aefe1
subpackages: subpackages:
- openstack - openstack
- openstack/identity/v2/tenants - openstack/identity/v2/tenants
@ -217,12 +175,8 @@ imports:
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap - name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jmespath/go-jmespath
version: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
- name: github.com/json-iterator/go - name: github.com/json-iterator/go
version: 36b14963da70d11297d313183d7e6388c8510e1e version: 13f86432b882000a51c6e610c620974462691a97
- name: github.com/juju/ratelimit
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
- name: github.com/mailru/easyjson - name: github.com/mailru/easyjson
version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d
subpackages: subpackages:
@ -234,7 +188,7 @@ imports:
- name: github.com/Masterminds/semver - name: github.com/Masterminds/semver
version: 517734cc7d6470c0d07130e40fd40bdeb9bcd3fd version: 517734cc7d6470c0d07130e40fd40bdeb9bcd3fd
- name: github.com/Masterminds/sprig - name: github.com/Masterminds/sprig
version: b217b9c388de2cacde4354c536e520c52c055563 version: 6b2a58267f6a8b1dc8e2eb5519b984008fa85e8c
- name: github.com/Masterminds/vcs - name: github.com/Masterminds/vcs
version: 3084677c2c188840777bff30054f2b553729d329 version: 3084677c2c188840777bff30054f2b553729d329
- name: github.com/mattn/go-runewidth - name: github.com/mattn/go-runewidth
@ -245,8 +199,6 @@ imports:
- pbutil - pbutil
- name: github.com/mitchellh/go-wordwrap - name: github.com/mitchellh/go-wordwrap
version: ad45545899c7b13c020ea92b2072220eefad42b8 version: ad45545899c7b13c020ea92b2072220eefad42b8
- name: github.com/naoina/go-stringutil
version: 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
- name: github.com/opencontainers/go-digest - name: github.com/opencontainers/go-digest
version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
- name: github.com/opencontainers/image-spec - name: github.com/opencontainers/image-spec
@ -283,8 +235,6 @@ imports:
version: 5bd2802263f21d8788851d5305584c82a5c75d7e version: 5bd2802263f21d8788851d5305584c82a5c75d7e
- name: github.com/russross/blackfriday - name: github.com/russross/blackfriday
version: 300106c228d52c8941d4b3de6054a6062a86dda3 version: 300106c228d52c8941d4b3de6054a6062a86dda3
- name: github.com/satori/go.uuid
version: f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
- name: github.com/shurcooL/sanitized_anchor_name - name: github.com/shurcooL/sanitized_anchor_name
version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
- name: github.com/sirupsen/logrus - name: github.com/sirupsen/logrus
@ -301,6 +251,8 @@ imports:
version: 81e90905daefcd6fd217b62423c0908922eadb30 version: 81e90905daefcd6fd217b62423c0908922eadb30
subpackages: subpackages:
- cast5 - cast5
- ed25519
- ed25519/internal/edwards25519
- openpgp - openpgp
- openpgp/armor - openpgp/armor
- openpgp/clearsign - openpgp/clearsign
@ -353,6 +305,10 @@ imports:
- unicode/bidi - unicode/bidi
- unicode/norm - unicode/norm
- width - width
- name: golang.org/x/time
version: f51c12702a4d776e4c1fa9b0fabab841babae631
subpackages:
- rate
- name: google.golang.org/appengine - name: google.golang.org/appengine
version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 version: 12d5545dc1cfa6047a286d5e853841b6471f4c19
subpackages: subpackages:
@ -390,20 +346,18 @@ imports:
- status - status
- tap - tap
- transport - transport
- name: gopkg.in/gcfg.v1
version: 27e4946190b4a327b539185f2b5b1f7c84730728
subpackages:
- scanner
- token
- types
- name: gopkg.in/inf.v0 - name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/warnings.v0 - name: gopkg.in/square/go-jose.v2
version: 8a331561fe74dadba6edfc59f3be66c22c3b065d version: f8f38de21b4dcd69d0413faf231983f5fd6634b1
subpackages:
- cipher
- json
- jwt
- name: gopkg.in/yaml.v2 - name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/api - name: k8s.io/api
version: 006a217681ae70cbacdd66a5e2fca1a61a8ff28e version: c699ec51538f0cfd4afa8bfcfe1e0779cafbe666
subpackages: subpackages:
- admission/v1beta1 - admission/v1beta1
- admissionregistration/v1alpha1 - admissionregistration/v1alpha1
@ -436,11 +390,11 @@ imports:
- storage/v1alpha1 - storage/v1alpha1
- storage/v1beta1 - storage/v1beta1
- name: k8s.io/apiextensions-apiserver - name: k8s.io/apiextensions-apiserver
version: a5bbfd114a9b122acd741c61d88c84812375d9e1 version: 898b0eda132e1aeac43a459785144ee4bf9b0a2e
subpackages: subpackages:
- pkg/features - pkg/features
- name: k8s.io/apimachinery - name: k8s.io/apimachinery
version: 68f9c3a1feb3140df59c67ced62d3a5df8e6c9c2 version: 54101a56dda9a0962bc48751c058eb4c546dcbb9
subpackages: subpackages:
- pkg/api/equality - pkg/api/equality
- pkg/api/errors - pkg/api/errors
@ -454,7 +408,7 @@ imports:
- pkg/apis/meta/v1 - pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured - pkg/apis/meta/v1/unstructured
- pkg/apis/meta/v1/validation - pkg/apis/meta/v1/validation
- pkg/apis/meta/v1alpha1 - pkg/apis/meta/v1beta1
- pkg/conversion - pkg/conversion
- pkg/conversion/queryparams - pkg/conversion/queryparams
- pkg/fields - pkg/fields
@ -472,6 +426,7 @@ imports:
- pkg/util/cache - pkg/util/cache
- pkg/util/clock - pkg/util/clock
- pkg/util/diff - pkg/util/diff
- pkg/util/duration
- pkg/util/errors - pkg/util/errors
- pkg/util/framer - pkg/util/framer
- pkg/util/httpstream - pkg/util/httpstream
@ -496,7 +451,7 @@ imports:
- third_party/forked/golang/netutil - third_party/forked/golang/netutil
- third_party/forked/golang/reflect - third_party/forked/golang/reflect
- name: k8s.io/apiserver - name: k8s.io/apiserver
version: 2a1092aaa7202e8f9b188281ff9424a014ce61c2 version: ea53f8588c655568158b4ff53f5ec6fa4ebfc332
subpackages: subpackages:
- pkg/apis/audit - pkg/apis/audit
- pkg/authentication/authenticator - pkg/authentication/authenticator
@ -507,7 +462,7 @@ imports:
- pkg/util/feature - pkg/util/feature
- pkg/util/flag - pkg/util/flag
- name: k8s.io/client-go - name: k8s.io/client-go
version: 78700dec6369ba22221b72770783300f143df150 version: 23781f4d6632d88e869066eaebb743857aa1ef9b
subpackages: subpackages:
- discovery - discovery
- discovery/fake - discovery/fake
@ -635,15 +590,26 @@ imports:
- listers/storage/v1 - listers/storage/v1
- listers/storage/v1alpha1 - listers/storage/v1alpha1
- listers/storage/v1beta1 - listers/storage/v1beta1
- pkg/apis/clientauthentication
- pkg/apis/clientauthentication/v1alpha1
- pkg/version - pkg/version
- plugin/pkg/client/auth - plugin/pkg/client/auth
- plugin/pkg/client/auth/azure - plugin/pkg/client/auth/azure
- plugin/pkg/client/auth/exec
- plugin/pkg/client/auth/gcp - plugin/pkg/client/auth/gcp
- plugin/pkg/client/auth/oidc - plugin/pkg/client/auth/oidc
- plugin/pkg/client/auth/openstack - plugin/pkg/client/auth/openstack
- rest - rest
- rest/fake - rest/fake
- rest/watch - rest/watch
- scale
- scale/scheme
- scale/scheme/appsint
- scale/scheme/appsv1beta1
- scale/scheme/appsv1beta2
- scale/scheme/autoscalingv1
- scale/scheme/extensionsint
- scale/scheme/extensionsv1beta1
- testing - testing
- third_party/forked/golang/template - third_party/forked/golang/template
- tools/auth - tools/auth
@ -670,13 +636,12 @@ imports:
- util/retry - util/retry
- util/workqueue - util/workqueue
- name: k8s.io/kube-openapi - name: k8s.io/kube-openapi
version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 version: 50ae88d24ede7b8bad68e23c805b5d3da5c8abaf
subpackages: subpackages:
- pkg/common
- pkg/util/proto - pkg/util/proto
- pkg/util/proto/validation - pkg/util/proto/validation
- name: k8s.io/kubernetes - name: k8s.io/kubernetes
version: 5fa2db2bd46ac79e5e00a4e6ed24191080aa463b version: a22f9fd34871d9dc9e5db2c02c713821d18ab2cd
subpackages: subpackages:
- pkg/api/events - pkg/api/events
- pkg/api/legacyscheme - pkg/api/legacyscheme
@ -686,7 +651,6 @@ imports:
- pkg/api/service - pkg/api/service
- pkg/api/testapi - pkg/api/testapi
- pkg/api/v1/pod - pkg/api/v1/pod
- pkg/api/v1/service
- pkg/apis/admission - pkg/apis/admission
- pkg/apis/admission/install - pkg/apis/admission/install
- pkg/apis/admission/v1beta1 - pkg/apis/admission/v1beta1
@ -799,9 +763,8 @@ imports:
- pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake - pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion - pkg/client/clientset_generated/internalclientset/typed/storage/internalversion
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake - pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake
- pkg/client/unversioned - pkg/client/conditions
- pkg/cloudprovider - pkg/cloudprovider
- pkg/cloudprovider/providers/aws
- pkg/controller - pkg/controller
- pkg/controller/daemon - pkg/controller/daemon
- pkg/controller/daemon/util - pkg/controller/daemon/util
@ -810,8 +773,8 @@ imports:
- pkg/controller/statefulset - pkg/controller/statefulset
- pkg/controller/volume/events - pkg/controller/volume/events
- pkg/controller/volume/persistentvolume - pkg/controller/volume/persistentvolume
- pkg/controller/volume/persistentvolume/metrics
- pkg/credentialprovider - pkg/credentialprovider
- pkg/credentialprovider/aws
- pkg/features - pkg/features
- pkg/fieldpath - pkg/fieldpath
- pkg/kubectl - pkg/kubectl
@ -830,6 +793,7 @@ imports:
- pkg/kubectl/util/hash - pkg/kubectl/util/hash
- pkg/kubectl/util/slice - pkg/kubectl/util/slice
- pkg/kubectl/util/term - pkg/kubectl/util/term
- pkg/kubectl/util/transport
- pkg/kubectl/validation - pkg/kubectl/validation
- pkg/kubelet/apis - pkg/kubelet/apis
- pkg/kubelet/types - pkg/kubelet/types
@ -837,6 +801,13 @@ imports:
- pkg/printers - pkg/printers
- pkg/printers/internalversion - pkg/printers/internalversion
- pkg/registry/rbac/validation - pkg/registry/rbac/validation
- pkg/scheduler/algorithm
- pkg/scheduler/algorithm/predicates
- pkg/scheduler/algorithm/priorities/util
- pkg/scheduler/api
- pkg/scheduler/schedulercache
- pkg/scheduler/util
- pkg/scheduler/volumebinder
- pkg/security/apparmor - pkg/security/apparmor
- pkg/serviceaccount - pkg/serviceaccount
- pkg/util/file - pkg/util/file
@ -858,14 +829,9 @@ imports:
- pkg/version - pkg/version
- pkg/volume - pkg/volume
- pkg/volume/util - pkg/volume/util
- pkg/watch/json - pkg/volume/util/fs
- plugin/pkg/scheduler/algorithm - pkg/volume/util/recyclerclient
- plugin/pkg/scheduler/algorithm/predicates - pkg/volume/util/types
- plugin/pkg/scheduler/algorithm/priorities/util
- plugin/pkg/scheduler/api
- plugin/pkg/scheduler/schedulercache
- plugin/pkg/scheduler/util
- plugin/pkg/scheduler/volumebinder
- name: k8s.io/utils - name: k8s.io/utils
version: aedf551cdb8b0119df3a19c65fde413a13b34997 version: aedf551cdb8b0119df3a19c65fde413a13b34997
subpackages: subpackages:
@ -874,8 +840,6 @@ imports:
- exec/testing - exec/testing
- name: vbom.ml/util - name: vbom.ml/util
version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394 version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394
repo: https://github.com/fvbommel/util.git
vcs: git
subpackages: subpackages:
- sortorder - sortorder
testImports: testImports:

@ -43,28 +43,20 @@ import:
- package: github.com/evanphx/json-patch - package: github.com/evanphx/json-patch
- package: github.com/BurntSushi/toml - package: github.com/BurntSushi/toml
version: ~0.3.0 version: ~0.3.0
- package: github.com/naoina/go-stringutil
version: ~0.1.0
- package: github.com/chai2010/gettext-go
- package: github.com/prometheus/client_golang - package: github.com/prometheus/client_golang
version: 0.8.0 version: 0.8.0
- package: vbom.ml/util - package: github.com/grpc-ecosystem/go-grpc-prometheus
repo: https://github.com/fvbommel/util.git
vcs: git
- package: k8s.io/kubernetes - package: k8s.io/kubernetes
version: 1.9.2 version: release-1.10
- package: k8s.io/client-go - package: k8s.io/client-go
version: ~6.0.0 version: kubernetes-1.10.0
- package: k8s.io/api - package: k8s.io/api
version: kubernetes-1.9.2 version: release-1.10
- package: k8s.io/apimachinery - package: k8s.io/apimachinery
version: kubernetes-1.9.2 version: release-1.10
- package: k8s.io/apiserver - package: k8s.io/apiserver
version: kubernetes-1.9.2 version: release-1.10
- package: cloud.google.com/go/compute
repo: https://github.com/GoogleCloudPlatform/google-cloud-go.git
testImports: testImports:
- package: github.com/stretchr/testify - package: github.com/stretchr/testify

@ -31,7 +31,7 @@ import (
// ApiVersionV1 is the API version number for version 1. // ApiVersionV1 is the API version number for version 1.
// //
// This is ApiVersionV1 instead of APIVersionV1 to match the protobuf-generated name. // This is ApiVersionV1 instead of APIVersionV1 to match the protobuf-generated name.
const ApiVersionV1 = "v1" const ApiVersionV1 = "v1" // nolint
// UnmarshalChartfile takes raw Chart.yaml data and unmarshals it. // UnmarshalChartfile takes raw Chart.yaml data and unmarshals it.
func UnmarshalChartfile(data []byte) (*chart.Metadata, error) { func UnmarshalChartfile(data []byte) (*chart.Metadata, error) {

@ -59,6 +59,9 @@ image:
tag: stable tag: stable
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service: service:
type: ClusterIP type: ClusterIP
port: 80 port: 80
@ -120,7 +123,6 @@ const defaultIgnore = `# Patterns to ignore when building packages.
const defaultIngress = `{{- if .Values.ingress.enabled -}} const defaultIngress = `{{- if .Values.ingress.enabled -}}
{{- $fullName := include "<CHARTNAME>.fullname" . -}} {{- $fullName := include "<CHARTNAME>.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}} {{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Ingress kind: Ingress

@ -175,7 +175,7 @@ func ToYaml(v interface{}) string {
// Swallow errors inside of a template. // Swallow errors inside of a template.
return "" return ""
} }
return string(data) return strings.TrimSuffix(string(data), "\n")
} }
// FromYaml converts a YAML document into a map[string]interface{}. // FromYaml converts a YAML document into a map[string]interface{}.

@ -72,10 +72,10 @@ func TestToConfig(t *testing.T) {
f := NewFiles(getTestFiles()) f := NewFiles(getTestFiles())
out := f.Glob("**/captain.txt").AsConfig() out := f.Glob("**/captain.txt").AsConfig()
as.Equal("captain.txt: The Captain\n", out) as.Equal("captain.txt: The Captain", out)
out = f.Glob("ship/**").AsConfig() out = f.Glob("ship/**").AsConfig()
as.Equal("captain.txt: The Captain\nstowaway.txt: Legatt\n", out) as.Equal("captain.txt: The Captain\nstowaway.txt: Legatt", out)
} }
func TestToSecret(t *testing.T) { func TestToSecret(t *testing.T) {
@ -84,7 +84,7 @@ func TestToSecret(t *testing.T) {
f := NewFiles(getTestFiles()) f := NewFiles(getTestFiles())
out := f.Glob("ship/**").AsSecrets() out := f.Glob("ship/**").AsSecrets()
as.Equal("captain.txt: VGhlIENhcHRhaW4=\nstowaway.txt: TGVnYXR0\n", out) as.Equal("captain.txt: VGhlIENhcHRhaW4=\nstowaway.txt: TGVnYXR0", out)
} }
func TestLines(t *testing.T) { func TestLines(t *testing.T) {
@ -99,7 +99,7 @@ func TestLines(t *testing.T) {
} }
func TestToYaml(t *testing.T) { func TestToYaml(t *testing.T) {
expect := "foo: bar\n" expect := "foo: bar"
v := struct { v := struct {
Foo string `json:"foo"` Foo string `json:"foo"`
}{ }{

@ -394,6 +394,21 @@ func processImportValues(c *chart.Chart) error {
b := make(map[string]interface{}, 0) b := make(map[string]interface{}, 0)
// import values from each dependency if specified in import-values // import values from each dependency if specified in import-values
for _, r := range reqs.Dependencies { for _, r := range reqs.Dependencies {
// only process raw requirements that are found in the chart's dependencies (enabled)
found := false
name := r.Name
for _, v := range c.Dependencies {
if v.Metadata.Name == r.Name {
found = true
}
if v.Metadata.Name == r.Alias {
found = true
name = r.Alias
}
}
if !found {
continue
}
if len(r.ImportValues) > 0 { if len(r.ImportValues) > 0 {
var outiv []interface{} var outiv []interface{}
for _, riv := range r.ImportValues { for _, riv := range r.ImportValues {
@ -404,7 +419,7 @@ func processImportValues(c *chart.Chart) error {
"parent": iv["parent"].(string), "parent": iv["parent"].(string),
} }
outiv = append(outiv, nm) outiv = append(outiv, nm)
s := r.Name + "." + nm["child"] s := name + "." + nm["child"]
// get child table // get child table
vv, err := cvals.Table(s) vv, err := cvals.Table(s)
if err != nil { if err != nil {
@ -420,7 +435,7 @@ func processImportValues(c *chart.Chart) error {
"parent": ".", "parent": ".",
} }
outiv = append(outiv, nm) outiv = append(outiv, nm)
s := r.Name + "." + nm["child"] s := name + "." + nm["child"]
vm, err := cvals.Table(s) vm, err := cvals.Table(s)
if err != nil { if err != nil {
log.Printf("Warning: ImportValues missing table: %v", err) log.Printf("Warning: ImportValues missing table: %v", err)

Binary file not shown.

@ -1 +1,3 @@
# This is a placeholder. # This is a placeholder.
Goodbye {{.Values.name | default "world"}}

@ -151,7 +151,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, ge
return u, nil, err return u, nil, err
} }
// Same as the ResolveChartVersion method, but returns the chart repositoryy. // ResolveChartVersionAndGetRepo is the same as the ResolveChartVersion method, but returns the chart repository.
func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*url.URL, *repo.ChartRepository, *getter.HttpGetter, error) { func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*url.URL, *repo.ChartRepository, *getter.HttpGetter, error) {
u, err := url.Parse(ref) u, err := url.Parse(ref)
if err != nil { if err != nil {
@ -167,7 +167,6 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
if err != nil { if err != nil {
return u, nil, nil, err return u, nil, nil, err
} }
g.SetCredentials(c.getRepoCredentials(nil))
if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
// In this case, we have to find the parent repo that contains this chart // In this case, we have to find the parent repo that contains this chart
@ -203,6 +202,7 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
repoName := p[0] repoName := p[0]
chartName := p[1] chartName := p[1]
rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories) rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)
if err != nil { if err != nil {
return u, nil, nil, err return u, nil, nil, err
} }
@ -211,6 +211,7 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
if err != nil { if err != nil {
return u, nil, nil, err return u, nil, nil, err
} }
g.SetCredentials(c.getRepoCredentials(r))
// Next, we need to load the index, and actually look up the chart. // Next, we need to load the index, and actually look up the chart.
i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name)) i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name))

@ -23,17 +23,18 @@ import (
"strings" "strings"
"k8s.io/helm/pkg/tlsutil" "k8s.io/helm/pkg/tlsutil"
"k8s.io/helm/pkg/urlutil"
"k8s.io/helm/pkg/version" "k8s.io/helm/pkg/version"
) )
//httpGetter is the efault HTTP(/S) backend handler //HttpGetter is the default HTTP(/S) backend handler
type HttpGetter struct { // TODO: change the name to HTTPGetter in Helm 3
type HttpGetter struct { //nolint
client *http.Client client *http.Client
username string username string
password string password string
} }
//SetCredentials sets the credentials for the getter
func (g *HttpGetter) SetCredentials(username, password string) { func (g *HttpGetter) SetCredentials(username, password string) {
g.username = username g.username = username
g.password = password g.password = password
@ -80,19 +81,11 @@ func newHTTPGetter(URL, CertFile, KeyFile, CAFile string) (Getter, error) {
// NewHTTPGetter constructs a valid http/https client as HttpGetter // NewHTTPGetter constructs a valid http/https client as HttpGetter
func NewHTTPGetter(URL, CertFile, KeyFile, CAFile string) (*HttpGetter, error) { func NewHTTPGetter(URL, CertFile, KeyFile, CAFile string) (*HttpGetter, error) {
var client HttpGetter var client HttpGetter
if CertFile != "" && KeyFile != "" { if (CertFile != "" && KeyFile != "") || CAFile != "" {
tlsConf, err := tlsutil.NewClientTLS(CertFile, KeyFile, CAFile) tlsConf, err := tlsutil.NewTLSConfig(URL, CertFile, KeyFile, CAFile)
if err != nil { if err != nil {
return &client, fmt.Errorf("can't create TLS config for client: %s", err.Error()) return &client, fmt.Errorf("can't create TLS config: %s", err.Error())
} }
tlsConf.BuildNameToCertificate()
sni, err := urlutil.ExtractHostname(URL)
if err != nil {
return &client, err
}
tlsConf.ServerName = sni
client.client = &http.Client{ client.client = &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
TLSClientConfig: tlsConf, TLSClientConfig: tlsConf,

@ -28,7 +28,7 @@ func TestHTTPGetter(t *testing.T) {
} }
if hg, ok := g.(*HttpGetter); !ok { if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an httpGetter") t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client != http.DefaultClient { } else if hg.client != http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a default HTTP client.") t.Fatal("Expected newHTTPGetter to return a default HTTP client.")
} }
@ -37,12 +37,24 @@ func TestHTTPGetter(t *testing.T) {
cd := "../../testdata" cd := "../../testdata"
join := filepath.Join join := filepath.Join
ca, pub, priv := join(cd, "ca.pem"), join(cd, "crt.pem"), join(cd, "key.pem") ca, pub, priv := join(cd, "ca.pem"), join(cd, "crt.pem"), join(cd, "key.pem")
g, err = newHTTPGetter("http://example.com/", pub, priv, ca) g, err = newHTTPGetter("https://example.com/", pub, priv, ca)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client == http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a non-default HTTP client")
}
if _, ok := g.(*HttpGetter); !ok { // Test with SSL, caFile only
t.Fatal("Expected newHTTPGetter to produce an httpGetter") g, err = newHTTPGetter("https://example.com/", "", "", ca)
if err != nil {
t.Fatal(err)
}
if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client == http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a non-default HTTP client")
} }
} }

@ -44,6 +44,8 @@ type Client struct {
// NewClient creates a new client. // NewClient creates a new client.
func NewClient(opts ...Option) *Client { func NewClient(opts ...Option) *Client {
var c Client var c Client
// set some sane defaults
c.Option(ConnectTimeout(5))
return c.Option(opts...) return c.Option(opts...)
} }
@ -344,8 +346,22 @@ func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.L
if err != nil { if err != nil {
return nil, err return nil, err
} }
var resp *rls.ListReleasesResponse
return s.Recv() for {
r, err := s.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if resp == nil {
resp = r
continue
}
resp.Releases = append(resp.Releases, r.GetReleases()[0])
}
return resp, nil
} }
// Executes tiller.InstallRelease RPC. // Executes tiller.InstallRelease RPC.

@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"testing"
"time"
)
func TestNewClient(t *testing.T) {
helmClient := NewClient()
if helmClient.opts.connectTimeout != 5*time.Second {
t.Errorf("expected default timeout duration to be 5 seconds, got %v", helmClient.opts.connectTimeout)
}
helmClient = NewClient(ConnectTimeout(60))
if helmClient.opts.connectTimeout != time.Minute {
t.Errorf("expected timeout duration to be 1 minute, got %v", helmClient.opts.connectTimeout)
}
}

@ -47,6 +47,7 @@ const (
const ( const (
HookSucceeded = "hook-succeeded" HookSucceeded = "hook-succeeded"
HookFailed = "hook-failed" HookFailed = "hook-failed"
BeforeHookCreation = "before-hook-creation"
) )
// FilterTestHooks filters the list of hooks and returns only testing hooks. // FilterTestHooks filters the list of hooks and returns only testing hooks.

@ -47,7 +47,7 @@ import (
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core"
conditions "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/kubectl/resource"
@ -56,7 +56,7 @@ import (
) )
const ( const (
// MissingGetHeader is added to Get's outout when a resource is not found. // MissingGetHeader is added to Get's output when a resource is not found.
MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n" MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n"
) )
@ -77,10 +77,12 @@ func New(config clientcmd.ClientConfig) *Client {
return &Client{ return &Client{
Factory: cmdutil.NewFactory(config), Factory: cmdutil.NewFactory(config),
SchemaCacheDir: clientcmd.RecommendedSchemaFile, SchemaCacheDir: clientcmd.RecommendedSchemaFile,
Log: func(_ string, _ ...interface{}) {}, Log: nopLogger,
} }
} }
var nopLogger = func(_ string, _ ...interface{}) {}
// ResourceActorFunc performs an action on a single resource. // ResourceActorFunc performs an action on a single resource.
type ResourceActorFunc func(*resource.Info) error type ResourceActorFunc func(*resource.Info) error
@ -205,7 +207,10 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep // an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
// track of tab widths. // track of tab widths.
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
p, _ := c.Printer(nil, printers.PrintOptions{}) p, err := cmdutil.PrinterForOptions(&printers.PrintOptions{})
if err != nil {
return "", err
}
for t, ot := range objs { for t, ot := range objs {
if _, err = buf.WriteString("==> " + t + "\n"); err != nil { if _, err = buf.WriteString("==> " + t + "\n"); err != nil {
return "", err return "", err
@ -401,7 +406,7 @@ func createPatch(mapping *meta.RESTMapping, target, current runtime.Object) ([]b
// While different objects need different merge types, the parent function // While different objects need different merge types, the parent function
// that calls this does not try to create a patch when the data (first // that calls this does not try to create a patch when the data (first
// returned object) is nil. We can skip calculating the the merge type as // returned object) is nil. We can skip calculating the merge type as
// the returned merge type is ignored. // the returned merge type is ignored.
if apiequality.Semantic.DeepEqual(oldData, newData) { if apiequality.Semantic.DeepEqual(oldData, newData) {
return nil, types.StrategicMergePatchType, nil return nil, types.StrategicMergePatchType, nil
@ -608,7 +613,8 @@ func (c *Client) AsVersionedObject(obj runtime.Object) (runtime.Object, error) {
return nil, err return nil, err
} }
versions := &runtime.VersionedObjects{} versions := &runtime.VersionedObjects{}
err = runtime.DecodeInto(c.Decoder(true), json, versions) decoder := unstructured.UnstructuredJSONScheme
err = runtime.DecodeInto(decoder, json, versions)
return versions.First(), err return versions.First(), err
} }
@ -686,7 +692,7 @@ func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Inf
return err return err
} }
//get an kubernetes resources's relation pods //get a kubernetes resource's related pods
// a kubernetes resource uses selector labels to relate to its pods // a kubernetes resource uses selector labels to relate to its pods
func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][]core.Pod) (map[string][]core.Pod, error) { func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][]core.Pod) (map[string][]core.Pod, error) {
if info == nil { if info == nil {

@ -18,8 +18,6 @@ package kube
import ( import (
"bytes" "bytes"
"encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -31,19 +29,20 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic"
"k8s.io/client-go/rest/fake" "k8s.io/client-go/rest/fake"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/printers" "k8s.io/kubernetes/pkg/kubectl/scheme"
watchjson "k8s.io/kubernetes/pkg/watch/json"
) )
var unstructuredSerializer = dynamic.ContentConfig().NegotiatedSerializer
func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser {
return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
} }
@ -117,31 +116,21 @@ func (f *fakeReaperFactory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, e
return f.reaper, nil return f.reaper, nil
} }
func newEventResponse(code int, e *watch.Event) (*http.Response, error) { type testClient struct {
dispatchedEvent, err := encodeAndMarshalEvent(e) *Client
if err != nil { *cmdtesting.TestFactory
return nil, err
}
header := http.Header{}
header.Set("Content-Type", runtime.ContentTypeJSON)
body := ioutil.NopCloser(bytes.NewReader(dispatchedEvent))
return &http.Response{StatusCode: code, Header: header, Body: body}, nil
} }
func encodeAndMarshalEvent(e *watch.Event) ([]byte, error) { func newTestClient() *testClient {
encodedEvent, err := watchjson.Object(testapi.Default.Codec(), e) tf := cmdtesting.NewTestFactory()
if err != nil { c := &Client{
return nil, err Factory: tf,
Log: nopLogger,
} }
return &testClient{
return json.Marshal(encodedEvent) Client: c,
TestFactory: tf,
} }
func newTestClient(f cmdutil.Factory) *Client {
c := New(nil)
c.Factory = f
return c
} }
func TestUpdate(t *testing.T) { func TestUpdate(t *testing.T) {
@ -153,10 +142,11 @@ func TestUpdate(t *testing.T) {
var actions []string var actions []string
f, tf, codec, _ := cmdtesting.NewAPIFactory() tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
tf.UnstructuredClient = &fake.RESTClient{ tf.UnstructuredClient = &fake.RESTClient{
GroupVersion: schema.GroupVersion{Version: "v1"}, GroupVersion: schema.GroupVersion{Version: "v1"},
NegotiatedSerializer: dynamic.ContentConfig().NegotiatedSerializer, NegotiatedSerializer: unstructuredSerializer,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method p, m := req.URL.Path, req.Method
actions = append(actions, p+":"+m) actions = append(actions, p+":"+m)
@ -190,9 +180,11 @@ func TestUpdate(t *testing.T) {
}), }),
} }
c := newTestClient()
reaper := &fakeReaper{} reaper := &fakeReaper{}
rf := &fakeReaperFactory{Factory: f, reaper: reaper} rf := &fakeReaperFactory{Factory: tf, reaper: reaper}
c := newTestClient(rf) c.Client.Factory = rf
codec := legacyscheme.Codecs.LegacyCodec(scheme.Versions...)
if err := c.Update(core.NamespaceDefault, objBody(codec, &listA), objBody(codec, &listB), false, false, 0, false); err != nil { if err := c.Update(core.NamespaceDefault, objBody(codec, &listA), objBody(codec, &listB), false, false, 0, false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -251,54 +243,35 @@ func TestBuild(t *testing.T) {
}, },
} }
c := newTestClient()
for _, tt := range tests { for _, tt := range tests {
f, _, _, _ := cmdtesting.NewAPIFactory() t.Run(tt.name, func(t *testing.T) {
c := newTestClient(f) c.Cleanup()
// Test for an invalid manifest // Test for an invalid manifest
infos, err := c.Build(tt.namespace, tt.reader) infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && !tt.err { if err != nil && !tt.err {
t.Errorf("%q. Got error message when no error should have occurred: %v", tt.name, err) t.Errorf("Got error message when no error should have occurred: %v", err)
} else if err != nil && strings.Contains(err.Error(), "--validate=false") { } else if err != nil && strings.Contains(err.Error(), "--validate=false") {
t.Errorf("%q. error message was not scrubbed", tt.name) t.Error("error message was not scrubbed")
} }
if len(infos) != tt.count { if len(infos) != tt.count {
t.Errorf("%q. expected %d result objects, got %d", tt.name, tt.count, len(infos)) t.Errorf("expected %d result objects, got %d", tt.count, len(infos))
} }
})
} }
} }
type testPrinter struct {
Objects []runtime.Object
Err error
printers.ResourcePrinter
}
func (t *testPrinter) PrintObj(obj runtime.Object, out io.Writer) error {
t.Objects = append(t.Objects, obj)
fmt.Fprintf(out, "%#v", obj)
return t.Err
}
func (t *testPrinter) HandledResources() []string {
return []string{}
}
func (t *testPrinter) AfterPrint(io.Writer, string) error {
return t.Err
}
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
list := newPodList("starfish", "otter") list := newPodList("starfish", "otter")
f, tf, _, _ := cmdtesting.NewAPIFactory() c := newTestClient()
tf.Printer = &testPrinter{} defer c.Cleanup()
tf.UnstructuredClient = &fake.RESTClient{ c.TestFactory.UnstructuredClient = &fake.RESTClient{
GroupVersion: schema.GroupVersion{Version: "v1"}, GroupVersion: schema.GroupVersion{Version: "v1"},
NegotiatedSerializer: dynamic.ContentConfig().NegotiatedSerializer, NegotiatedSerializer: unstructuredSerializer,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method p, m := req.URL.Path, req.Method
//actions = append(actions, p+":"+m)
t.Logf("got request %s %s", p, m) t.Logf("got request %s %s", p, m)
switch { switch {
case p == "/namespaces/default/pods/starfish" && m == "GET": case p == "/namespaces/default/pods/starfish" && m == "GET":
@ -311,7 +284,6 @@ func TestGet(t *testing.T) {
} }
}), }),
} }
c := newTestClient(f)
// Test Success // Test Success
data := strings.NewReader("kind: Pod\napiVersion: v1\nmetadata:\n name: otter") data := strings.NewReader("kind: Pod\napiVersion: v1\nmetadata:\n name: otter")
@ -358,101 +330,37 @@ func TestPerform(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
results := []*resource.Info{} results := []*resource.Info{}
fn := func(info *resource.Info) error { fn := func(info *resource.Info) error {
results = append(results, info) results = append(results, info)
if info.Namespace != tt.namespace { if info.Namespace != tt.namespace {
t.Errorf("%q. expected namespace to be '%s', got %s", tt.name, tt.namespace, info.Namespace) t.Errorf("expected namespace to be '%s', got %s", tt.namespace, info.Namespace)
} }
return nil return nil
} }
f, _, _, _ := cmdtesting.NewAPIFactory() c := newTestClient()
c := newTestClient(f) defer c.Cleanup()
infos, err := c.Build(tt.namespace, tt.reader) infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && err.Error() != tt.errMessage { if err != nil && err.Error() != tt.errMessage {
t.Errorf("%q. Error while building manifests: %v", tt.name, err) t.Errorf("Error while building manifests: %v", err)
} }
err = perform(infos, fn) err = perform(infos, fn)
if (err != nil) != tt.err { if (err != nil) != tt.err {
t.Errorf("%q. expected error: %v, got %v", tt.name, tt.err, err) t.Errorf("expected error: %v, got %v", tt.err, err)
} }
if err != nil && err.Error() != tt.errMessage { if err != nil && err.Error() != tt.errMessage {
t.Errorf("%q. expected error message: %v, got %v", tt.name, tt.errMessage, err) t.Errorf("expected error message: %v, got %v", tt.errMessage, err)
} }
if len(results) != tt.count { if len(results) != tt.count {
t.Errorf("%q. expected %d result objects, got %d", tt.name, tt.count, len(results)) t.Errorf("expected %d result objects, got %d", tt.count, len(results))
}
}
}
func TestWaitAndGetCompletedPodPhase(t *testing.T) {
tests := []struct {
podPhase core.PodPhase
expectedPhase core.PodPhase
err bool
errMessage string
}{
{
podPhase: core.PodPending,
expectedPhase: core.PodUnknown,
err: true,
errMessage: "watch closed before Until timeout",
}, {
podPhase: core.PodRunning,
expectedPhase: core.PodUnknown,
err: true,
errMessage: "watch closed before Until timeout",
}, {
podPhase: core.PodSucceeded,
expectedPhase: core.PodSucceeded,
}, {
podPhase: core.PodFailed,
expectedPhase: core.PodFailed,
},
}
for _, tt := range tests {
f, tf, codec, ns := cmdtesting.NewAPIFactory()
actions := make(map[string]string)
var testPodList core.PodList
testPodList.Items = append(testPodList.Items, newPodWithStatus("bestpod", core.PodStatus{Phase: tt.podPhase}, "test"))
tf.Client = &fake.RESTClient{
NegotiatedSerializer: ns,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method
actions[p] = m
switch {
case p == "/namespaces/test/pods/bestpod" && m == "GET":
return newResponse(200, &testPodList.Items[0])
case p == "/namespaces/test/pods" && m == "GET":
event := watch.Event{Type: watch.Added, Object: &testPodList.Items[0]}
return newEventResponse(200, &event)
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
c := newTestClient(f)
phase, err := c.WaitAndGetCompletedPodPhase("test", objBody(codec, &testPodList), 1*time.Second)
if (err != nil) != tt.err {
t.Fatalf("Expected error but there was none.")
}
if err != nil && err.Error() != tt.errMessage {
t.Fatalf("Expected error %s, got %s", tt.errMessage, err.Error())
}
if phase != tt.expectedPhase {
t.Fatalf("Expected pod phase %s, got %s", tt.expectedPhase, phase)
} }
})
} }
} }

@ -91,15 +91,18 @@ type Hook_DeletePolicy int32
const ( const (
Hook_SUCCEEDED Hook_DeletePolicy = 0 Hook_SUCCEEDED Hook_DeletePolicy = 0
Hook_FAILED Hook_DeletePolicy = 1 Hook_FAILED Hook_DeletePolicy = 1
Hook_BEFORE_HOOK_CREATION Hook_DeletePolicy = 2
) )
var Hook_DeletePolicy_name = map[int32]string{ var Hook_DeletePolicy_name = map[int32]string{
0: "SUCCEEDED", 0: "SUCCEEDED",
1: "FAILED", 1: "FAILED",
2: "BEFORE_HOOK_CREATION",
} }
var Hook_DeletePolicy_value = map[string]int32{ var Hook_DeletePolicy_value = map[string]int32{
"SUCCEEDED": 0, "SUCCEEDED": 0,
"FAILED": 1, "FAILED": 1,
"BEFORE_HOOK_CREATION": 2,
} }
func (x Hook_DeletePolicy) String() string { func (x Hook_DeletePolicy) String() string {
@ -196,32 +199,33 @@ func init() {
func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor0) } func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{ var fileDescriptor0 = []byte{
// 428 bytes of a gzipped FileDescriptorProto // 445 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xd1, 0x6e, 0xda, 0x30, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x51, 0x8f, 0x9a, 0x40,
0x14, 0x86, 0x9b, 0x12, 0x12, 0x38, 0x50, 0xea, 0x59, 0xd3, 0x66, 0x71, 0x53, 0xc4, 0x15, 0xbb, 0x10, 0x80, 0x8f, 0x13, 0x41, 0x47, 0xcf, 0xdb, 0x6e, 0x9a, 0x76, 0xe3, 0xcb, 0x19, 0x9f, 0x7c,
0x09, 0x53, 0xa7, 0x3d, 0x40, 0x4a, 0xce, 0xd6, 0xaa, 0x11, 0x20, 0x27, 0x68, 0xd2, 0x6e, 0xa2, 0xc2, 0xe6, 0x9a, 0xfe, 0x00, 0x84, 0xb9, 0x6a, 0x24, 0x60, 0x16, 0x4c, 0x93, 0xbe, 0x10, 0xae,
0x74, 0xb8, 0x10, 0x11, 0xe2, 0x88, 0x98, 0x4d, 0x7b, 0xa6, 0xbd, 0xce, 0x1e, 0x68, 0xb2, 0x09, 0xee, 0x29, 0x11, 0x81, 0x08, 0xb6, 0xe9, 0x0f, 0xec, 0x3f, 0xe8, 0x0f, 0x6a, 0x76, 0x45, 0x7b,
0x59, 0xa5, 0xed, 0xee, 0x9c, 0xef, 0x7c, 0x76, 0xce, 0x1f, 0xc3, 0xdb, 0x6d, 0x5a, 0x66, 0xd3, 0x49, 0xfb, 0x36, 0xf3, 0xcd, 0x37, 0xc3, 0x0c, 0x0b, 0xef, 0x77, 0x49, 0x99, 0x4e, 0x8f, 0x22,
0x83, 0xc8, 0x45, 0x5a, 0x89, 0xe9, 0x56, 0xca, 0x9d, 0x57, 0x1e, 0xa4, 0x92, 0xb4, 0xaf, 0x07, 0x13, 0x49, 0x25, 0xa6, 0xbb, 0xa2, 0xd8, 0x5b, 0xe5, 0xb1, 0xa8, 0x0b, 0xda, 0x97, 0x05, 0xab,
0x5e, 0x3d, 0x18, 0xde, 0x6c, 0xa4, 0xdc, 0xe4, 0x62, 0x6a, 0x66, 0x4f, 0xc7, 0xe7, 0xa9, 0xca, 0x29, 0x0c, 0x1f, 0xb6, 0x45, 0xb1, 0xcd, 0xc4, 0x54, 0xd5, 0x9e, 0x4f, 0x2f, 0xd3, 0x3a, 0x3d,
0xf6, 0xa2, 0x52, 0xe9, 0xbe, 0x3c, 0xe9, 0xe3, 0x5f, 0x36, 0xd8, 0xf7, 0x52, 0xee, 0x28, 0x05, 0x88, 0xaa, 0x4e, 0x0e, 0xe5, 0x59, 0x1f, 0xff, 0xd2, 0x41, 0x9f, 0x17, 0xc5, 0x9e, 0x52, 0xd0,
0xbb, 0x48, 0xf7, 0x82, 0x59, 0x23, 0x6b, 0xd2, 0xe5, 0xa6, 0xd6, 0x6c, 0x97, 0x15, 0x6b, 0x76, 0xf3, 0xe4, 0x20, 0x98, 0x36, 0xd2, 0x26, 0x5d, 0xae, 0x62, 0xc9, 0xf6, 0x69, 0xbe, 0x61, 0xb7,
0x79, 0x62, 0xba, 0xd6, 0xac, 0x4c, 0xd5, 0x96, 0xb5, 0x4e, 0x4c, 0xd7, 0x74, 0x08, 0x9d, 0x7d, 0x67, 0x26, 0x63, 0xc9, 0xca, 0xa4, 0xde, 0xb1, 0xd6, 0x99, 0xc9, 0x98, 0x0e, 0xa1, 0x73, 0x48,
0x5a, 0x64, 0xcf, 0xa2, 0x52, 0xcc, 0x36, 0xbc, 0xe9, 0xe9, 0x7b, 0x70, 0xc4, 0x77, 0x51, 0xa8, 0xf2, 0xf4, 0x45, 0x54, 0x35, 0xd3, 0x15, 0xbf, 0xe6, 0xf4, 0x03, 0x18, 0xe2, 0xbb, 0xc8, 0xeb,
0x8a, 0xb5, 0x47, 0xad, 0xc9, 0xe0, 0x96, 0x79, 0x2f, 0x17, 0xf4, 0xf4, 0xb7, 0x3d, 0xd4, 0x02, 0x8a, 0xb5, 0x47, 0xad, 0xc9, 0xe0, 0x91, 0x59, 0xaf, 0x17, 0xb4, 0xe4, 0xb7, 0x2d, 0x94, 0x02,
0xaf, 0x3d, 0xfa, 0x11, 0x3a, 0x79, 0x5a, 0xa9, 0xe4, 0x70, 0x2c, 0x98, 0x33, 0xb2, 0x26, 0xbd, 0x6f, 0x3c, 0xfa, 0x09, 0x3a, 0x59, 0x52, 0xd5, 0xf1, 0xf1, 0x94, 0x33, 0x63, 0xa4, 0x4d, 0x7a,
0xdb, 0xa1, 0x77, 0x8a, 0xe1, 0x9d, 0x63, 0x78, 0xf1, 0x39, 0x06, 0x77, 0xb5, 0xcb, 0x8f, 0x05, 0x8f, 0x43, 0xeb, 0x7c, 0x86, 0x75, 0x39, 0xc3, 0x8a, 0x2e, 0x67, 0x70, 0x53, 0xba, 0xfc, 0x94,
0x7d, 0x03, 0xce, 0x0f, 0x91, 0x6d, 0xb6, 0x8a, 0xb9, 0x23, 0x6b, 0xd2, 0xe6, 0x75, 0x47, 0xef, 0xd3, 0x77, 0x60, 0xfc, 0x10, 0xe9, 0x76, 0x57, 0x33, 0x73, 0xa4, 0x4d, 0xda, 0xbc, 0xc9, 0xe8,
0xe1, 0x7a, 0x2d, 0x72, 0xa1, 0x44, 0x52, 0xca, 0x3c, 0xfb, 0x96, 0x89, 0x8a, 0x75, 0xcc, 0x26, 0x1c, 0xee, 0x37, 0x22, 0x13, 0xb5, 0x88, 0xcb, 0x22, 0x4b, 0xbf, 0xa5, 0xa2, 0x62, 0x1d, 0xb5,
0x37, 0xff, 0xd9, 0x24, 0x30, 0xe6, 0x52, 0x8b, 0x3f, 0xf9, 0x60, 0xfd, 0xb7, 0xcb, 0x44, 0x35, 0xc9, 0xc3, 0x7f, 0x36, 0x71, 0x95, 0xb9, 0x92, 0xe2, 0x4f, 0x3e, 0xd8, 0xfc, 0xcd, 0x52, 0x51,
0xfe, 0x6d, 0x41, 0xdb, 0xac, 0x4a, 0x7b, 0xe0, 0xae, 0xe6, 0x8f, 0xf3, 0xc5, 0x97, 0x39, 0xb9, 0x8d, 0x7f, 0x6b, 0xd0, 0x56, 0xab, 0xd2, 0x1e, 0x98, 0x6b, 0x7f, 0xe9, 0x07, 0x5f, 0x7c, 0x72,
0xa0, 0xd7, 0xd0, 0x5b, 0x72, 0x4c, 0x1e, 0xe6, 0x51, 0xec, 0x87, 0x21, 0xb1, 0x28, 0x81, 0xfe, 0x43, 0xef, 0xa1, 0xb7, 0xe2, 0x18, 0x2f, 0xfc, 0x30, 0xb2, 0x3d, 0x8f, 0x68, 0x94, 0x40, 0x7f,
0x72, 0x11, 0xc5, 0x0d, 0xb9, 0xa4, 0x03, 0x00, 0xad, 0x04, 0x18, 0x62, 0x8c, 0xa4, 0x65, 0x8e, 0x15, 0x84, 0xd1, 0x95, 0xdc, 0xd2, 0x01, 0x80, 0x54, 0x5c, 0xf4, 0x30, 0x42, 0xd2, 0x52, 0x2d,
0x68, 0xa3, 0x06, 0xf6, 0xf9, 0x8e, 0xd5, 0xf2, 0x33, 0xf7, 0x03, 0x24, 0xed, 0xe6, 0x8e, 0x33, 0xd2, 0x68, 0x80, 0x7e, 0x99, 0xb1, 0x5e, 0x7d, 0xe6, 0xb6, 0x8b, 0xa4, 0x7d, 0x9d, 0x71, 0x21,
0x71, 0x0c, 0xe1, 0x98, 0xf0, 0x45, 0x18, 0xde, 0xf9, 0xb3, 0x47, 0xe2, 0xd2, 0x57, 0x70, 0x65, 0x86, 0x22, 0x1c, 0x63, 0x1e, 0x78, 0xde, 0xcc, 0x76, 0x96, 0xc4, 0xa4, 0x6f, 0xe0, 0x4e, 0x39,
0x9c, 0x06, 0x75, 0x28, 0x83, 0xd7, 0x1c, 0x43, 0xf4, 0x23, 0x4c, 0x62, 0x8c, 0xe2, 0x24, 0x5a, 0x57, 0xd4, 0xa1, 0x0c, 0xde, 0x72, 0xf4, 0xd0, 0x0e, 0x31, 0x8e, 0x30, 0x8c, 0xe2, 0x70, 0xed,
0xcd, 0x66, 0x18, 0x45, 0xa4, 0xfb, 0xcf, 0xe4, 0x93, 0xff, 0x10, 0xae, 0x38, 0x12, 0x18, 0xbf, 0x38, 0x18, 0x86, 0xa4, 0xfb, 0x4f, 0xe5, 0xc9, 0x5e, 0x78, 0x6b, 0x8e, 0x04, 0xc6, 0x0e, 0xf4,
0x83, 0xfe, 0xcb, 0xd8, 0xf4, 0x0a, 0xba, 0xe6, 0x18, 0x06, 0x18, 0x90, 0x0b, 0x0a, 0xe0, 0x68, 0x5f, 0x9f, 0x4d, 0xef, 0xa0, 0xab, 0xda, 0xd0, 0x45, 0x97, 0xdc, 0x50, 0x00, 0x43, 0xba, 0xe8,
0x17, 0x03, 0x62, 0xdd, 0x75, 0xbf, 0xba, 0xf5, 0xef, 0x7a, 0x72, 0xcc, 0x5b, 0x7c, 0xf8, 0x13, 0x12, 0x4d, 0x0e, 0x99, 0xe1, 0x53, 0xc0, 0x31, 0x9e, 0x07, 0xc1, 0x32, 0x76, 0x38, 0xda, 0xd1,
0x00, 0x00, 0xff, 0xff, 0xb9, 0x8a, 0xe1, 0xaf, 0x89, 0x02, 0x00, 0x00, 0x22, 0xf0, 0xc9, 0xed, 0xac, 0xfb, 0xd5, 0x6c, 0x7e, 0xe4, 0xb3, 0xa1, 0x5e, 0xe9, 0xe3, 0x9f,
0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x64, 0x75, 0x6c, 0xa3, 0x02, 0x00, 0x00,
} }

@ -196,9 +196,9 @@ func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caF
return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters)
} }
// FindChartInRepoURL finds chart in chart repository pointed by repoURL // FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL
// without adding repo to repositories. // without adding repo to repositories, like FindChartInRepoURL,
// Unlike the FindChartInRepoURL function, this function also receives credentials for the chart repository. // but it also receives credentials for the chart repository.
func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) {
// Download and write the index file to a temporary location // Download and write the index file to a temporary location
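For callers that want to resolve a chart from a private repository without first adding it to repositories.yaml, the credential-aware variant documented above can be called directly. A minimal, hedged sketch — it assumes the function lives in k8s.io/helm/pkg/repo and that getters come from k8s.io/helm/pkg/getter, as in Helm 2.x; the URL, credentials, and chart coordinates are placeholders:

package main

import (
	"fmt"

	"k8s.io/helm/pkg/getter"
	"k8s.io/helm/pkg/helm/environment"
	"k8s.io/helm/pkg/repo"
)

func main() {
	var settings environment.EnvSettings
	providers := getter.All(settings) // registers the default HTTP(S) getter

	// Resolve the download URL for mychart 1.2.3 from an authenticated repo.
	// Empty strings skip the optional TLS cert/key/CA files.
	chartURL, err := repo.FindChartInAuthRepoURL(
		"https://charts.example.com", // repoURL (placeholder)
		"user", "secret", // username, password (placeholders)
		"mychart", "1.2.3", // chartName, chartVersion
		"", "", "", // certFile, keyFile, caFile
		providers,
	)
	if err != nil {
		fmt.Println("chart lookup failed:", err)
		return
	}
	fmt.Println("chart URL:", chartURL)
}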

@ -31,7 +31,8 @@ import (
var ErrRepoOutOfDate = errors.New("repository file is out of date") var ErrRepoOutOfDate = errors.New("repository file is out of date")
// RepoFile represents the repositories.yaml file in $HELM_HOME // RepoFile represents the repositories.yaml file in $HELM_HOME
type RepoFile struct { // TODO: change type name to File in Helm 3 to resolve linter warning
type RepoFile struct { // nolint
APIVersion string `json:"apiVersion"` APIVersion string `json:"apiVersion"`
Generated time.Time `json:"generated"` Generated time.Time `json:"generated"`
Repositories []*Entry `json:"repositories"` Repositories []*Entry `json:"repositories"`

@ -36,7 +36,7 @@ func ToYAML(s string) (string, error) {
return "", err return "", err
} }
d, err := yaml.Marshal(m) d, err := yaml.Marshal(m)
return string(d), err return strings.TrimSuffix(string(d), "\n"), err
} }
// Parse parses a set line. // Parse parses a set line.
@ -50,7 +50,7 @@ func Parse(s string) (map[string]interface{}, error) {
return vals, err return vals, err
} }
// Parse parses a set line and forces a string value. // ParseString parses a set line and forces a string value.
// //
// A set line is of the form name1=value1,name2=value2 // A set line is of the form name1=value1,name2=value2
func ParseString(s string) (map[string]interface{}, error) { func ParseString(s string) (map[string]interface{}, error) {

@ -365,7 +365,7 @@ func TestToYAML(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
expect := "name: value\n" expect := "name: value"
if o != expect { if o != expect {
t.Errorf("Expected %q, got %q", expect, o) t.Errorf("Expected %q, got %q", expect, o)
} }

@ -48,6 +48,7 @@ var events = map[string]release.Hook_Event{
var deletePolices = map[string]release.Hook_DeletePolicy{ var deletePolices = map[string]release.Hook_DeletePolicy{
hooks.HookSucceeded: release.Hook_SUCCEEDED, hooks.HookSucceeded: release.Hook_SUCCEEDED,
hooks.HookFailed: release.Hook_FAILED, hooks.HookFailed: release.Hook_FAILED,
hooks.BeforeHookCreation: release.Hook_BEFORE_HOOK_CREATION,
} }
// Manifest represents a manifest file, which has a name and some content. // Manifest represents a manifest file, which has a name and some content.
@ -189,21 +190,14 @@ func (file *manifestFile) sort(result *result) error {
result.hooks = append(result.hooks, h) result.hooks = append(result.hooks, h)
isKnownDeletePolices := false operateAnnotationValues(entry, hooks.HookDeleteAnno, func(value string) {
dps, ok := entry.Metadata.Annotations[hooks.HookDeleteAnno] policy, exist := deletePolices[value]
if ok {
for _, dp := range strings.Split(dps, ",") {
dp = strings.ToLower(strings.TrimSpace(dp))
p, exist := deletePolices[dp]
if exist { if exist {
isKnownDeletePolices = true h.DeletePolicies = append(h.DeletePolicies, policy)
h.DeletePolicies = append(h.DeletePolicies, p) } else {
} log.Printf("info: skipping unknown hook delete policy: %q", value)
}
if !isKnownDeletePolices {
log.Printf("info: skipping unknown hook delete policy: %q", dps)
}
} }
})
} }
return nil return nil
@ -228,3 +222,12 @@ func calculateHookWeight(entry util.SimpleHead) int32 {
return int32(hw) return int32(hw)
} }
func operateAnnotationValues(entry util.SimpleHead, annotation string, operate func(p string)) {
if dps, ok := entry.Metadata.Annotations[annotation]; ok {
for _, dp := range strings.Split(dps, ",") {
dp = strings.ToLower(strings.TrimSpace(dp))
operate(dp)
}
}
}
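The operateAnnotationValues helper above centralizes how the comma-separated helm.sh/hook-delete-policy annotation is split, trimmed, and lower-cased before each value is matched against the known policies; unknown values are now logged and skipped individually. A self-contained sketch of the same parsing pattern (the helper and variable names here are illustrative, not Helm's API):

package main

import (
	"fmt"
	"strings"
)

// knownPolicies mirrors the delete policies recognized after this change.
var knownPolicies = map[string]bool{
	"hook-succeeded":       true,
	"hook-failed":          true,
	"before-hook-creation": true,
}

// forEachAnnotationValue splits a comma-separated annotation value,
// normalizes each entry, and hands it to the callback.
func forEachAnnotationValue(raw string, operate func(string)) {
	for _, v := range strings.Split(raw, ",") {
		operate(strings.ToLower(strings.TrimSpace(v)))
	}
}

func main() {
	// e.g. metadata.annotations["helm.sh/hook-delete-policy"]
	raw := "hook-succeeded, Before-Hook-Creation, bogus-policy"

	forEachAnnotationValue(raw, func(value string) {
		if knownPolicies[value] {
			fmt.Println("recording delete policy:", value)
		} else {
			fmt.Println("info: skipping unknown hook delete policy:", value)
		}
	})
}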

@ -30,6 +30,7 @@ var InstallOrder SortOrder = []string{
"Namespace", "Namespace",
"ResourceQuota", "ResourceQuota",
"LimitRange", "LimitRange",
"PodSecurityPolicy",
"Secret", "Secret",
"ConfigMap", "ConfigMap",
"StorageClass", "StorageClass",
@ -80,6 +81,7 @@ var UninstallOrder SortOrder = []string{
"StorageClass", "StorageClass",
"ConfigMap", "ConfigMap",
"Secret", "Secret",
"PodSecurityPolicy",
"LimitRange", "LimitRange",
"ResourceQuota", "ResourceQuota",
"Namespace", "Namespace",

@ -85,6 +85,10 @@ func TestKindSorter(t *testing.T) {
Name: "o", Name: "o",
Head: &util.SimpleHead{Kind: "Pod"}, Head: &util.SimpleHead{Kind: "Pod"},
}, },
{
Name: "3",
Head: &util.SimpleHead{Kind: "PodSecurityPolicy"},
},
{ {
Name: "q", Name: "q",
Head: &util.SimpleHead{Kind: "ReplicaSet"}, Head: &util.SimpleHead{Kind: "ReplicaSet"},
@ -136,8 +140,8 @@ func TestKindSorter(t *testing.T) {
order SortOrder order SortOrder
expected string expected string
}{ }{
{"install", InstallOrder, "abcde1fgh2ijklmnopqrstuvw!"}, {"install", InstallOrder, "abc3de1fgh2ijklmnopqrstuvw!"},
{"uninstall", UninstallOrder, "wvmutsrqponlkji2hgf1edcba!"}, {"uninstall", UninstallOrder, "wvmutsrqponlkji2hgf1ed3cba!"},
} { } {
var buf bytes.Buffer var buf bytes.Buffer
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {

@ -22,7 +22,6 @@ import (
"testing" "testing"
"k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services" "k8s.io/helm/pkg/proto/hapi/services"
"k8s.io/helm/pkg/version" "k8s.io/helm/pkg/version"
@ -32,17 +31,7 @@ func TestInstallRelease(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest()
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Failed install: %s", err) t.Fatalf("Failed install: %s", err)
@ -96,18 +85,9 @@ func TestInstallRelease_WithNotes(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withNotes(notesText)),
Namespace: "spaced", )
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText)},
},
},
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Failed install: %s", err) t.Fatalf("Failed install: %s", err)
@ -165,18 +145,9 @@ func TestInstallRelease_WithNotesRendered(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withNotes(notesText + " {{.Release.Name}}")),
Namespace: "spaced", )
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText + " {{.Release.Name}}")},
},
},
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Failed install: %s", err) t.Fatalf("Failed install: %s", err)
@ -236,17 +207,9 @@ func TestInstallRelease_TillerVersion(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withTiller(">=2.2.0")),
Namespace: "spaced", )
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", TillerVersion: ">=2.2.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
_, err := rs.InstallRelease(c, req) _, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Expected valid range. Got %q", err) t.Fatalf("Expected valid range. Got %q", err)
@ -258,17 +221,9 @@ func TestInstallRelease_WrongTillerVersion(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withTiller("<2.0.0")),
Namespace: "spaced", )
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", TillerVersion: "<2.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
_, err := rs.InstallRelease(c, req) _, err := rs.InstallRelease(c, req)
if err == nil { if err == nil {
t.Fatalf("Expected to fail because of wrong version") t.Fatalf("Expected to fail because of wrong version")
@ -284,29 +239,10 @@ func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(withChart(
req := &services.InstallReleaseRequest{ withNotes(notesText),
Namespace: "spaced", withDependency(withNotes(notesText+" child")),
Chart: &chart.Chart{ ))
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText)},
},
Dependencies: []*chart.Chart{
{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText + " child")},
},
},
},
},
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Failed install: %s", err) t.Fatalf("Failed install: %s", err)
@ -335,10 +271,9 @@ func TestInstallRelease_DryRun(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
req := &services.InstallReleaseRequest{ req := installRequest(withDryRun(),
Chart: chartStub(), withChart(withSampleTemplates()),
DryRun: true, )
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Errorf("Failed install: %s", err) t.Errorf("Failed install: %s", err)
@ -389,10 +324,7 @@ func TestInstallRelease_NoHooks(t *testing.T) {
rs := rsFixture() rs := rsFixture()
rs.env.Releases.Create(releaseStub()) rs.env.Releases.Create(releaseStub())
req := &services.InstallReleaseRequest{ req := installRequest(withDisabledHooks())
Chart: chartStub(),
DisableHooks: true,
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Errorf("Failed install: %s", err) t.Errorf("Failed install: %s", err)
@ -409,9 +341,7 @@ func TestInstallRelease_FailedHooks(t *testing.T) {
rs.env.Releases.Create(releaseStub()) rs.env.Releases.Create(releaseStub())
rs.env.KubeClient = newHookFailingKubeClient() rs.env.KubeClient = newHookFailingKubeClient()
req := &services.InstallReleaseRequest{ req := installRequest()
Chart: chartStub(),
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err == nil { if err == nil {
t.Error("Expected failed install") t.Error("Expected failed install")
@ -429,11 +359,10 @@ func TestInstallRelease_ReuseName(t *testing.T) {
rel.Info.Status.Code = release.Status_DELETED rel.Info.Status.Code = release.Status_DELETED
rs.env.Releases.Create(rel) rs.env.Releases.Create(rel)
req := &services.InstallReleaseRequest{ req := installRequest(
Chart: chartStub(), withReuseName(),
ReuseName: true, withName(rel.Name),
Name: rel.Name, )
}
res, err := rs.InstallRelease(c, req) res, err := rs.InstallRelease(c, req)
if err != nil { if err != nil {
t.Fatalf("Failed install: %s", err) t.Fatalf("Failed install: %s", err)
@ -457,18 +386,10 @@ func TestInstallRelease_KubeVersion(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withKube(">=0.0.0")),
Chart: &chart.Chart{ )
Metadata: &chart.Metadata{Name: "hello", KubeVersion: ">=0.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
_, err := rs.InstallRelease(c, req) _, err := rs.InstallRelease(c, req)
fmt.Println(err)
if err != nil { if err != nil {
t.Fatalf("Expected valid range. Got %q", err) t.Fatalf("Expected valid range. Got %q", err)
} }
@ -478,16 +399,10 @@ func TestInstallRelease_WrongKubeVersion(t *testing.T) {
c := helm.NewContext() c := helm.NewContext()
rs := rsFixture() rs := rsFixture()
// TODO: Refactor this into a mock. req := installRequest(
req := &services.InstallReleaseRequest{ withChart(withKube(">=5.0.0")),
Chart: &chart.Chart{ )
Metadata: &chart.Metadata{Name: "hello", KubeVersion: ">=5.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
_, err := rs.InstallRelease(c, req) _, err := rs.InstallRelease(c, req)
if err == nil { if err == nil {
t.Fatalf("Expected to fail because of wrong version") t.Fatalf("Expected to fail because of wrong version")

@ -18,11 +18,11 @@ package tiller
import ( import (
"fmt" "fmt"
"regexp" "github.com/golang/protobuf/proto"
"k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services" "k8s.io/helm/pkg/proto/hapi/services"
relutil "k8s.io/helm/pkg/releaseutil" relutil "k8s.io/helm/pkg/releaseutil"
"regexp"
) )
// ListReleases lists the releases found by the server. // ListReleases lists the releases found by the server.
@ -107,14 +107,50 @@ func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream s
rels = rels[0:req.Limit] rels = rels[0:req.Limit]
l = int64(len(rels)) l = int64(len(rels))
} }
res := &services.ListReleasesResponse{ res := &services.ListReleasesResponse{
Next: next, Next: next,
Count: l, Count: l,
Total: total, Total: total,
Releases: rels,
} }
return stream.Send(res) chunks := s.partition(rels[:min(len(rels), int(req.Limit))], maxMsgSize-proto.Size(res))
for res.Releases = range chunks {
if err := stream.Send(res); err != nil {
for range chunks { // drain
}
return err
}
}
return nil
}
// partition packs releases into slices up to the capacity cap in bytes.
func (s *ReleaseServer) partition(rels []*release.Release, cap int) <-chan []*release.Release {
chunks := make(chan []*release.Release, 1)
go func() {
var (
fill = 0 // fill is the number of bytes already used in the current chunk
size int // size is the encoded size of a single release
)
var chunk []*release.Release
for _, rls := range rels {
if size = proto.Size(rls); size+fill > cap {
// Over-cap, push chunk onto channel to send over gRPC stream
s.Log("partitioned at %d with %d releases (cap=%d)", fill, len(chunk), cap)
chunks <- chunk
// reset partitioning state
chunk = chunk[:0]
fill = 0
}
chunk = append(chunk, rls)
fill += size
}
if len(chunk) > 0 {
// send remaining if any
chunks <- chunk
}
close(chunks)
}()
return chunks
} }
func filterByNamespace(namespace string, rels []*release.Release) ([]*release.Release, error) { func filterByNamespace(namespace string, rels []*release.Release) ([]*release.Release, error) {
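The ListReleases change above stops sending every release in a single response and instead streams chunks whose encoded size stays under the gRPC message limit (maxMsgSize minus the fixed response overhead). A stripped-down sketch of the same greedy partitioning, with a plain size function standing in for proto.Size and the channel plumbing omitted:

package main

import "fmt"

// partitionBySize greedily packs items into chunks of at most capBytes,
// starting a new chunk whenever the next item would overflow the current one.
func partitionBySize(items []string, capBytes int, size func(string) int) [][]string {
	var chunks [][]string
	var chunk []string
	fill := 0
	for _, it := range items {
		s := size(it)
		if len(chunk) > 0 && fill+s > capBytes {
			chunks = append(chunks, chunk)
			chunk, fill = nil, 0
		}
		chunk = append(chunk, it)
		fill += s
	}
	if len(chunk) > 0 {
		chunks = append(chunks, chunk)
	}
	return chunks
}

func main() {
	releases := []string{"wordpress", "redis", "nginx-ingress", "prometheus"}
	for i, c := range partitionBySize(releases, 16, func(s string) int { return len(s) }) {
		fmt.Printf("chunk %d: %v\n", i, c)
	}
}

As in the change above, a release larger than the cap still goes out in a chunk of its own rather than being dropped.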

@ -135,7 +135,7 @@ func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.R
targetRelease.Info.Status.Code = release.Status_FAILED targetRelease.Info.Status.Code = release.Status_FAILED
targetRelease.Info.Description = msg targetRelease.Info.Description = msg
s.recordRelease(currentRelease, true) s.recordRelease(currentRelease, true)
s.recordRelease(targetRelease, false) s.recordRelease(targetRelease, true)
return res, err return res, err
} }

@ -25,6 +25,7 @@ import (
"strings" "strings"
"github.com/technosophos/moniker" "github.com/technosophos/moniker"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery" "k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@ -135,7 +136,22 @@ func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current
if err != nil { if err != nil {
return err return err
} }
// merge new values with current
req.Values.Raw = current.Config.Raw + "\n" + req.Values.Raw
req.Chart.Values = &chart.Config{Raw: nv} req.Chart.Values = &chart.Config{Raw: nv}
// yaml unmarshal and marshal to remove duplicate keys
y := map[string]interface{}{}
if err := yaml.Unmarshal([]byte(req.Values.Raw), &y); err != nil {
return err
}
data, err := yaml.Marshal(y)
if err != nil {
return err
}
req.Values.Raw = string(data)
return nil return nil
} }
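With --reuse-values, the code above now concatenates the stored release config with the newly supplied overrides and round-trips the result through YAML, so duplicate keys collapse and the later (user-supplied) value wins. A minimal sketch of that merge using gopkg.in/yaml.v2 as imported above (the helper name and sample keys are illustrative):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// mergeRawValues appends the new overrides after the current config and
// re-marshals the result; yaml.v2's non-strict decoding keeps the last
// occurrence of a duplicated key, so the override wins.
func mergeRawValues(current, overrides string) (string, error) {
	merged := map[string]interface{}{}
	if err := yaml.Unmarshal([]byte(current+"\n"+overrides), &merged); err != nil {
		return "", err
	}
	out, err := yaml.Marshal(merged)
	return string(out), err
}

func main() {
	current := "foo: bar\nfoo2: bar2"
	overrides := "foo: baz"
	out, err := mergeRawValues(current, overrides)
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // foo: baz and foo2: bar2, each key exactly once
}

This is the behavior the TestUpdateRelease_ComplexReuseValues regression test below checks.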
@ -347,6 +363,9 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
executingHooks = sortByHookWeight(executingHooks) executingHooks = sortByHookWeight(executingHooks)
for _, h := range executingHooks { for _, h := range executingHooks {
if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.BeforeHookCreation, name, namespace, hook, kubeCli); err != nil {
return err
}
b := bytes.NewBufferString(h.Manifest) b := bytes.NewBufferString(h.Manifest)
if err := kubeCli.Create(namespace, b, timeout, false); err != nil { if err := kubeCli.Create(namespace, b, timeout, false); err != nil {
@ -356,18 +375,13 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
// No way to rewind a bytes.Buffer()? // No way to rewind a bytes.Buffer()?
b.Reset() b.Reset()
b.WriteString(h.Manifest) b.WriteString(h.Manifest)
if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil { if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err) s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
// If a hook is failed, checkout the annotation of the hook to determine whether the hook should be deleted // If a hook is failed, checkout the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook // under failed condition. If so, then clear the corresponding resource object in the hook
if hookShouldBeDeleted(h, hooks.HookFailed) { if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.HookFailed, name, namespace, hook, kubeCli); err != nil {
b.Reset() return err
b.WriteString(h.Manifest)
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, hooks.HookFailed)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
} }
return err return err
} }
@ -377,13 +391,8 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
// If all hooks are succeeded, checkout the annotation of each hook to determine whether the hook should be deleted // If all hooks are succeeded, checkout the annotation of each hook to determine whether the hook should be deleted
// under succeeded condition. If so, then clear the corresponding resource object in each hook // under succeeded condition. If so, then clear the corresponding resource object in each hook
for _, h := range executingHooks { for _, h := range executingHooks {
b := bytes.NewBufferString(h.Manifest) if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.HookSucceeded, name, namespace, hook, kubeCli); err != nil {
if hookShouldBeDeleted(h, hooks.HookSucceeded) { return err
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, hooks.HookSucceeded)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
} }
h.LastRun = timeconv.Now() h.LastRun = timeconv.Now()
} }
@ -409,11 +418,23 @@ func validateReleaseName(releaseName string) error {
return nil return nil
} }
func (s *ReleaseServer) deleteHookIfShouldBeDeletedByDeletePolicy(h *release.Hook, policy string, name, namespace, hook string, kubeCli environment.KubeClient) error {
b := bytes.NewBufferString(h.Manifest)
if hookHasDeletePolicy(h, policy) {
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, policy)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
}
return nil
}
// hookShouldBeDeleted determines whether the defined hook deletion policy matches the hook deletion polices // hookShouldBeDeleted determines whether the defined hook deletion policy matches the hook deletion polices
// supported by helm. If so, mark the hook as one should be deleted. // supported by helm. If so, mark the hook as one should be deleted.
func hookShouldBeDeleted(hook *release.Hook, policy string) bool { func hookHasDeletePolicy(h *release.Hook, policy string) bool {
if dp, ok := deletePolices[policy]; ok { if dp, ok := deletePolices[policy]; ok {
for _, v := range hook.DeletePolicies { for _, v := range h.DeletePolicies {
if dp == v { if dp == v {
return true return true
} }

@ -18,18 +18,25 @@ package tiller
import ( import (
"errors" "errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"regexp" "regexp"
"testing" "testing"
"time"
"github.com/ghodss/yaml"
"github.com/golang/protobuf/ptypes/timestamp" "github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/hooks"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/proto/hapi/chart" "k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services" "k8s.io/helm/pkg/proto/hapi/services"
@ -98,23 +105,128 @@ func rsFixture() *ReleaseServer {
} }
} }
// chartStub creates a fully stubbed out chart. type chartOptions struct {
func chartStub() *chart.Chart { *chart.Chart
return &chart.Chart{ }
type chartOption func(*chartOptions)
func buildChart(opts ...chartOption) *chart.Chart {
c := &chartOptions{
Chart: &chart.Chart{
// TODO: This should be more complete. // TODO: This should be more complete.
Metadata: &chart.Metadata{ Metadata: &chart.Metadata{
Name: "hello", Name: "hello",
}, },
// This adds basic templates, partials, and hooks. // This adds a basic template and hooks.
Templates: []*chart.Template{ Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")}, {Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
for _, opt := range opts {
opt(c)
}
return c.Chart
}
func withKube(version string) chartOption {
return func(opts *chartOptions) {
opts.Metadata.KubeVersion = version
}
}
func withTiller(version string) chartOption {
return func(opts *chartOptions) {
opts.Metadata.TillerVersion = version
}
}
func withDependency(dependencyOpts ...chartOption) chartOption {
return func(opts *chartOptions) {
opts.Dependencies = append(opts.Dependencies, buildChart(dependencyOpts...))
}
}
func withNotes(notes string) chartOption {
return func(opts *chartOptions) {
opts.Templates = append(opts.Templates, &chart.Template{
Name: "templates/NOTES.txt",
Data: []byte(notes),
})
}
}
func withSampleTemplates() chartOption {
return func(opts *chartOptions) {
sampleTemplates := []*chart.Template{
// This adds basic templates and partials.
{Name: "templates/goodbye", Data: []byte("goodbye: world")}, {Name: "templates/goodbye", Data: []byte("goodbye: world")},
{Name: "templates/empty", Data: []byte("")}, {Name: "templates/empty", Data: []byte("")},
{Name: "templates/with-partials", Data: []byte(`hello: {{ template "_planet" . }}`)}, {Name: "templates/with-partials", Data: []byte(`hello: {{ template "_planet" . }}`)},
{Name: "templates/partials/_planet", Data: []byte(`{{define "_planet"}}Earth{{end}}`)}, {Name: "templates/partials/_planet", Data: []byte(`{{define "_planet"}}Earth{{end}}`)},
{Name: "templates/hooks", Data: []byte(manifestWithHook)}, }
opts.Templates = append(opts.Templates, sampleTemplates...)
}
}
type installOptions struct {
*services.InstallReleaseRequest
}
type installOption func(*installOptions)
func withName(name string) installOption {
return func(opts *installOptions) {
opts.Name = name
}
}
func withDryRun() installOption {
return func(opts *installOptions) {
opts.DryRun = true
}
}
func withDisabledHooks() installOption {
return func(opts *installOptions) {
opts.DisableHooks = true
}
}
func withReuseName() installOption {
return func(opts *installOptions) {
opts.ReuseName = true
}
}
func withChart(chartOpts ...chartOption) installOption {
return func(opts *installOptions) {
opts.Chart = buildChart(chartOpts...)
}
}
func installRequest(opts ...installOption) *services.InstallReleaseRequest {
reqOpts := &installOptions{
&services.InstallReleaseRequest{
Namespace: "spaced",
Chart: buildChart(),
}, },
} }
for _, opt := range opts {
opt(reqOpts)
}
return reqOpts.InstallReleaseRequest
}
// chartStub creates a fully stubbed out chart.
func chartStub() *chart.Chart {
return buildChart(withSampleTemplates())
} }
// releaseStub creates a release stub, complete with the chartStub as its chart. // releaseStub creates a release stub, complete with the chartStub as its chart.
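The fixtures above move the tests to a functional-options style: buildChart and installRequest produce sensible defaults, and the with* helpers layer on only what a given test needs. A tiny self-contained miniature of the same pattern, with a toy struct standing in for services.InstallReleaseRequest:

package main

import "fmt"

// request is a toy stand-in for services.InstallReleaseRequest.
type request struct {
	Namespace    string
	Name         string
	DryRun       bool
	DisableHooks bool
}

type option func(*request)

func withName(name string) option { return func(r *request) { r.Name = name } }
func withDryRun() option          { return func(r *request) { r.DryRun = true } }
func withDisabledHooks() option   { return func(r *request) { r.DisableHooks = true } }

// newRequest applies options on top of a default fixture, mirroring how
// installRequest builds its defaults in the test file above.
func newRequest(opts ...option) *request {
	r := &request{Namespace: "spaced"}
	for _, opt := range opts {
		opt(r)
	}
	return r
}

func main() {
	req := newRequest(withName("flying-carp"), withDryRun())
	fmt.Printf("%+v\n", req)
}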
@ -345,3 +457,460 @@ func (rs mockRunReleaseTestServer) SetTrailer(m metadata.MD) {}
func (rs mockRunReleaseTestServer) SendMsg(v interface{}) error { return nil } func (rs mockRunReleaseTestServer) SendMsg(v interface{}) error { return nil }
func (rs mockRunReleaseTestServer) RecvMsg(v interface{}) error { return nil } func (rs mockRunReleaseTestServer) RecvMsg(v interface{}) error { return nil }
func (rs mockRunReleaseTestServer) Context() context.Context { return helm.NewContext() } func (rs mockRunReleaseTestServer) Context() context.Context { return helm.NewContext() }
type mockHooksManifest struct {
Metadata struct {
Name string
Annotations map[string]string
}
}
type mockHooksKubeClient struct {
Resources map[string]*mockHooksManifest
}
var errResourceExists = errors.New("resource already exists")
func (kc *mockHooksKubeClient) makeManifest(r io.Reader) (*mockHooksManifest, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
manifest := &mockHooksManifest{}
err = yaml.Unmarshal(b, manifest)
if err != nil {
return nil, err
}
return manifest, nil
}
func (kc *mockHooksKubeClient) Create(ns string, r io.Reader, timeout int64, shouldWait bool) error {
manifest, err := kc.makeManifest(r)
if err != nil {
return err
}
if _, hasKey := kc.Resources[manifest.Metadata.Name]; hasKey {
return errResourceExists
}
kc.Resources[manifest.Metadata.Name] = manifest
return nil
}
func (kc *mockHooksKubeClient) Get(ns string, r io.Reader) (string, error) {
return "", nil
}
func (kc *mockHooksKubeClient) Delete(ns string, r io.Reader) error {
manifest, err := kc.makeManifest(r)
if err != nil {
return err
}
delete(kc.Resources, manifest.Metadata.Name)
return nil
}
func (kc *mockHooksKubeClient) WatchUntilReady(ns string, r io.Reader, timeout int64, shouldWait bool) error {
paramManifest, err := kc.makeManifest(r)
if err != nil {
return err
}
manifest, hasManifest := kc.Resources[paramManifest.Metadata.Name]
if !hasManifest {
return fmt.Errorf("mockHooksKubeClient.WatchUntilReady: no such resource %s found", paramManifest.Metadata.Name)
}
if manifest.Metadata.Annotations["mockHooksKubeClient/Emulate"] == "hook-failed" {
return fmt.Errorf("mockHooksKubeClient.WatchUntilReady: hook-failed")
}
return nil
}
func (kc *mockHooksKubeClient) Update(ns string, currentReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error {
return nil
}
func (kc *mockHooksKubeClient) Build(ns string, reader io.Reader) (kube.Result, error) {
return []*resource.Info{}, nil
}
func (kc *mockHooksKubeClient) BuildUnstructured(ns string, reader io.Reader) (kube.Result, error) {
return []*resource.Info{}, nil
}
func (kc *mockHooksKubeClient) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (core.PodPhase, error) {
return core.PodUnknown, nil
}
func deletePolicyStub(kubeClient *mockHooksKubeClient) *ReleaseServer {
e := environment.New()
e.Releases = storage.Init(driver.NewMemory())
e.KubeClient = kubeClient
clientset := fake.NewSimpleClientset()
return &ReleaseServer{
ReleaseModule: &LocalReleaseModule{
clientset: clientset,
},
env: e,
clientset: clientset,
Log: func(_ string, _ ...interface{}) {},
}
}
func deletePolicyHookStub(hookName string, extraAnnotations map[string]string, DeletePolicies []release.Hook_DeletePolicy) *release.Hook {
extraAnnotationsStr := ""
for k, v := range extraAnnotations {
extraAnnotationsStr += fmt.Sprintf(" \"%s\": \"%s\"\n", k, v)
}
return &release.Hook{
Name: hookName,
Kind: "Job",
Path: hookName,
Manifest: fmt.Sprintf(`kind: Job
metadata:
name: %s
annotations:
"helm.sh/hook": pre-install,pre-upgrade
%sdata:
name: value`, hookName, extraAnnotationsStr),
Events: []release.Hook_Event{
release.Hook_PRE_INSTALL,
release.Hook_PRE_UPGRADE,
},
DeletePolicies: DeletePolicies,
}
}
func execHookShouldSucceed(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err != nil {
return fmt.Errorf("expected hook %s to be successful: %s", hook.Name, err)
}
return nil
}
func execHookShouldFail(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err == nil {
return fmt.Errorf("expected hook %s to be failed", hook.Name)
}
return nil
}
func execHookShouldFailWithError(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string, expectedError error) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err != expectedError {
return fmt.Errorf("expected hook %s to fail with error %v, got %v", hook.Name, expectedError, err)
}
return nil
}
type deletePolicyContext struct {
ReleaseServer *ReleaseServer
ReleaseName string
Namespace string
HookName string
KubeClient *mockHooksKubeClient
}
func newDeletePolicyContext() *deletePolicyContext {
kubeClient := &mockHooksKubeClient{
Resources: make(map[string]*mockHooksManifest),
}
return &deletePolicyContext{
KubeClient: kubeClient,
ReleaseServer: deletePolicyStub(kubeClient),
ReleaseName: "flying-carp",
Namespace: "river",
HookName: "migration-job",
}
}
func TestSuccessfulHookWithoutDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName, nil, nil)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be created by kube client", hook.Name)
}
}
func TestFailedHookWithoutDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"mockHooksKubeClient/Emulate": "hook-failed"},
nil,
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be created by kube client", hook.Name)
}
}
func TestSuccessfulHookWithSucceededDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "hook-succeeded"},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestSuccessfulHookWithFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "hook-failed"},
[]release.Hook_DeletePolicy{release.Hook_FAILED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
}
func TestFailedHookWithSucceededDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
}
func TestFailedHookWithFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_FAILED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook failed", hook.Name)
}
}
func TestSuccessfulHookWithSuccededOrFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_FAILED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestFailedHookWithSuccededOrFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_FAILED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook failed", hook.Name)
}
}
func TestHookAlreadyExists(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName, nil, nil)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
err = execHookShouldFailWithError(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade, errResourceExists)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after already exists error", hook.Name)
}
}
func TestHookDeletingWithBeforeHookCreationDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "before-hook-creation"},
[]release.Hook_DeletePolicy{release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
}
func TestSuccessfulHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestFailedHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
err = execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
}
func TestFailedThenSuccessfulHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
hook = deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}

@ -80,7 +80,7 @@ func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*rele
return nil, nil, err return nil, nil, err
} }
// If new values were not supplied in the upgrade, re-use the existing values. // determine if values will be reused
if err := s.reuseValues(req, currentRelease); err != nil { if err := s.reuseValues(req, currentRelease); err != nil {
return nil, nil, err return nil, nil, err
} }

@ -17,6 +17,7 @@ limitations under the License.
package tiller package tiller
import ( import (
"fmt"
"strings" "strings"
"testing" "testing"
@ -128,6 +129,107 @@ func TestUpdateRelease_ResetValues(t *testing.T) {
} }
} }
// This is a regression test for bug found in issue #3655
func TestUpdateRelease_ComplexReuseValues(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
installReq := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo: bar"},
}
fmt.Println("Running Install release with foo: bar override")
installResp, err := rs.InstallRelease(c, installReq)
if err != nil {
t.Fatal(err)
}
rel := installResp.Release
req := &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
}
fmt.Println("Running Update release with no overrides and no reuse-values flag")
res, err := rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
expect := "foo: bar"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Config.Raw)
}
rel = res.Release
req = &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo2: bar2"},
ReuseValues: true,
}
fmt.Println("Running Update release with foo2: bar2 override and reuse-values")
res, err = rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
// This should have the newly-passed overrides.
expect = "foo: bar\nfoo2: bar2\n"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected request config to be %q, got %q", expect, res.Release.Config.Raw)
}
rel = res.Release
req = &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo: baz"},
ReuseValues: true,
}
fmt.Println("Running Update release with foo=baz override with reuse-values flag")
res, err = rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
expect = "foo: baz\nfoo2: bar2\n"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Config.Raw)
}
}

func TestUpdateRelease_ReuseValues(t *testing.T) {
	c := helm.NewContext()
	rs := rsFixture()

@@ -157,8 +259,8 @@ func TestUpdateRelease_ReuseValues(t *testing.T) {
	if res.Release.Chart.Values != nil && res.Release.Chart.Values.Raw != expect {
		t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Chart.Values.Raw)
	}
	// This should have the newly-passed overrides.
	expect = "name2: val2"
	// This should have the newly-passed overrides and any other computed values. `name: value` comes from release Config via releaseStub()
	expect = "name: value\nname2: val2\n"
	if res.Release.Config != nil && res.Release.Config.Raw != expect {
		t.Errorf("Expected request config to be %q, got %q", expect, res.Release.Config.Raw)
	}

@@ -21,17 +21,20 @@ import (
	"crypto/x509"
	"fmt"
	"io/ioutil"

	"k8s.io/helm/pkg/urlutil"
)

// NewClientTLS returns tls.Config appropriate for client auth.
func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
	cert, err := CertFromFilePair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	config := tls.Config{
		Certificates: []tls.Certificate{*cert},
	}

func newTLSConfigCommon(certFile, keyFile, caFile string) (*tls.Config, error) {
	config := tls.Config{}
	if certFile != "" && keyFile != "" {
		cert, err := CertFromFilePair(certFile, keyFile)
		if err != nil {
			return nil, err
		}
		config.Certificates = []tls.Certificate{*cert}
	}
	if caFile != "" {
		cp, err := CertPoolFromFile(caFile)
		if err != nil {
@@ -39,9 +42,32 @@ func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
		}
		config.RootCAs = cp
	}
	return &config, nil
}

// NewClientTLS returns tls.Config appropriate for client auth.
func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
	return newTLSConfigCommon(certFile, keyFile, caFile)
}

// NewTLSConfig returns tls.Config appropriate for client and/or server auth.
func NewTLSConfig(url, certFile, keyFile, caFile string) (*tls.Config, error) {
	config, err := newTLSConfigCommon(certFile, keyFile, caFile)
	if err != nil {
		return nil, err
	}
	config.BuildNameToCertificate()

	serverName, err := urlutil.ExtractHostname(url)
	if err != nil {
		return nil, err
	}
	config.ServerName = serverName
	return config, nil
}
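A minimal usage sketch of the new `NewTLSConfig` helper, assuming it lives in `k8s.io/helm/pkg/tlsutil` alongside `CertFromFilePair` and `CertPoolFromFile`; the Tiller address and credential file paths are placeholders, and the `tls.Dial` at the end is shown only as one possible consumer of the resulting config.

```go
package main

import (
	"crypto/tls"
	"log"

	"k8s.io/helm/pkg/tlsutil"
)

func main() {
	// Placeholder endpoint and credential paths; adjust for your environment.
	const (
		tillerURL = "https://tiller-deploy.kube-system:44134"
		certFile  = "tls.crt"
		keyFile   = "tls.key"
		caFile    = "ca.crt"
	)

	// NewTLSConfig derives ServerName from the URL's hostname, so the same
	// config can serve client and/or server authentication.
	cfg, err := tlsutil.NewTLSConfig(tillerURL, certFile, keyFile, caFile)
	if err != nil {
		log.Fatalf("building TLS config: %v", err)
	}

	// One possible consumer: a raw TLS connection. The config could equally
	// back an http.Transport or gRPC transport credentials.
	conn, err := tls.Dial("tcp", "tiller-deploy.kube-system:44134", cfg)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```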

// CertPoolFromFile returns an x509.CertPool containing the certificates
// in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not

@@ -80,7 +80,7 @@ checkDesiredVersion() {
  if type "curl" > /dev/null; then
    TAG=$(curl -SsL $release_url | awk '/\/tag\//' | grep -v no-underline | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
  elif type "wget" > /dev/null; then
    TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
    TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | grep -v no-underline | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
  fi
  if [ "x$TAG" == "x" ]; then
    echo "Cannot determine ${DESIRED_VERSION} tag."
@@ -184,7 +184,9 @@ help () {
# cleanup temporary files to avoid https://github.com/kubernetes/helm/issues/2977
cleanup() {
  if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
    rm -rf "$HELM_TMP_ROOT"
  fi
}

# Execution
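As an aside on the `checkDesiredVersion` hunk above: the installer script scrapes the releases page with curl/wget and awk. For comparison only, here is a small Go sketch that asks the GitHub releases API for the latest tag of `kubernetes/helm`; it is not part of the installer, just an alternative way to obtain the same information.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// latestTag queries the GitHub API for a repository's latest release tag.
// This is an alternative to scraping the releases HTML page, shown only for
// illustration; the installer script keeps the curl/wget approach.
func latestTag(repo string) (string, error) {
	resp, err := http.Get("https://api.github.com/repos/" + repo + "/releases/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return "", err
	}
	return release.TagName, nil
}

func main() {
	tag, err := latestTag("kubernetes/helm")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tag)
}
```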
