Merge branch 'master' into helm-ls-modification

Derek Bassett committed ae5d13c5e1 (pull/3728/head)

@ -0,0 +1,9 @@
<!-- If you need help or think you have found a bug, please help us with your issue by entering the following information (otherwise you can delete this text): -->
Output of `helm version`:
Output of `kubectl version`:
Cloud Provider/Platform (AKS, GKE, Minikube etc.):

@ -120,7 +120,6 @@ coverage:
HAS_GLIDE := $(shell command -v glide;)
HAS_GOX := $(shell command -v gox;)
HAS_GIT := $(shell command -v git;)
HAS_HG := $(shell command -v hg;)
.PHONY: bootstrap
bootstrap:
@ -133,9 +132,6 @@ endif
ifndef HAS_GIT
$(error You must install Git)
endif
ifndef HAS_HG
$(error You must install Mercurial)
endif
glide install --strip-vendor
go build -o bin/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go

@ -40,7 +40,11 @@ Binary downloads of the Helm client can be found at the following links:
- [Windows](https://kubernetes-helm.storage.googleapis.com/helm-v2.8.2-windows-amd64.tar.gz)
Unpack the `helm` binary and add it to your PATH and you are good to go!
macOS/[homebrew](https://brew.sh/) users can also use `brew install kubernetes-helm`.
If you want to use a package manager:
- macOS/[homebrew](https://brew.sh/) users can use `brew install kubernetes-helm`.
- Windows/[chocolatey](https://chocolatey.org/) users can use `choco install kubernetes-helm`.
To rapidly get Helm up and running, start with the [Quick Start Guide](https://docs.helm.sh/using_helm/#quickstart-guide).

@ -38,6 +38,7 @@ message Hook {
enum DeletePolicy {
SUCCEEDED = 0;
FAILED = 1;
BEFORE_HOOK_CREATION = 2;
}
string name = 1;
// Kind is the Kubernetes kind.

@ -46,7 +46,7 @@ func TestCreateCmd(t *testing.T) {
defer os.Chdir(pwd)
// Run a create
cmd := newCreateCmd(os.Stdout)
cmd := newCreateCmd(ioutil.Discard)
if err := cmd.RunE(cmd, []string{cname}); err != nil {
t.Errorf("Failed to run create: %s", err)
return
@ -117,7 +117,7 @@ func TestCreateStarterCmd(t *testing.T) {
defer os.Chdir(pwd)
// Run a create
cmd := newCreateCmd(os.Stdout)
cmd := newCreateCmd(ioutil.Discard)
cmd.ParseFlags([]string{"--starter", "starterchart"})
if err := cmd.RunE(cmd, []string{cname}); err != nil {
t.Errorf("Failed to run create: %s", err)

@ -55,9 +55,7 @@ func newGetManifestCmd(client helm.Interface, out io.Writer) *cobra.Command {
return errReleaseRequired
}
get.release = args[0]
if get.client == nil {
get.client = helm.NewClient(helm.Host(settings.TillerHost))
}
get.client = ensureHelmClient(get.client)
return get.run()
},
}

@ -30,6 +30,8 @@ import (
"k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
// Import to initialize client auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/helm/pkg/helm"
helm_env "k8s.io/helm/pkg/helm/environment"
"k8s.io/helm/pkg/helm/portforwarder"
@ -214,7 +216,7 @@ func prettyError(err error) error {
}
// If it's grpc's error, make it more user-friendly.
if s, ok := status.FromError(err); ok {
return s.Err()
return fmt.Errorf(s.Message())
}
// Else return the original error.
return err

@ -43,7 +43,10 @@ func runReleaseCases(t *testing.T, tests []releaseCase, rcmd releaseCmd) {
var buf bytes.Buffer
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &helm.FakeClient{Rels: tt.rels}
c := &helm.FakeClient{
Rels: tt.rels,
Responses: tt.responses,
}
cmd := rcmd(c, &buf)
cmd.ParseFlags(tt.flags)
err := cmd.RunE(cmd, tt.args)
@ -69,7 +72,8 @@ type releaseCase struct {
err bool
resp *release.Release
// Rels are the available releases at the start of the test.
rels []*release.Release
rels []*release.Release
responses map[string]release.TestRun_Status
}
// tempHelmHome sets up a Helm Home in a temp dir.

@ -313,11 +313,6 @@ func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[st
dest[k] = v
continue
}
// If the key doesn't exist already, then just set the key to that value
if _, exists := dest[k]; !exists {
dest[k] = nextMap
continue
}
// Edge case: If the key exists in the destination, but isn't a map
destMap, isMap := dest[k].(map[string]interface{})
// If the source map has a map for this key, prefer it
@ -463,7 +458,7 @@ func locateChartPath(repoURL, username, password, name, version string, verify b
return filename, err
}
return filename, fmt.Errorf("failed to download %q", name)
return filename, fmt.Errorf("failed to download %q (hint: running `helm repo update` may help)", name)
}
func generateName(nameTemplate string) (string, error) {

@ -176,6 +176,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
return nil, err
}
}
automountServiceAccountToken := opts.ServiceAccount != ""
d := &v1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: opts.Namespace,
@ -189,7 +190,8 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
Labels: labels,
},
Spec: v1.PodSpec{
ServiceAccountName: opts.ServiceAccount,
ServiceAccountName: opts.ServiceAccount,
AutomountServiceAccountToken: &automountServiceAccountToken,
Containers: []v1.Container{
{
Name: "tiller",

@ -96,6 +96,9 @@ func TestDeploymentManifestForServiceAccount(t *testing.T) {
if got := d.Spec.Template.Spec.ServiceAccountName; got != tt.serviceAccount {
t.Errorf("%s: expected service account value %q, got %q", tt.name, tt.serviceAccount, got)
}
if got := *d.Spec.Template.Spec.AutomountServiceAccountToken; got != (tt.serviceAccount != "") {
t.Errorf("%s: unexpected automountServiceAccountToken = %t for serviceAccount %q", tt.name, got, tt.serviceAccount)
}
}
}

@ -101,6 +101,9 @@ func (l *lintCmd) run() error {
if linter, err := lintChart(path, rvals, l.namespace, l.strict); err != nil {
fmt.Println("==> Skipping", path)
fmt.Println(err)
if err == errLintNoChart {
failures = failures + 1
}
} else {
fmt.Println("==> Linting", path)

@ -28,6 +28,7 @@ var (
archivedChartPathWithHyphens = "testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz"
invalidArchivedChartPath = "testdata/testcharts/invalidcompressedchart0.1.0.tgz"
chartDirPath = "testdata/testcharts/decompressedchart/"
chartMissingManifest = "testdata/testcharts/chart-missing-manifest"
)
func TestLintChart(t *testing.T) {
@ -46,4 +47,8 @@ func TestLintChart(t *testing.T) {
if _, err := lintChart(invalidArchivedChartPath, values, namespace, strict); err == nil {
t.Errorf("Expected a chart parsing error")
}
if _, err := lintChart(chartMissingManifest, values, namespace, strict); err == nil {
t.Errorf("Expected a chart parsing error")
}
}

@ -148,7 +148,7 @@ func (l *listCmd) run() error {
return prettyError(err)
}
if len(res.Releases) == 0 {
if len(res.GetReleases()) == 0 {
return nil
}
@ -239,13 +239,17 @@ func formatList(rels []*release.Release, colWidth uint) string {
table.MaxColWidth = colWidth
table.AddRow("NAME", "REVISION", "UPDATED", "STATUS", "CHART", "APP VERSION", "NAMESPACE")
for _, r := range rels {
c := fmt.Sprintf("%s-%s", r.Chart.Metadata.Name, r.Chart.Metadata.Version)
t := timeconv.String(r.Info.LastDeployed)
s := r.Info.Status.Code.String()
v := r.Version
a := r.Chart.Metadata.AppVersion
n := r.Namespace
table.AddRow(r.Name, v, t, s, c, a, n)
md := r.GetChart().GetMetadata()
c := fmt.Sprintf("%s-%s", md.GetName(), md.GetVersion())
t := "-"
if tspb := r.GetInfo().GetLastDeployed(); tspb != nil {
t = timeconv.String(tspb)
}
s := r.GetInfo().GetStatus().GetCode().String()
v := r.GetVersion()
a := md.GetAppVersion()
n := r.GetNamespace()
table.AddRow(r.GetName(), v, t, s, c, a, n)
}
return table.String()
}

@ -53,6 +53,9 @@ type packageCmd struct {
save bool
sign bool
path string
valueFiles valueFiles
values []string
stringValues []string
key string
keyring string
version string
@ -95,6 +98,9 @@ func newPackageCmd(out io.Writer) *cobra.Command {
}
f := cmd.Flags()
f.VarP(&pkg.valueFiles, "values", "f", "specify values in a YAML file or a URL(can specify multiple)")
f.StringArrayVar(&pkg.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.StringArrayVar(&pkg.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
f.BoolVar(&pkg.save, "save", true, "save packaged chart to local chart repository")
f.BoolVar(&pkg.sign, "sign", false, "use a PGP private key to sign this package")
f.StringVar(&pkg.key, "key", "", "name of the key to use when signing. Used if --sign is true")
@ -133,6 +139,20 @@ func (p *packageCmd) run() error {
return err
}
overrideVals, err := vals(p.valueFiles, p.values, p.stringValues)
if err != nil {
return err
}
combinedVals, err := chartutil.CoalesceValues(ch, &chart.Config{Raw: string(overrideVals)})
if err != nil {
return err
}
newVals, err := combinedVals.YAML()
if err != nil {
return err
}
ch.Values = &chart.Config{Raw: newVals}
// If version is set, modify the version.
if len(p.version) != 0 {
if err := setVersion(ch, p.version); err != nil {

@ -21,6 +21,7 @@ import (
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/spf13/cobra"
@ -122,6 +123,13 @@ func TestPackage(t *testing.T) {
hasfile: "chart-missing-deps-0.1.0.tgz",
err: true,
},
{
name: "package --values does-not-exist",
args: []string{"testdata/testcharts/alpine"},
flags: map[string]string{"values": "does-not-exist"},
expect: "does-not-exist: no such file or directory",
err: true,
},
}
// Because these tests are destructive, we run them in a tempdir.
@ -245,6 +253,150 @@ func TestSetAppVersion(t *testing.T) {
}
}
func TestPackageValues(t *testing.T) {
testCases := []struct {
desc string
args []string
valuefilesContents []string
flags map[string]string
expected []string
}{
{
desc: "helm package, single values file",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo"},
expected: []string{"Name: chart-name-foo"},
},
{
desc: "helm package, multiple values files",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo", "foo: bar"},
expected: []string{"Name: chart-name-foo", "foo: bar"},
},
{
desc: "helm package, with set option",
args: []string{"testdata/testcharts/alpine"},
flags: map[string]string{"set": "Name=chart-name-foo"},
expected: []string{"Name: chart-name-foo"},
},
{
desc: "helm package, set takes precedence over value file",
args: []string{"testdata/testcharts/alpine"},
valuefilesContents: []string{"Name: chart-name-foo"},
flags: map[string]string{"set": "Name=chart-name-bar"},
expected: []string{"Name: chart-name-bar"},
},
}
thome, err := tempHelmHome(t)
if err != nil {
t.Fatal(err)
}
cleanup := resetEnv()
defer func() {
os.RemoveAll(thome.String())
cleanup()
}()
settings.Home = thome
for _, tc := range testCases {
var files []string
for _, contents := range tc.valuefilesContents {
f, err := createValuesFile(contents)
if err != nil {
t.Errorf("%q unexpected error creating temporary values file: %q", tc.desc, err)
}
defer os.RemoveAll(filepath.Dir(f))
files = append(files, f)
}
valueFiles := strings.Join(files, ",")
expected, err := chartutil.ReadValues([]byte(strings.Join(tc.expected, "\n")))
if err != nil {
t.Errorf("unexpected error parsing values: %q", err)
}
runAndVerifyPackageCommandValues(t, tc.args, tc.flags, valueFiles, expected)
}
}
func runAndVerifyPackageCommandValues(t *testing.T, args []string, flags map[string]string, valueFiles string, expected chartutil.Values) {
outputDir, err := ioutil.TempDir("", "helm-package")
if err != nil {
t.Errorf("unexpected error creating temporary output directory: %q", err)
}
defer os.RemoveAll(outputDir)
if len(flags) == 0 {
flags = make(map[string]string)
}
flags["destination"] = outputDir
if len(valueFiles) > 0 {
flags["values"] = valueFiles
}
cmd := newPackageCmd(&bytes.Buffer{})
setFlags(cmd, flags)
err = cmd.RunE(cmd, args)
if err != nil {
t.Errorf("unexpected error: %q", err)
}
outputFile := filepath.Join(outputDir, "alpine-0.1.0.tgz")
verifyOutputChartExists(t, outputFile)
var actual chartutil.Values
actual, err = getChartValues(outputFile)
if err != nil {
t.Errorf("unexpected error extracting chart values: %q", err)
}
verifyValues(t, actual, expected)
}
func createValuesFile(data string) (string, error) {
outputDir, err := ioutil.TempDir("", "values-file")
if err != nil {
return "", err
}
outputFile := filepath.Join(outputDir, "values.yaml")
if err = ioutil.WriteFile(outputFile, []byte(data), 0755); err != nil {
os.RemoveAll(outputFile)
return "", err
}
return outputFile, nil
}
func getChartValues(chartPath string) (chartutil.Values, error) {
chart, err := chartutil.Load(chartPath)
if err != nil {
return nil, err
}
return chartutil.ReadValues([]byte(chart.Values.Raw))
}
func verifyValues(t *testing.T, actual, expected chartutil.Values) {
for key, value := range expected.AsMap() {
if got := actual[key]; got != value {
t.Errorf("Expected %q, got %q (%v)", value, got, actual)
}
}
}
func verifyOutputChartExists(t *testing.T, chartPath string) {
if chartFile, err := os.Stat(chartPath); err != nil {
t.Errorf("expected file %q, got err %q", chartPath, err)
} else if chartFile.Size() == 0 {
t.Errorf("file %q has zero bytes.", chartPath)
}
}
func setFlags(cmd *cobra.Command, flags map[string]string) {
dest := cmd.Flags()
for f, v := range flags {

@ -17,55 +17,50 @@ limitations under the License.
package main
import (
"bytes"
"io"
"testing"
"github.com/spf13/cobra"
"k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/proto/hapi/release"
)
func TestReleaseTesting(t *testing.T) {
tests := []struct {
name string
args []string
flags []string
responses map[string]release.TestRun_Status
fail bool
}{
tests := []releaseCase{
{
name: "basic test",
args: []string{"example-release"},
flags: []string{},
responses: map[string]release.TestRun_Status{"PASSED: green lights everywhere": release.TestRun_SUCCESS},
fail: false,
err: false,
},
{
name: "test failure",
args: []string{"example-fail"},
flags: []string{},
responses: map[string]release.TestRun_Status{"FAILURE: red lights everywhere": release.TestRun_FAILURE},
fail: true,
err: true,
},
{
name: "test unknown",
args: []string{"example-unknown"},
flags: []string{},
responses: map[string]release.TestRun_Status{"UNKNOWN: yellow lights everywhere": release.TestRun_UNKNOWN},
fail: false,
err: false,
},
{
name: "test error",
args: []string{"example-error"},
flags: []string{},
responses: map[string]release.TestRun_Status{"ERROR: yellow lights everywhere": release.TestRun_FAILURE},
fail: true,
err: true,
},
{
name: "test running",
args: []string{"example-running"},
flags: []string{},
responses: map[string]release.TestRun_Status{"RUNNING: things are happpeningggg": release.TestRun_RUNNING},
fail: false,
err: false,
},
{
name: "multiple tests example",
@ -78,29 +73,11 @@ func TestReleaseTesting(t *testing.T) {
"FAILURE: good thing u checked :)": release.TestRun_FAILURE,
"RUNNING: things are happpeningggg yet again": release.TestRun_RUNNING,
"PASSED: feel free to party again": release.TestRun_SUCCESS},
fail: true,
err: true,
},
}
for _, tt := range tests {
c := &helm.FakeClient{Responses: tt.responses}
buf := bytes.NewBuffer(nil)
cmd := newReleaseTestCmd(c, buf)
cmd.ParseFlags(tt.flags)
err := cmd.RunE(cmd, tt.args)
if err == nil && tt.fail {
t.Errorf("%q did not fail but should have failed", tt.name)
}
if err != nil {
if tt.fail {
continue
} else {
t.Errorf("%q reported error: %s", tt.name, err)
}
}
}
runReleaseCases(t, tests, func(c *helm.FakeClient, out io.Writer) *cobra.Command {
return newReleaseTestCmd(c, out)
})
}

@ -19,6 +19,7 @@ package main
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/spf13/cobra"
@ -86,9 +87,16 @@ func index(dir, url, mergeTo string) error {
return err
}
if mergeTo != "" {
i2, err := repo.LoadIndexFile(mergeTo)
if err != nil {
return fmt.Errorf("Merge failed: %s", err)
// if index.yaml is missing then create an empty one to merge into
var i2 *repo.IndexFile
if _, err := os.Stat(mergeTo); os.IsNotExist(err) {
i2 = repo.NewIndexFile()
i2.WriteFile(mergeTo, 0755)
} else {
i2, err = repo.LoadIndexFile(mergeTo)
if err != nil {
return fmt.Errorf("Merge failed: %s", err)
}
}
i.Merge(i2)
}

@ -112,6 +112,36 @@ func TestRepoIndexCmd(t *testing.T) {
if vs[0].Version != expectedVersion {
t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
}
// test that index.yaml gets generated on merge even when it doesn't exist
if err := os.Remove(destIndex); err != nil {
t.Fatal(err)
}
c.ParseFlags([]string{"--merge", destIndex})
if err := c.RunE(c, []string{dir}); err != nil {
t.Error(err)
}
_, err = repo.LoadIndexFile(destIndex)
if err != nil {
t.Fatal(err)
}
// verify it didn't create an empty index.yaml and the merge happened
if len(index.Entries) != 2 {
t.Errorf("expected 2 entries, got %d: %#v", len(index.Entries), index.Entries)
}
vs = index.Entries["compressedchart"]
if len(vs) != 3 {
t.Errorf("expected 3 versions, got %d: %#v", len(vs), vs)
}
expectedVersion = "0.3.0"
if vs[0].Version != expectedVersion {
t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
}
}
func linkOrCopy(old, new string) error {

@ -31,40 +31,58 @@ import (
"k8s.io/helm/pkg/proto/hapi/release"
)
type resetCase struct {
name string
err bool
resp []*release.Release
removeHelmHome bool
force bool
expectedActions int
expectedOutput string
}
func TestResetCmd(t *testing.T) {
home, err := ioutil.TempDir("", "helm_home")
if err != nil {
t.Fatal(err)
}
defer os.Remove(home)
var buf bytes.Buffer
c := &helm.FakeClient{}
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
if err := cmd.run(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fc.Actions()
if len(actions) != 3 {
t.Errorf("Expected 3 actions, got %d", len(actions))
}
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster."
if !strings.Contains(buf.String(), expected) {
t.Errorf("expected %q, got %q", expected, buf.String())
}
if _, err := os.Stat(home); err != nil {
t.Errorf("Helm home directory %s does not exists", home)
}
verifyResetCmd(t, resetCase{
name: "test reset command",
expectedActions: 3,
expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
})
}
func TestResetCmd_removeHelmHome(t *testing.T) {
verifyResetCmd(t, resetCase{
name: "test reset command - remove helm home",
removeHelmHome: true,
expectedActions: 3,
expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
})
}
func TestReset_deployedReleases(t *testing.T) {
verifyResetCmd(t, resetCase{
name: "test reset command - deployed releases",
resp: []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
},
err: true,
expectedOutput: "there are still 1 deployed releases (Tip: use --force to remove Tiller. Releases will not be deleted.)",
})
}
func TestReset_forceFlag(t *testing.T) {
verifyResetCmd(t, resetCase{
name: "test reset command - force flag",
force: true,
resp: []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
},
expectedActions: 3,
expectedOutput: "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster.",
})
}
func verifyResetCmd(t *testing.T, tc resetCase) {
home, err := ioutil.TempDir("", "helm_home")
if err != nil {
t.Fatal(err)
@ -72,99 +90,42 @@ func TestResetCmd_removeHelmHome(t *testing.T) {
defer os.Remove(home)
var buf bytes.Buffer
c := &helm.FakeClient{}
c := &helm.FakeClient{
Rels: tc.resp,
}
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
removeHelmHome: true,
removeHelmHome: tc.removeHelmHome,
force: tc.force,
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
if err := cmd.run(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fc.Actions()
if len(actions) != 3 {
t.Errorf("Expected 3 actions, got %d", len(actions))
}
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster."
if !strings.Contains(buf.String(), expected) {
t.Errorf("expected %q, got %q", expected, buf.String())
}
if _, err := os.Stat(home); err == nil {
t.Errorf("Helm home directory %s already exists", home)
}
}
func TestReset_deployedReleases(t *testing.T) {
home, err := ioutil.TempDir("", "helm_home")
if err != nil {
t.Fatal(err)
}
defer os.Remove(home)
var buf bytes.Buffer
resp := []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
}
c := &helm.FakeClient{
Rels: resp,
}
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
err = cmd.run()
expected := "there are still 1 deployed releases (Tip: use --force to remove Tiller. Releases will not be deleted.)"
if !strings.Contains(err.Error(), expected) {
if !tc.err && err != nil {
t.Errorf("unexpected error: %v", err)
}
if _, err := os.Stat(home); err != nil {
t.Errorf("Helm home directory %s does not exists", home)
}
}
func TestReset_forceFlag(t *testing.T) {
home, err := ioutil.TempDir("", "helm_home")
if err != nil {
t.Fatal(err)
got := buf.String()
if tc.err {
got = err.Error()
}
defer os.Remove(home)
var buf bytes.Buffer
resp := []*release.Release{
helm.ReleaseMock(&helm.MockReleaseOptions{Name: "atlas-guide", StatusCode: release.Status_DEPLOYED}),
}
c := &helm.FakeClient{
Rels: resp,
}
fc := fake.NewSimpleClientset()
cmd := &resetCmd{
force: true,
out: &buf,
home: helmpath.Home(home),
client: c,
kubeClient: fc,
namespace: core.NamespaceDefault,
}
if err := cmd.run(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := fc.Actions()
if len(actions) != 3 {
t.Errorf("Expected 3 actions, got %d", len(actions))
if tc.expectedActions > 0 && len(actions) != tc.expectedActions {
t.Errorf("Expected %d actions, got %d", tc.expectedActions, len(actions))
}
expected := "Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster."
if !strings.Contains(buf.String(), expected) {
t.Errorf("expected %q, got %q", expected, buf.String())
if !strings.Contains(got, tc.expectedOutput) {
t.Errorf("expected %q, got %q", tc.expectedOutput, got)
}
if _, err := os.Stat(home); err != nil {
_, err = os.Stat(home)
if !tc.removeHelmHome && err != nil {
t.Errorf("Helm home directory %s does not exists", home)
}
if tc.removeHelmHome && err == nil {
t.Errorf("Helm home directory %s exists", home)
}
}

@ -117,31 +117,10 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
} else {
return err
}
// verify specified templates exist relative to chart
rf := []string{}
var af string
var err error
if len(t.renderFiles) > 0 {
for _, f := range t.renderFiles {
if !filepath.IsAbs(f) {
af, err = filepath.Abs(filepath.Join(t.chartPath, f))
if err != nil {
return fmt.Errorf("could not resolve template path: %s", err)
}
} else {
af = f
}
rf = append(rf, af)
if _, err := os.Stat(af); err != nil {
return fmt.Errorf("could not resolve template path: %s", err)
}
}
}
// verify that output-dir exists if provided
if t.outputDir != "" {
_, err = os.Stat(t.outputDir)
_, err := os.Stat(t.outputDir)
if os.IsNotExist(err) {
return fmt.Errorf("output-dir '%s' does not exist", t.outputDir)
}
@ -232,19 +211,7 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
m := tiller.Manifest{Name: k, Content: v, Head: &util.SimpleHead{Kind: h}}
listManifests = append(listManifests, m)
}
in := func(needle string, haystack []string) bool {
// make needle path absolute
d := strings.Split(needle, string(os.PathSeparator))
dd := d[1:]
an := filepath.Join(t.chartPath, strings.Join(dd, string(os.PathSeparator)))
for _, h := range haystack {
if h == an {
return true
}
}
return false
}
if settings.Debug {
rel := &release.Release{
Name: t.releaseName,
@ -257,10 +224,45 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error {
printRelease(os.Stdout, rel)
}
for _, m := range tiller.SortByKind(listManifests) {
if len(t.renderFiles) > 0 && !in(m.Name, rf) {
continue
var manifestsToRender []tiller.Manifest
// if we have a list of files to render, then check that each of the
// provided files exists in the chart.
if len(t.renderFiles) > 0 {
for _, f := range t.renderFiles {
missing := true
if !filepath.IsAbs(f) {
newF, err := filepath.Abs(filepath.Join(t.chartPath, f))
if err != nil {
return fmt.Errorf("could not turn template path %s into absolute path: %s", f, err)
}
f = newF
}
for _, manifest := range listManifests {
manifestPathSplit := strings.Split(manifest.Name, string(filepath.Separator))
// remove the chart name from the path
manifestPathSplit = manifestPathSplit[1:]
toJoin := append([]string{t.chartPath}, manifestPathSplit...)
manifestPath := filepath.Join(toJoin...)
// if the filepath provided matches a manifest path in the
// chart, render that manifest
if f == manifestPath {
manifestsToRender = append(manifestsToRender, manifest)
missing = false
}
}
if missing {
return fmt.Errorf("could not find template %s in chart", f)
}
}
} else {
// no renderFiles provided, render all manifests in the chart
manifestsToRender = listManifests
}
for _, m := range tiller.SortByKind(manifestsToRender) {
data := m.Content
b := filepath.Base(m.Name)
if !t.showNotes && b == "NOTES.txt" {

@ -27,10 +27,13 @@ import (
"testing"
)
var chartPath = "./../../pkg/chartutil/testdata/subpop/charts/subchart1"
var (
subchart1ChartPath = "./../../pkg/chartutil/testdata/subpop/charts/subchart1"
frobnitzChartPath = "./../../pkg/chartutil/testdata/frobnitz"
)
func TestTemplateCmd(t *testing.T) {
absChartPath, err := filepath.Abs(chartPath)
subchart1AbsChartPath, err := filepath.Abs(subchart1ChartPath)
if err != nil {
t.Fatal(err)
}
@ -40,74 +43,95 @@ func TestTemplateCmd(t *testing.T) {
args []string
expectKey string
expectValue string
expectError string
}{
{
name: "check_name",
desc: "check for a known name in chart",
args: []string{chartPath},
args: []string{subchart1ChartPath},
expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: nginx",
},
{
name: "check_set_name",
desc: "verify --set values exist",
args: []string{chartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
args: []string{subchart1ChartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache",
},
{
name: "check_execute",
desc: "verify --execute single template",
args: []string{chartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
args: []string{subchart1ChartPath, "-x", "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache",
},
{
name: "check_execute_non_existent",
desc: "verify --execute fails on a template that doesn't exist",
args: []string{subchart1ChartPath, "-x", "templates/thisdoesntexist.yaml"},
expectError: "could not find template",
},
{
name: "check_execute_absolute",
desc: "verify --execute single template",
args: []string{chartPath, "-x", absChartPath + "/" + "templates/service.yaml", "--set", "service.name=apache"},
args: []string{subchart1ChartPath, "-x", subchart1AbsChartPath + "/" + "templates/service.yaml", "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache",
},
{
name: "check_execute_subchart_template",
desc: "verify --execute single template on a subchart template",
args: []string{subchart1ChartPath, "-x", "charts/subcharta/templates/service.yaml", "--set", "subcharta.service.name=foobar"},
expectKey: "subchart1/charts/subcharta/templates/service.yaml",
expectValue: "protocol: TCP\n name: foobar",
},
{
name: "check_execute_subchart_template_for_tgz_subchart",
desc: "verify --execute single template on a subchart template where the subchart is a .tgz in the chart directory",
args: []string{frobnitzChartPath, "-x", "charts/mariner/templates/placeholder.tpl", "--set", "mariner.name=moon"},
expectKey: "frobnitz/charts/mariner/templates/placeholder.tpl",
expectValue: "Goodbye moon",
},
{
name: "check_namespace",
desc: "verify --namespace",
args: []string{chartPath, "--namespace", "test"},
args: []string{subchart1ChartPath, "--namespace", "test"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "namespace: \"test\"",
},
{
name: "check_release_name",
desc: "verify --release exists",
args: []string{chartPath, "--name", "test"},
args: []string{subchart1ChartPath, "--name", "test"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "release-name: \"test\"",
},
{
name: "check_notes",
desc: "verify --notes shows notes",
args: []string{chartPath, "--notes", "true"},
args: []string{subchart1ChartPath, "--notes", "true"},
expectKey: "subchart1/templates/NOTES.txt",
expectValue: "Sample notes for subchart1",
},
{
name: "check_values_files",
desc: "verify --values files values exist",
args: []string{chartPath, "--values", chartPath + "/charts/subchartA/values.yaml"},
args: []string{subchart1ChartPath, "--values", subchart1ChartPath + "/charts/subchartA/values.yaml"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "name: apache",
},
{
name: "check_name_template",
desc: "verify --name-template result exists",
args: []string{chartPath, "--name-template", "foobar-{{ b64enc \"abc\" }}-baz"},
args: []string{subchart1ChartPath, "--name-template", "foobar-{{ b64enc \"abc\" }}-baz"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "release-name: \"foobar-YWJj-baz\"",
},
{
name: "check_kube_version",
desc: "verify --kube-version overrides the kubernetes version",
args: []string{chartPath, "--kube-version", "1.6"},
args: []string{subchart1ChartPath, "--kube-version", "1.6"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "kube-version/major: \"1\"\n kube-version/minor: \"6\"\n kube-version/gitversion: \"v1.6.0\"",
},
@ -115,7 +139,8 @@ func TestTemplateCmd(t *testing.T) {
var buf bytes.Buffer
for _, tt := range tests {
t.Run(tt.name, func(T *testing.T) {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// capture stdout
old := os.Stdout
r, w, _ := os.Pipe()
@ -125,8 +150,20 @@ func TestTemplateCmd(t *testing.T) {
cmd := newTemplateCmd(out)
cmd.SetArgs(tt.args)
err := cmd.Execute()
if err != nil {
t.Errorf("expected: %v, got %v", tt.expectValue, err)
if tt.expectError != "" {
if err == nil {
t.Errorf("expected err: %s, but no error occurred", tt.expectError)
}
// non nil error, check if it contains the expected error
if strings.Contains(err.Error(), tt.expectError) {
// had the error we were looking for, this test case is
// done
return
}
t.Fatalf("expected err: %q, got: %q", tt.expectError, err)
} else if err != nil {
t.Errorf("expected no error, got %v", err)
}
// restore stdout
w.Close()

@ -130,7 +130,7 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command {
f.StringVar(&upgrade.version, "version", "", "specify the exact chart version to use. If this is not specified, the latest version is used")
f.Int64Var(&upgrade.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&upgrade.resetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "when upgrading, reuse the last release's values, and merge in any new values. If '--reset-values' is specified, this is ignored.")
f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.")
f.BoolVar(&upgrade.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.StringVar(&upgrade.repoURL, "repo", "", "chart repository url where to locate the requested chart")
f.StringVar(&upgrade.username, "username", "", "chart repository username where to locate the requested chart")

@ -0,0 +1,37 @@
# Custom Resource Definitions
This section of the Best Practices Guide deals with creating and using Custom Resource Definition
objects.
When working with Custom Resource Definitions (CRDs), it is important to distinguish
two different pieces:
- There is a declaration of a CRD. This is the YAML file that has the kind `CustomResourceDefinition`
- Then there are resources that _use_ the CRD. Say a CRD defines `foo.example.com/v1`. Any resource
that has `apiVersion: example.com/v1` and kind `Foo` is a resource that uses the CRD.
## Install a CRD Declaration Before Using the Resource
Helm is optimized to load as many resources into Kubernetes as fast as possible.
By design, Kubernetes can take an entire set of manifests and bring them all
online (this is called the reconciliation loop).
But there's a difference with CRDs.
For a CRD, the declaration must be registered before any resources of that CRD's
kind(s) can be used. And the registration process sometimes takes a few seconds.
### Method 1: Separate Charts
One way to do this is to put the CRD definition in one chart, and then put any
resources that use that CRD in _another_ chart.
In this method, each chart must be installed separately.
### Method 2: Pre-install Hooks
To package the two together, add a `pre-install` hook to the CRD definition so
that it is fully installed before the rest of the chart is executed.
Note that if you create the CRD with a `pre-install` hook, that CRD definition
will not be deleted when `helm delete` is run.
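For illustration, here is a minimal sketch of a CRD declaration carrying that `pre-install` hook, reusing the `foo.example.com/v1`/`Foo` example from above (all names are illustrative):
```yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: foos.example.com
  annotations:
    # register the CRD declaration before the rest of the chart is installed
    "helm.sh/hook": pre-install
spec:
  group: example.com
  version: v1
  scope: Namespaced
  names:
    plural: foos
    kind: Foo
```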

@ -155,7 +155,7 @@ Template comments should be used when documenting features of a template, such a
```yaml
{{- /*
mychart.shortname provides a 6 char truncated version of the release name.
*/ }}
*/ -}}
{{ define "mychart.shortname" -}}
{{ .Release.Name | trunc 6 }}
{{- end -}}

@ -1,38 +0,0 @@
# Third Party Resources
This section of the Best Practices Guide deals with creating and using Third Party Resource
objects.
When working with Third Party Resources (TPRs), it is important to distinguish
two different pieces:
- There is a declaration of a TPR. This is the YAML file that has the kind `ThirdPartyResource`
- Then there are resources that _use_ the TPR. Say a TPR defines `foo.example.com/v1`. Any resource
that has `apiVersion: example.com/v1` and kind `Foo` is a resource that uses the
TPR.
## Install a TPR Declaration Before Using the Resource
Helm is optimized to load as many resources into Kubernetes as fast as possible.
By design, Kubernetes can take an entire set of manifests and bring them all
online (this is called the reconciliation loop).
But there's a difference with TPRs.
For a TPR, the declaration must be registered before any resources of that TPR's
kind(s) can be used. And the registration process sometimes takes a few seconds.
### Method 1: Separate Charts
One way to do this is to put the TPR definition in one chart, and then put any
resources that use that TPR in _another_ chart.
In this method, each chart must be installed separately.
### Method 2: Pre-install Hooks
To package the two together, add a `pre-install` hook to the TPR definition so
that it is fully installed before the rest of the chart is executed.
Note that if you create the TPR with a `pre-install` hook, that TPR definition
will not be deleted when `helm delete` is run.

@ -275,7 +275,7 @@ fantastic-charts https://fantastic-charts.storage.googleapis.com
If the charts are backed by HTTP basic authentication, you can also supply the
username and password here:
``console
```console
$ helm repo add fantastic-charts https://fantastic-charts.storage.googleapis.com --username my-username --password my-password
$ helm repo list
fantastic-charts https://fantastic-charts.storage.googleapis.com

@ -3,7 +3,7 @@
## Prerequisites
* Install the [gsutil](https://cloud.google.com/storage/docs/gsutil) tool. *We rely heavily on the gsutil rsync functionality*
* Be sure to have access to the helm binary
* Be sure to have access to the Helm binary
* _Optional: We recommend you set [object versioning](https://cloud.google.com/storage/docs/gsutil/addlhelp/ObjectVersioningandConcurrencyControl#top_of_page) on your GCS bucket in case you accidentally delete something._
## Set up a local chart repository directory
@ -16,7 +16,7 @@ $ mv alpine-0.1.0.tgz fantastic-charts/
```
## Generate an updated index.yaml
Use helm to generate an updated index.yaml file by passing in the directory path and the url of the remote repository to the `helm repo index` command like this:
Use Helm to generate an updated index.yaml file by passing in the directory path and the url of the remote repository to the `helm repo index` command like this:
```console
$ helm repo index fantastic-charts/ --url https://fantastic-charts.storage.googleapis.com

@ -105,7 +105,7 @@ data:
{{- end }}
```
As mentioned above, **template names are global**. As a result of this, if two templates are declared with the same name the last occurance will be the one that is used. Since templates in subcharts are compiled together with top-level templates, it is best to name your templates with _chart specific names_. A popular naming convention is to prefix each defined template with the name of the chart: `{{ define "mychart.labels" }}`.
As mentioned above, **template names are global**. As a result of this, if two templates are declared with the same name the last occurrence will be the one that is used. Since templates in subcharts are compiled together with top-level templates, it is best to name your templates with _chart specific names_. A popular naming convention is to prefix each defined template with the name of the chart: `{{ define "mychart.labels" }}`.
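As a small sketch of that convention (the template name and label keys here are hypothetical), a chart-prefixed definition and a call site might look like:
```yaml
{{- /* a template namespaced by the chart name, per the convention above */ -}}
{{- define "mychart.labels" -}}
app: {{ .Chart.Name }}
release: {{ .Release.Name }}
{{- end -}}
metadata:
  labels:
{{ include "mychart.labels" . | indent 4 }}
```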
## Setting the scope of a template

@ -27,8 +27,8 @@ wordpress/
README.md # OPTIONAL: A human-readable README file
requirements.yaml # OPTIONAL: A YAML file listing dependencies for the chart
values.yaml # The default configuration values for this chart
charts/ # OPTIONAL: A directory containing any charts upon which this chart depends.
templates/ # OPTIONAL: A directory of templates that, when combined with values,
charts/ # A directory containing any charts upon which this chart depends.
templates/ # A directory of templates that, when combined with values,
# will generate valid Kubernetes manifest files.
templates/NOTES.txt # OPTIONAL: A plain text file containing short usage notes
```
@ -36,13 +36,12 @@ wordpress/
Helm reserves use of the `charts/` and `templates/` directories, and of
the listed file names. Other files will be left as they are.
While the `charts` and `templates` directories are optional, there must be at least one chart dependency or template file for the chart to be valid.
## The Chart.yaml File
The `Chart.yaml` file is required for a chart. It contains the following fields:
```yaml
apiVersion: The chart API version, always "v1" (required)
name: The name of the chart (required)
version: A SemVer 2 version (required)
kubeVersion: A SemVer range of compatible Kubernetes versions (optional)
@ -850,7 +849,7 @@ considerations in mind:
- The `Chart.yaml` will be overwritten by the generator.
- Users will expect to modify such a chart's contents, so documentation
should indicate how users can do so.
- All occurences of `<CHARTNAME>` will be replaced with the specified chart
- All occurrences of `<CHARTNAME>` will be replaced with the specified chart
name so that starter charts can be used as templates.
Currently the only way to add a chart to `$HELM_HOME/starters` is to manually

@ -180,4 +180,19 @@ It is also possible to define policies that determine when to delete correspondi
"helm.sh/hook-delete-policy": hook-succeeded
```
When using the `"helm.sh/hook-delete-policy"` annotation, you can choose its value from `"hook-succeeded"` and `"hook-failed"`. The value `"hook-succeeded"` specifies Tiller should delete the hook after the hook is successfully executed, while the value `"hook-failed"` specifies Tiller should delete the hook if the hook failed during execution.
You can choose one or more defined annotation values:
* `"hook-succeeded"` specifies Tiller should delete the hook after the hook is successfully executed.
* `"hook-failed"` specifies Tiller should delete the hook if the hook failed during execution.
* `"before-hook-creation"` specifies Tiller should delete the previous hook before the new hook is launched.
### Automatically delete hook from previous release
When a Helm release is being updated, it is possible that a hook resource already exists in the cluster. By default, Helm will try to create the resource again and fail with an `"... already exists"` error.
One might choose `"helm.sh/hook-delete-policy": "before-hook-creation"` over `"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"` because:
* It is convenient to keep a failed hook job resource in Kubernetes, for example for manual debugging.
* It may be necessary to keep a succeeded hook resource in Kubernetes for some reason.
* At the same time, it is not desirable to have to delete the resource manually before a Helm release upgrade.
The `"helm.sh/hook-delete-policy": "before-hook-creation"` annotation causes Tiller to remove the hook from the previous release, if there is one, before the new hook is launched, and it can be combined with another policy.

@ -10,7 +10,6 @@ Helm and Tiller.
- A Kubernetes cluster w/ kubectl (optional)
- The gRPC toolchain
- Git
- Mercurial
## Building Helm/Tiller

@ -23,14 +23,17 @@ helm package [flags] [CHART_PATH] [...]
### Options
```
--app-version string set the appVersion on the chart to this version
-u, --dependency-update update dependencies from "requirements.yaml" to dir "charts/" before packaging
-d, --destination string location to write the chart. (default ".")
--key string name of the key to use when signing. Used if --sign is true
--keyring string location of a public keyring (default "~/.gnupg/pubring.gpg")
--save save packaged chart to local chart repository (default true)
--sign use a PGP private key to sign this package
--version string set the version on the chart to this semver version
--app-version string set the appVersion on the chart to this version
-u, --dependency-update update dependencies from "requirements.yaml" to dir "charts/" before packaging
-d, --destination string location to write the chart. (default ".")
--key string name of the key to use when signing. Used if --sign is true
--keyring string location of a public keyring (default "~/.gnupg/pubring.gpg")
--save save packaged chart to local chart repository (default true)
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--sign use a PGP private key to sign this package
-f, --values valueFiles specify values in a YAML file or a URL(can specify multiple) (default [])
--version string set the version on the chart to this semver version
```
### Options inherited from parent commands
@ -47,4 +50,4 @@ helm package [flags] [CHART_PATH] [...]
### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 8-Mar-2018
###### Auto generated by spf13/cobra on 6-Apr-2018

@ -51,7 +51,7 @@ helm upgrade [RELEASE] [CHART]
--recreate-pods performs pods restart for the resource if applicable
--repo string chart repository url where to locate the requested chart
--reset-values when upgrading, reset the values to the ones built into the chart
--reuse-values when upgrading, reuse the last release's values, and merge in any new values. If '--reset-values' is specified, this is ignored.
--reuse-values when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
--timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300)
@ -81,4 +81,4 @@ helm upgrade [RELEASE] [CHART]
### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 20-Mar-2018
###### Auto generated by spf13/cobra on 4-Apr-2018

@ -36,6 +36,15 @@ brew install kubernetes-helm
(Note: There is also a formula for emacs-helm, which is a different
project.)
### From Chocolatey (Windows)
Members of the Kubernetes community have contributed a [Helm package](https://chocolatey.org/packages/kubernetes-helm) build to
[Chocolatey](https://chocolatey.org/). This package is generally up to date.
```
choco install kubernetes-helm
```
## From Script
Helm now has an installer script that will automatically grab the latest version
@ -72,7 +81,7 @@ Building Helm from source is slightly more work, but is the best way to
go if you want to test the latest (pre-release) Helm version.
You must have a working Go environment with
[glide](https://github.com/Masterminds/glide) and Mercurial installed.
[glide](https://github.com/Masterminds/glide) installed.
```console
$ cd $GOPATH

@ -224,7 +224,7 @@ I am trying to remove stuff.
**Q: When I delete the Tiller deployment, how come all the releases are still there?**
Releases are stored in ConfigMaps inside of the `kube-system` namespace. You will
have to manually delete them to get rid of the record.
have to manually delete them to get rid of the record, or use `helm delete --purge`.
**Q: I want to delete my local Helm. Where are all its files?**

@ -43,3 +43,9 @@ Helm works straightforward on OpenShift Online, OpenShift Dedicated, OpenShift C
## Platform9
Helm Client and Helm Server (Tiller) are pre-installed with [Platform9 Managed Kubernetes](https://platform9.com/managed-kubernetes/?utm_source=helm_distro_notes). Platform9 provides access to all official Helm charts through the App Catalog UI and native Kubernetes CLI. Additional repositories can be manually added. Further details are available in this [Platform9 App Catalog article](https://platform9.com/support/deploying-kubernetes-apps-platform9-managed-kubernetes/?utm_source=helm_distro_notes).
## DC/OS
Helm (both client and server) has been tested and is working on Mesosphere's DC/OS 1.11 Kubernetes platform, and requires
no additional configuration.

@ -134,7 +134,7 @@ downloaders:
If such a plugin is installed, Helm can interact with the repository using the specified
protocol scheme by invoking the `command`. The special repository shall be added
similarily to the regular ones: `helm repo add favorite myprotocol://example.com/`
similarly to the regular ones: `helm repo add favorite myprotocol://example.com/`
The rules for the special repos are the same as for the regular ones: Helm must be able
to download the `index.yaml` file in order to discover and cache the list of
available Charts.
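For reference, a sketch of the shape such a `downloaders` stanza takes in a plugin's `plugin.yaml` (the command path and protocol name are hypothetical):
```yaml
downloaders:
  - command: "bin/mydownloader"
    protocols:
      - "myprotocol"
```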

@ -4,13 +4,6 @@ Helm has provenance tools which help chart users verify the integrity and origin
of a package. Using industry-standard tools based on PKI, GnuPG, and well-respected
package managers, Helm can generate and verify signature files.
**Note:**
Version 2.0.0-alpha.4 introduced a system for verifying the authenticity of charts.
While we do not anticipate that any major changes will be made to the file formats
or provenancing algorithms, this portion of Helm is not considered _frozen_ until
2.0.0-RC1 is released. The original plan for this feature can be found
[at issue 983](https://github.com/kubernetes/helm/issues/983).
## Overview
Integrity is established by comparing a chart to a provenance record. Provenance

@ -4,11 +4,11 @@ In Kubernetes, granting a role to an application-specific service account is a b
Bitnami also has a fantastic guide for [configuring RBAC in your cluster](https://docs.bitnami.com/kubernetes/how-to/configure-rbac-in-your-kubernetes-cluster/) that takes you through RBAC basics.
This guide is for users who want to restrict tiller's capabilities to install resources to certain namespaces, or to grant a helm client running access to a tiller instance.
This guide is for users who want to restrict Tiller's capabilities to install resources to certain namespaces, or to grant a Helm client running access to a Tiller instance.
## Tiller and Role-based Access Control
You can add a service account to Tiller using the `--service-account <NAME>` flag while you're configuring helm. As a prerequisite, you'll have to create a role binding which specifies a [role](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) and a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) name that have been set up in advance.
You can add a service account to Tiller using the `--service-account <NAME>` flag while you're configuring Helm. As a prerequisite, you'll have to create a role binding which specifies a [role](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) and a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) name that have been set up in advance.
Once you have satisfied the prerequisite and have a service account with the correct permissions, you'll run a command like this: `helm init --service-account <NAME>`
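A hedged sketch of that prerequisite, creating the service account and binding it cluster-wide (the names match the console output below; the `cluster-admin` binding mirrors the cluster-wide example that follows):
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
```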
@ -51,7 +51,7 @@ clusterrolebinding "tiller" created
$ helm init --service-account tiller
```
### Example: Deploy tiller in a namespace, restricted to deploying resources only in that namespace
### Example: Deploy Tiller in a namespace, restricted to deploying resources only in that namespace
In the example above, we gave Tiller admin access to the entire cluster. You are not at all required to give Tiller cluster-admin access for it to work. Instead of specifying a ClusterRole or a ClusterRoleBinding, you can specify a Role and RoleBinding to limit Tiller's scope to a particular namespace.
@ -62,7 +62,7 @@ $ kubectl create serviceaccount tiller --namespace tiller-world
serviceaccount "tiller" created
```
Define a Role that allows tiller to manage all resources in `tiller-world` like in `role-tiller.yaml`:
Define a Role that allows Tiller to manage all resources in `tiller-world` like in `role-tiller.yaml`:
```yaml
kind: Role
@ -104,13 +104,13 @@ $ kubectl create -f rolebinding-tiller.yaml
rolebinding "tiller-binding" created
```
Afterwards you can run `helm init` to install tiller in the `tiller-world` namespace.
Afterwards you can run `helm init` to install Tiller in the `tiller-world` namespace.
```console
$ helm init --service-account tiller --tiller-namespace tiller-world
$HELM_HOME has been configured at /Users/awesome-user/.helm.
Tiller (the helm server side component) has been installed into your Kubernetes Cluster.
Tiller (the Helm server side component) has been installed into your Kubernetes Cluster.
Happy Helming!
$ helm install nginx --tiller-namespace tiller-world --namespace tiller-world
@ -125,11 +125,11 @@ NAME READY STATUS RESTARTS AGE
wayfaring-yak-alpine 0/1 ContainerCreating 0 0s
```
### Example: Deploy tiller in a namespace, restricted to deploying resources in another namespace
### Example: Deploy Tiller in a namespace, restricted to deploying resources in another namespace
In the example above, we gave Tiller admin access to the namespace it was deployed inside. Now, let's limit Tiller's scope to deploy resources in a different namespace!
For example, let's install tiller in the namespace `myorg-system` and allow tiller to deploy resources in the namespace `myorg-users`.
For example, let's install Tiller in the namespace `myorg-system` and allow Tiller to deploy resources in the namespace `myorg-users`.
```console
$ kubectl create namespace myorg-system
@ -138,7 +138,7 @@ $ kubectl create serviceaccount tiller --namespace myorg-system
serviceaccount "tiller" created
```
Define a Role that allows tiller to manage all resources in `myorg-users` like in `role-tiller.yaml`:
Define a Role that allows Tiller to manage all resources in `myorg-users` like in `role-tiller.yaml`:
```yaml
kind: Role
@ -180,7 +180,7 @@ $ kubectl create -f rolebinding-tiller.yaml
rolebinding "tiller-binding" created
```
We'll also need to grant tiller access to read configmaps in myorg-system so it can store release information. In `role-tiller-myorg-system.yaml`:
We'll also need to grant Tiller access to read configmaps in myorg-system so it can store release information. In `role-tiller-myorg-system.yaml`:
```yaml
kind: Role
@ -224,11 +224,11 @@ rolebinding "tiller-binding" created
## Helm and Role-based Access Control
When running a helm client in a pod, in order for the helm client to talk to a tiller instance, it will need certain privileges to be granted. Specifically, the helm client will need to be able to create pods, forward ports and be able to list pods in the namespace where tiller is running (so it can find tiller).
When running a Helm client in a pod, in order for the Helm client to talk to a Tiller instance, it will need certain privileges to be granted. Specifically, the Helm client will need to be able to create pods, forward ports and be able to list pods in the namespace where Tiller is running (so it can find Tiller).
### Example: Deploy Helm in a namespace, talking to Tiller in another namespace
In this example, we will assume tiller is running in a namespace called `tiller-world` and that the helm client is running in a namespace called `helm-world`. By default, tiller is running in the `kube-system` namespace.
In this example, we will assume Tiller is running in a namespace called `tiller-world` and that the Helm client is running in a namespace called `helm-world`. By default, Tiller is running in the `kube-system` namespace.
In `helm-user.yaml`:

@ -78,6 +78,7 @@ Platforms, distributions, and services that include Helm support.
- [Cabin](http://www.skippbox.com/cabin/) - Mobile App for Managing Kubernetes
- [Qstack](https://qstack.com)
- [Fabric8](https://fabric8.io) - Integrated development platform for Kubernetes
- [Jenkins X](http://jenkins-x.io/) - open source automated CI/CD for Kubernetes which uses Helm for [promoting](http://jenkins-x.io/about/features/#promotion) applications through [environments via GitOps](http://jenkins-x.io/about/features/#environments)
## Misc

@ -197,7 +197,7 @@ From here on just repeat this process, continuously testing until you're happy w
## 6. Finalize the Release
When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure eveything is in order, then finally push the release tag.
When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure everything is in order, then finally push the release tag.
```shell
git checkout $RELEASE_BRANCH_NAME

@ -71,7 +71,7 @@ Enabling this feature currently requires setting the `--storage=secret` flag in
Because of the relative longevity of Helm, the Helm chart ecosystem evolved without the immediate concern for cluster-wide control, and especially in the developer space this makes complete sense. However, charts are a kind of package that not only installs containers you may or may not have validated yourself, but it may also install into more than one namespace.
As with all shared software, in a controlled or shared environment you must validate all software you install yourself _before_ you install it. If you have secured Tiller with TLS and have installed it with permissions to only one or a subset of namespaces, some charts may fail to install -- but in these environments, that is exactly what you want. If you need to use the chart, you may have to work with the creator or modify it yourself in order to use it securely in a mulitenant cluster with proper RBAC rules applied. The `helm template` command renders the chart locally and displays the output.
As with all shared software, in a controlled or shared environment you must validate all software you install yourself _before_ you install it. If you have secured Tiller with TLS and have installed it with permissions to only one or a subset of namespaces, some charts may fail to install -- but in these environments, that is exactly what you want. If you need to use the chart, you may have to work with the creator or modify it yourself in order to use it securely in a multitenant cluster with proper RBAC rules applied. The `helm template` command renders the chart locally and displays the output.
Once vetted, you can use Helm's provenance tools to [ensure the provenance and integrity of charts](provenance.md) that you use.

@ -251,7 +251,7 @@ This configuration sends our client-side certificate to establish identity, uses
the client key for encryption, and uses the CA certificate to validate the remote
Tiller's identity.
Typing a line that that is cumbersome, though. The shortcut is to move the key,
Typing a line that is cumbersome, though. The shortcut is to move the key,
cert, and CA into `$HELM_HOME`:
```console
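# filenames are illustrative; copy your client key, cert, and CA into $(helm home)
$ cp helm.cert.pem $(helm home)/cert.pem
$ cp helm.key.pem $(helm home)/key.pem
$ cp ca.cert.pem $(helm home)/ca.pem
```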

glide.lock (generated)

@ -1,58 +1,21 @@
hash: d93f565214b112cf8560e9cd2da2f3ab7852a1f19544569fc112bd4fb2d1d506
updated: 2018-03-08T14:06:06.497394911-08:00
hash: 6837936360d447b64aa7a09d3c89c18ac5540b009a57fc4d3227af299bf40268
updated: 2018-04-03T08:17:14.801847688-07:00
imports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
repo: https://github.com/GoogleCloudPlatform/google-cloud-go.git
subpackages:
- compute
- compute/metadata
- internal
- name: github.com/aokoli/goutils
version: 9c37978a95bd5c709a15883b6242714ea6709e64
- name: github.com/asaskevich/govalidator
version: 7664702784775e51966f0885f5cd27435916517b
- name: github.com/aws/aws-sdk-go
version: 760741802ad40f49ae9fc4a69ef6706d2527d62e
subpackages:
- aws
- aws/awserr
- aws/awsutil
- aws/client
- aws/client/metadata
- aws/corehandlers
- aws/credentials
- aws/credentials/ec2rolecreds
- aws/credentials/endpointcreds
- aws/credentials/stscreds
- aws/defaults
- aws/ec2metadata
- aws/endpoints
- aws/request
- aws/session
- aws/signer/v4
- internal/shareddefaults
- private/protocol
- private/protocol/ec2query
- private/protocol/json/jsonutil
- private/protocol/jsonrpc
- private/protocol/query
- private/protocol/query/queryutil
- private/protocol/rest
- private/protocol/xml/xmlutil
- service/autoscaling
- service/ec2
- service/ecr
- service/elb
- service/elbv2
- service/kms
- service/sts
- name: github.com/Azure/go-ansiterm
version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
subpackages:
- winterm
- name: github.com/Azure/go-autorest
version: e14a70c556c8e0db173358d1a903dca345a8e75e
version: d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab
subpackages:
- autorest
- autorest/adal
@ -64,8 +27,6 @@ imports:
- quantile
- name: github.com/BurntSushi/toml
version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/chai2010/gettext-go
version: bf70f2a70fb1b1f36d90d671a72795984eab0fcb
- name: github.com/cpuguy83/go-md2man
version: 71acacd42f85e5e82f70a55327789582a5200a90
subpackages:
@ -106,8 +67,9 @@ imports:
- pkg/jsonmessage
- pkg/longpath
- pkg/mount
- pkg/parsers
- pkg/stdcopy
- pkg/symlink
- pkg/sysinfo
- pkg/system
- pkg/term
- pkg/term/windows
@ -124,10 +86,6 @@ imports:
version: 449fdfce4d962303d702fec724ef0ad181c92528
subpackages:
- spdy
- name: github.com/emicklei/go-restful
version: ff4f55a206334ef123e4f79bbf348980da81ca46
subpackages:
- log
- name: github.com/evanphx/json-patch
version: 944e07253867aacae43c04b2e6a239005443f33a
- name: github.com/exponent-io/jsonpath
@ -136,14 +94,12 @@ imports:
version: f6a740d52f961c60348ebb109adde9f4635d7540
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-ini/ini
version: 300e940a926eb277d3901b20bdfcc54928ad3642
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- name: github.com/go-openapi/spec
version: 7abd5745472fff5eb3685386d5fb8bf38683154d
version: 1de3e0542de65ad8d75452a595886fdd0befb363
- name: github.com/go-openapi/swag
version: f3f9494671f93fcff853e3c6e9e948b3eb71e590
- name: github.com/gobwas/glob
@ -179,6 +135,8 @@ imports:
version: 7d79101e329e5a3adf994758c578dab82b90c017
- name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/google/uuid
version: 064e2069ce9c359c118179501254f67d7d37ba24
- name: github.com/googleapis/gnostic
version: 0c5108395e2debce0d731cf0287ddf7242066aba
subpackages:
@ -186,7 +144,7 @@ imports:
- compiler
- extensions
- name: github.com/gophercloud/gophercloud
version: 8183543f90d1aef267a5ecc209f2e0715b355acb
version: 6da026c32e2d622cc242d32984259c77237aefe1
subpackages:
- openstack
- openstack/identity/v2/tenants
@ -217,12 +175,8 @@ imports:
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jmespath/go-jmespath
version: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
- name: github.com/json-iterator/go
version: 36b14963da70d11297d313183d7e6388c8510e1e
- name: github.com/juju/ratelimit
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
version: 13f86432b882000a51c6e610c620974462691a97
- name: github.com/mailru/easyjson
version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d
subpackages:
@ -234,7 +188,7 @@ imports:
- name: github.com/Masterminds/semver
version: 517734cc7d6470c0d07130e40fd40bdeb9bcd3fd
- name: github.com/Masterminds/sprig
version: b217b9c388de2cacde4354c536e520c52c055563
version: 6b2a58267f6a8b1dc8e2eb5519b984008fa85e8c
- name: github.com/Masterminds/vcs
version: 3084677c2c188840777bff30054f2b553729d329
- name: github.com/mattn/go-runewidth
@ -245,8 +199,6 @@ imports:
- pbutil
- name: github.com/mitchellh/go-wordwrap
version: ad45545899c7b13c020ea92b2072220eefad42b8
- name: github.com/naoina/go-stringutil
version: 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
- name: github.com/opencontainers/go-digest
version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
- name: github.com/opencontainers/image-spec
@ -283,8 +235,6 @@ imports:
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
- name: github.com/russross/blackfriday
version: 300106c228d52c8941d4b3de6054a6062a86dda3
- name: github.com/satori/go.uuid
version: f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
- name: github.com/shurcooL/sanitized_anchor_name
version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
- name: github.com/sirupsen/logrus
@ -301,6 +251,8 @@ imports:
version: 81e90905daefcd6fd217b62423c0908922eadb30
subpackages:
- cast5
- ed25519
- ed25519/internal/edwards25519
- openpgp
- openpgp/armor
- openpgp/clearsign
@ -353,6 +305,10 @@ imports:
- unicode/bidi
- unicode/norm
- width
- name: golang.org/x/time
version: f51c12702a4d776e4c1fa9b0fabab841babae631
subpackages:
- rate
- name: google.golang.org/appengine
version: 12d5545dc1cfa6047a286d5e853841b6471f4c19
subpackages:
@ -390,20 +346,18 @@ imports:
- status
- tap
- transport
- name: gopkg.in/gcfg.v1
version: 27e4946190b4a327b539185f2b5b1f7c84730728
subpackages:
- scanner
- token
- types
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/warnings.v0
version: 8a331561fe74dadba6edfc59f3be66c22c3b065d
- name: gopkg.in/square/go-jose.v2
version: f8f38de21b4dcd69d0413faf231983f5fd6634b1
subpackages:
- cipher
- json
- jwt
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/api
version: 006a217681ae70cbacdd66a5e2fca1a61a8ff28e
version: c699ec51538f0cfd4afa8bfcfe1e0779cafbe666
subpackages:
- admission/v1beta1
- admissionregistration/v1alpha1
@ -436,11 +390,11 @@ imports:
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apiextensions-apiserver
version: a5bbfd114a9b122acd741c61d88c84812375d9e1
version: 898b0eda132e1aeac43a459785144ee4bf9b0a2e
subpackages:
- pkg/features
- name: k8s.io/apimachinery
version: 68f9c3a1feb3140df59c67ced62d3a5df8e6c9c2
version: 54101a56dda9a0962bc48751c058eb4c546dcbb9
subpackages:
- pkg/api/equality
- pkg/api/errors
@ -454,7 +408,7 @@ imports:
- pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured
- pkg/apis/meta/v1/validation
- pkg/apis/meta/v1alpha1
- pkg/apis/meta/v1beta1
- pkg/conversion
- pkg/conversion/queryparams
- pkg/fields
@ -472,6 +426,7 @@ imports:
- pkg/util/cache
- pkg/util/clock
- pkg/util/diff
- pkg/util/duration
- pkg/util/errors
- pkg/util/framer
- pkg/util/httpstream
@ -496,7 +451,7 @@ imports:
- third_party/forked/golang/netutil
- third_party/forked/golang/reflect
- name: k8s.io/apiserver
version: 2a1092aaa7202e8f9b188281ff9424a014ce61c2
version: ea53f8588c655568158b4ff53f5ec6fa4ebfc332
subpackages:
- pkg/apis/audit
- pkg/authentication/authenticator
@ -507,7 +462,7 @@ imports:
- pkg/util/feature
- pkg/util/flag
- name: k8s.io/client-go
version: 78700dec6369ba22221b72770783300f143df150
version: 23781f4d6632d88e869066eaebb743857aa1ef9b
subpackages:
- discovery
- discovery/fake
@ -635,15 +590,26 @@ imports:
- listers/storage/v1
- listers/storage/v1alpha1
- listers/storage/v1beta1
- pkg/apis/clientauthentication
- pkg/apis/clientauthentication/v1alpha1
- pkg/version
- plugin/pkg/client/auth
- plugin/pkg/client/auth/azure
- plugin/pkg/client/auth/exec
- plugin/pkg/client/auth/gcp
- plugin/pkg/client/auth/oidc
- plugin/pkg/client/auth/openstack
- rest
- rest/fake
- rest/watch
- scale
- scale/scheme
- scale/scheme/appsint
- scale/scheme/appsv1beta1
- scale/scheme/appsv1beta2
- scale/scheme/autoscalingv1
- scale/scheme/extensionsint
- scale/scheme/extensionsv1beta1
- testing
- third_party/forked/golang/template
- tools/auth
@ -670,13 +636,12 @@ imports:
- util/retry
- util/workqueue
- name: k8s.io/kube-openapi
version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1
version: 50ae88d24ede7b8bad68e23c805b5d3da5c8abaf
subpackages:
- pkg/common
- pkg/util/proto
- pkg/util/proto/validation
- name: k8s.io/kubernetes
version: 5fa2db2bd46ac79e5e00a4e6ed24191080aa463b
version: a22f9fd34871d9dc9e5db2c02c713821d18ab2cd
subpackages:
- pkg/api/events
- pkg/api/legacyscheme
@ -686,7 +651,6 @@ imports:
- pkg/api/service
- pkg/api/testapi
- pkg/api/v1/pod
- pkg/api/v1/service
- pkg/apis/admission
- pkg/apis/admission/install
- pkg/apis/admission/v1beta1
@ -799,9 +763,8 @@ imports:
- pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake
- pkg/client/unversioned
- pkg/client/conditions
- pkg/cloudprovider
- pkg/cloudprovider/providers/aws
- pkg/controller
- pkg/controller/daemon
- pkg/controller/daemon/util
@ -810,8 +773,8 @@ imports:
- pkg/controller/statefulset
- pkg/controller/volume/events
- pkg/controller/volume/persistentvolume
- pkg/controller/volume/persistentvolume/metrics
- pkg/credentialprovider
- pkg/credentialprovider/aws
- pkg/features
- pkg/fieldpath
- pkg/kubectl
@ -830,6 +793,7 @@ imports:
- pkg/kubectl/util/hash
- pkg/kubectl/util/slice
- pkg/kubectl/util/term
- pkg/kubectl/util/transport
- pkg/kubectl/validation
- pkg/kubelet/apis
- pkg/kubelet/types
@ -837,6 +801,13 @@ imports:
- pkg/printers
- pkg/printers/internalversion
- pkg/registry/rbac/validation
- pkg/scheduler/algorithm
- pkg/scheduler/algorithm/predicates
- pkg/scheduler/algorithm/priorities/util
- pkg/scheduler/api
- pkg/scheduler/schedulercache
- pkg/scheduler/util
- pkg/scheduler/volumebinder
- pkg/security/apparmor
- pkg/serviceaccount
- pkg/util/file
@ -858,14 +829,9 @@ imports:
- pkg/version
- pkg/volume
- pkg/volume/util
- pkg/watch/json
- plugin/pkg/scheduler/algorithm
- plugin/pkg/scheduler/algorithm/predicates
- plugin/pkg/scheduler/algorithm/priorities/util
- plugin/pkg/scheduler/api
- plugin/pkg/scheduler/schedulercache
- plugin/pkg/scheduler/util
- plugin/pkg/scheduler/volumebinder
- pkg/volume/util/fs
- pkg/volume/util/recyclerclient
- pkg/volume/util/types
- name: k8s.io/utils
version: aedf551cdb8b0119df3a19c65fde413a13b34997
subpackages:
@ -874,8 +840,6 @@ imports:
- exec/testing
- name: vbom.ml/util
version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394
repo: https://github.com/fvbommel/util.git
vcs: git
subpackages:
- sortorder
testImports:

@ -43,28 +43,20 @@ import:
- package: github.com/evanphx/json-patch
- package: github.com/BurntSushi/toml
version: ~0.3.0
- package: github.com/naoina/go-stringutil
version: ~0.1.0
- package: github.com/chai2010/gettext-go
- package: github.com/prometheus/client_golang
version: 0.8.0
- package: vbom.ml/util
repo: https://github.com/fvbommel/util.git
vcs: git
- package: github.com/grpc-ecosystem/go-grpc-prometheus
- package: k8s.io/kubernetes
version: 1.9.2
version: release-1.10
- package: k8s.io/client-go
version: ~6.0.0
version: kubernetes-1.10.0
- package: k8s.io/api
version: kubernetes-1.9.2
version: release-1.10
- package: k8s.io/apimachinery
version: kubernetes-1.9.2
version: release-1.10
- package: k8s.io/apiserver
version: kubernetes-1.9.2
- package: cloud.google.com/go/compute
repo: https://github.com/GoogleCloudPlatform/google-cloud-go.git
version: release-1.10
testImports:
- package: github.com/stretchr/testify

@ -31,7 +31,7 @@ import (
// ApiVersionV1 is the API version number for version 1.
//
// This is ApiVersionV1 instead of APIVersionV1 to match the protobuf-generated name.
const ApiVersionV1 = "v1"
const ApiVersionV1 = "v1" // nolint
// UnmarshalChartfile takes raw Chart.yaml data and unmarshals it.
func UnmarshalChartfile(data []byte) (*chart.Metadata, error) {

@ -59,6 +59,9 @@ image:
tag: stable
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
@ -120,7 +123,6 @@ const defaultIgnore = `# Patterns to ignore when building packages.
const defaultIngress = `{{- if .Values.ingress.enabled -}}
{{- $fullName := include "<CHARTNAME>.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress

@ -175,7 +175,7 @@ func ToYaml(v interface{}) string {
// Swallow errors inside of a template.
return ""
}
return string(data)
return strings.TrimSuffix(string(data), "\n")
}
// FromYaml converts a YAML document into a map[string]interface{}.

@ -72,10 +72,10 @@ func TestToConfig(t *testing.T) {
f := NewFiles(getTestFiles())
out := f.Glob("**/captain.txt").AsConfig()
as.Equal("captain.txt: The Captain\n", out)
as.Equal("captain.txt: The Captain", out)
out = f.Glob("ship/**").AsConfig()
as.Equal("captain.txt: The Captain\nstowaway.txt: Legatt\n", out)
as.Equal("captain.txt: The Captain\nstowaway.txt: Legatt", out)
}
func TestToSecret(t *testing.T) {
@ -84,7 +84,7 @@ func TestToSecret(t *testing.T) {
f := NewFiles(getTestFiles())
out := f.Glob("ship/**").AsSecrets()
as.Equal("captain.txt: VGhlIENhcHRhaW4=\nstowaway.txt: TGVnYXR0\n", out)
as.Equal("captain.txt: VGhlIENhcHRhaW4=\nstowaway.txt: TGVnYXR0", out)
}
func TestLines(t *testing.T) {
@ -99,7 +99,7 @@ func TestLines(t *testing.T) {
}
func TestToYaml(t *testing.T) {
expect := "foo: bar\n"
expect := "foo: bar"
v := struct {
Foo string `json:"foo"`
}{

@ -394,6 +394,21 @@ func processImportValues(c *chart.Chart) error {
b := make(map[string]interface{}, 0)
// import values from each dependency if specified in import-values
for _, r := range reqs.Dependencies {
// only process raw requirement that is found in chart's dependencies (enabled)
found := false
name := r.Name
for _, v := range c.Dependencies {
if v.Metadata.Name == r.Name {
found = true
}
if v.Metadata.Name == r.Alias {
found = true
name = r.Alias
}
}
if !found {
continue
}
if len(r.ImportValues) > 0 {
var outiv []interface{}
for _, riv := range r.ImportValues {
@ -404,7 +419,7 @@ func processImportValues(c *chart.Chart) error {
"parent": iv["parent"].(string),
}
outiv = append(outiv, nm)
s := r.Name + "." + nm["child"]
s := name + "." + nm["child"]
// get child table
vv, err := cvals.Table(s)
if err != nil {
@ -420,7 +435,7 @@ func processImportValues(c *chart.Chart) error {
"parent": ".",
}
outiv = append(outiv, nm)
s := r.Name + "." + nm["child"]
s := name + "." + nm["child"]
vm, err := cvals.Table(s)
if err != nil {
log.Printf("Warning: ImportValues missing table: %v", err)

Binary file not shown.

@ -1 +1,3 @@
# This is a placeholder.
Goodbye {{.Values.name | default "world"}}

@ -151,7 +151,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, ge
return u, nil, err
}
// Same as the ResolveChartVersion method, but returns the chart repositoryy.
// ResolveChartVersionAndGetRepo is the same as the ResolveChartVersion method, but also returns the chart repository.
func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*url.URL, *repo.ChartRepository, *getter.HttpGetter, error) {
u, err := url.Parse(ref)
if err != nil {
@ -167,7 +167,6 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
if err != nil {
return u, nil, nil, err
}
g.SetCredentials(c.getRepoCredentials(nil))
if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
// In this case, we have to find the parent repo that contains this chart
@ -203,6 +202,7 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
repoName := p[0]
chartName := p[1]
rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)
if err != nil {
return u, nil, nil, err
}
@ -211,6 +211,7 @@ func (c *ChartDownloader) ResolveChartVersionAndGetRepo(ref, version string) (*u
if err != nil {
return u, nil, nil, err
}
g.SetCredentials(c.getRepoCredentials(r))
// Next, we need to load the index, and actually look up the chart.
i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name))

@ -23,17 +23,18 @@ import (
"strings"
"k8s.io/helm/pkg/tlsutil"
"k8s.io/helm/pkg/urlutil"
"k8s.io/helm/pkg/version"
)
//httpGetter is the efault HTTP(/S) backend handler
type HttpGetter struct {
// HttpGetter is the default HTTP(/S) backend handler
// TODO: change the name to HTTPGetter in Helm 3
type HttpGetter struct { //nolint
client *http.Client
username string
password string
}
//SetCredentials sets the credentials for the getter
func (g *HttpGetter) SetCredentials(username, password string) {
g.username = username
g.password = password
@ -80,19 +81,11 @@ func newHTTPGetter(URL, CertFile, KeyFile, CAFile string) (Getter, error) {
// NewHTTPGetter constructs a valid http/https client as HttpGetter
func NewHTTPGetter(URL, CertFile, KeyFile, CAFile string) (*HttpGetter, error) {
var client HttpGetter
if CertFile != "" && KeyFile != "" {
tlsConf, err := tlsutil.NewClientTLS(CertFile, KeyFile, CAFile)
if (CertFile != "" && KeyFile != "") || CAFile != "" {
tlsConf, err := tlsutil.NewTLSConfig(URL, CertFile, KeyFile, CAFile)
if err != nil {
return &client, fmt.Errorf("can't create TLS config for client: %s", err.Error())
return &client, fmt.Errorf("can't create TLS config: %s", err.Error())
}
tlsConf.BuildNameToCertificate()
sni, err := urlutil.ExtractHostname(URL)
if err != nil {
return &client, err
}
tlsConf.ServerName = sni
client.client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConf,

@ -28,7 +28,7 @@ func TestHTTPGetter(t *testing.T) {
}
if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an httpGetter")
t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client != http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a default HTTP client.")
}
@ -37,12 +37,24 @@ func TestHTTPGetter(t *testing.T) {
cd := "../../testdata"
join := filepath.Join
ca, pub, priv := join(cd, "ca.pem"), join(cd, "crt.pem"), join(cd, "key.pem")
g, err = newHTTPGetter("http://example.com/", pub, priv, ca)
g, err = newHTTPGetter("https://example.com/", pub, priv, ca)
if err != nil {
t.Fatal(err)
}
if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client == http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a non-default HTTP client")
}
if _, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an httpGetter")
// Test with SSL, caFile only
g, err = newHTTPGetter("https://example.com/", "", "", ca)
if err != nil {
t.Fatal(err)
}
if hg, ok := g.(*HttpGetter); !ok {
t.Fatal("Expected newHTTPGetter to produce an HttpGetter")
} else if hg.client == http.DefaultClient {
t.Fatal("Expected newHTTPGetter to return a non-default HTTP client")
}
}

@ -44,6 +44,8 @@ type Client struct {
// NewClient creates a new client.
func NewClient(opts ...Option) *Client {
var c Client
// set some sane defaults
c.Option(ConnectTimeout(5))
return c.Option(opts...)
}
@ -344,8 +346,22 @@ func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.L
if err != nil {
return nil, err
}
return s.Recv()
var resp *rls.ListReleasesResponse
for {
r, err := s.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if resp == nil {
resp = r
continue
}
resp.Releases = append(resp.Releases, r.GetReleases()[0])
}
return resp, nil
}
// Executes tiller.InstallRelease RPC.

@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"testing"
"time"
)
func TestNewClient(t *testing.T) {
helmClient := NewClient()
if helmClient.opts.connectTimeout != 5*time.Second {
t.Errorf("expected default timeout duration to be 5 seconds, got %v", helmClient.opts.connectTimeout)
}
helmClient = NewClient(ConnectTimeout(60))
if helmClient.opts.connectTimeout != time.Minute {
t.Errorf("expected timeout duration to be 1 minute, got %v", helmClient.opts.connectTimeout)
}
}

@ -45,8 +45,9 @@ const (
// Type of policy for deleting the hook
const (
HookSucceeded = "hook-succeeded"
HookFailed = "hook-failed"
HookSucceeded = "hook-succeeded"
HookFailed = "hook-failed"
BeforeHookCreation = "before-hook-creation"
)
// FilterTestHooks filters the list of hooks and returns only testing hooks.
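Chart authors select these policies with the `helm.sh/hook-delete-policy` annotation. A sketch of a hook manifest using the new policy (resource names are illustrative, spec omitted for brevity):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: "{{ .Release.Name }}-pre-install"
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-delete-policy": before-hook-creation
```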

@ -47,7 +47,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/core"
conditions "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
@ -56,7 +56,7 @@ import (
)
const (
// MissingGetHeader is added to Get's outout when a resource is not found.
// MissingGetHeader is added to Get's output when a resource is not found.
MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n"
)
@ -77,10 +77,12 @@ func New(config clientcmd.ClientConfig) *Client {
return &Client{
Factory: cmdutil.NewFactory(config),
SchemaCacheDir: clientcmd.RecommendedSchemaFile,
Log: func(_ string, _ ...interface{}) {},
Log: nopLogger,
}
}
var nopLogger = func(_ string, _ ...interface{}) {}
// ResourceActorFunc performs an action on a single resource.
type ResourceActorFunc func(*resource.Info) error
@ -205,7 +207,10 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
// track of tab widths.
buf := new(bytes.Buffer)
p, _ := c.Printer(nil, printers.PrintOptions{})
p, err := cmdutil.PrinterForOptions(&printers.PrintOptions{})
if err != nil {
return "", err
}
for t, ot := range objs {
if _, err = buf.WriteString("==> " + t + "\n"); err != nil {
return "", err
@ -401,7 +406,7 @@ func createPatch(mapping *meta.RESTMapping, target, current runtime.Object) ([]b
// While different objects need different merge types, the parent function
// that calls this does not try to create a patch when the data (first
// returned object) is nil. We can skip calculating the the merge type as
// returned object) is nil. We can skip calculating the merge type as
// the returned merge type is ignored.
if apiequality.Semantic.DeepEqual(oldData, newData) {
return nil, types.StrategicMergePatchType, nil
@ -608,7 +613,8 @@ func (c *Client) AsVersionedObject(obj runtime.Object) (runtime.Object, error) {
return nil, err
}
versions := &runtime.VersionedObjects{}
err = runtime.DecodeInto(c.Decoder(true), json, versions)
decoder := unstructured.UnstructuredJSONScheme
err = runtime.DecodeInto(decoder, json, versions)
return versions.First(), err
}
@ -686,7 +692,7 @@ func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Inf
return err
}
//get an kubernetes resources's relation pods
// get a kubernetes resource's related pods
// a kubernetes resource uses label selectors to find its related pods
func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][]core.Pod) (map[string][]core.Pod, error) {
if info == nil {

@ -18,8 +18,6 @@ package kube
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
@ -31,19 +29,20 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest/fake"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/printers"
watchjson "k8s.io/kubernetes/pkg/watch/json"
"k8s.io/kubernetes/pkg/kubectl/scheme"
)
var unstructuredSerializer = dynamic.ContentConfig().NegotiatedSerializer
func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser {
return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
}
@ -117,31 +116,21 @@ func (f *fakeReaperFactory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, e
return f.reaper, nil
}
func newEventResponse(code int, e *watch.Event) (*http.Response, error) {
dispatchedEvent, err := encodeAndMarshalEvent(e)
if err != nil {
return nil, err
}
header := http.Header{}
header.Set("Content-Type", runtime.ContentTypeJSON)
body := ioutil.NopCloser(bytes.NewReader(dispatchedEvent))
return &http.Response{StatusCode: code, Header: header, Body: body}, nil
type testClient struct {
*Client
*cmdtesting.TestFactory
}
func encodeAndMarshalEvent(e *watch.Event) ([]byte, error) {
encodedEvent, err := watchjson.Object(testapi.Default.Codec(), e)
if err != nil {
return nil, err
func newTestClient() *testClient {
tf := cmdtesting.NewTestFactory()
c := &Client{
Factory: tf,
Log: nopLogger,
}
return &testClient{
Client: c,
TestFactory: tf,
}
return json.Marshal(encodedEvent)
}
func newTestClient(f cmdutil.Factory) *Client {
c := New(nil)
c.Factory = f
return c
}
func TestUpdate(t *testing.T) {
@ -153,10 +142,11 @@ func TestUpdate(t *testing.T) {
var actions []string
f, tf, codec, _ := cmdtesting.NewAPIFactory()
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
tf.UnstructuredClient = &fake.RESTClient{
GroupVersion: schema.GroupVersion{Version: "v1"},
NegotiatedSerializer: dynamic.ContentConfig().NegotiatedSerializer,
NegotiatedSerializer: unstructuredSerializer,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method
actions = append(actions, p+":"+m)
@ -190,9 +180,11 @@ func TestUpdate(t *testing.T) {
}),
}
c := newTestClient()
reaper := &fakeReaper{}
rf := &fakeReaperFactory{Factory: f, reaper: reaper}
c := newTestClient(rf)
rf := &fakeReaperFactory{Factory: tf, reaper: reaper}
c.Client.Factory = rf
codec := legacyscheme.Codecs.LegacyCodec(scheme.Versions...)
if err := c.Update(core.NamespaceDefault, objBody(codec, &listA), objBody(codec, &listB), false, false, 0, false); err != nil {
t.Fatal(err)
}
@ -251,54 +243,35 @@ func TestBuild(t *testing.T) {
},
}
c := newTestClient()
for _, tt := range tests {
f, _, _, _ := cmdtesting.NewAPIFactory()
c := newTestClient(f)
// Test for an invalid manifest
infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && !tt.err {
t.Errorf("%q. Got error message when no error should have occurred: %v", tt.name, err)
} else if err != nil && strings.Contains(err.Error(), "--validate=false") {
t.Errorf("%q. error message was not scrubbed", tt.name)
}
t.Run(tt.name, func(t *testing.T) {
c.Cleanup()
// Test for an invalid manifest
infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && !tt.err {
t.Errorf("Got error message when no error should have occurred: %v", err)
} else if err != nil && strings.Contains(err.Error(), "--validate=false") {
t.Error("error message was not scrubbed")
}
if len(infos) != tt.count {
t.Errorf("%q. expected %d result objects, got %d", tt.name, tt.count, len(infos))
}
if len(infos) != tt.count {
t.Errorf("expected %d result objects, got %d", tt.count, len(infos))
}
})
}
}
type testPrinter struct {
Objects []runtime.Object
Err error
printers.ResourcePrinter
}
func (t *testPrinter) PrintObj(obj runtime.Object, out io.Writer) error {
t.Objects = append(t.Objects, obj)
fmt.Fprintf(out, "%#v", obj)
return t.Err
}
func (t *testPrinter) HandledResources() []string {
return []string{}
}
func (t *testPrinter) AfterPrint(io.Writer, string) error {
return t.Err
}
func TestGet(t *testing.T) {
list := newPodList("starfish", "otter")
f, tf, _, _ := cmdtesting.NewAPIFactory()
tf.Printer = &testPrinter{}
tf.UnstructuredClient = &fake.RESTClient{
c := newTestClient()
defer c.Cleanup()
c.TestFactory.UnstructuredClient = &fake.RESTClient{
GroupVersion: schema.GroupVersion{Version: "v1"},
NegotiatedSerializer: dynamic.ContentConfig().NegotiatedSerializer,
NegotiatedSerializer: unstructuredSerializer,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method
//actions = append(actions, p+":"+m)
t.Logf("got request %s %s", p, m)
switch {
case p == "/namespaces/default/pods/starfish" && m == "GET":
@ -311,7 +284,6 @@ func TestGet(t *testing.T) {
}
}),
}
c := newTestClient(f)
// Test Success
data := strings.NewReader("kind: Pod\napiVersion: v1\nmetadata:\n name: otter")
@ -358,101 +330,37 @@ func TestPerform(t *testing.T) {
}
for _, tt := range tests {
results := []*resource.Info{}
t.Run(tt.name, func(t *testing.T) {
results := []*resource.Info{}
fn := func(info *resource.Info) error {
results = append(results, info)
fn := func(info *resource.Info) error {
results = append(results, info)
if info.Namespace != tt.namespace {
t.Errorf("%q. expected namespace to be '%s', got %s", tt.name, tt.namespace, info.Namespace)
if info.Namespace != tt.namespace {
t.Errorf("expected namespace to be '%s', got %s", tt.namespace, info.Namespace)
}
return nil
}
return nil
}
f, _, _, _ := cmdtesting.NewAPIFactory()
c := newTestClient(f)
infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && err.Error() != tt.errMessage {
t.Errorf("%q. Error while building manifests: %v", tt.name, err)
}
err = perform(infos, fn)
if (err != nil) != tt.err {
t.Errorf("%q. expected error: %v, got %v", tt.name, tt.err, err)
}
if err != nil && err.Error() != tt.errMessage {
t.Errorf("%q. expected error message: %v, got %v", tt.name, tt.errMessage, err)
}
if len(results) != tt.count {
t.Errorf("%q. expected %d result objects, got %d", tt.name, tt.count, len(results))
}
}
}
func TestWaitAndGetCompletedPodPhase(t *testing.T) {
tests := []struct {
podPhase core.PodPhase
expectedPhase core.PodPhase
err bool
errMessage string
}{
{
podPhase: core.PodPending,
expectedPhase: core.PodUnknown,
err: true,
errMessage: "watch closed before Until timeout",
}, {
podPhase: core.PodRunning,
expectedPhase: core.PodUnknown,
err: true,
errMessage: "watch closed before Until timeout",
}, {
podPhase: core.PodSucceeded,
expectedPhase: core.PodSucceeded,
}, {
podPhase: core.PodFailed,
expectedPhase: core.PodFailed,
},
}
for _, tt := range tests {
f, tf, codec, ns := cmdtesting.NewAPIFactory()
actions := make(map[string]string)
var testPodList core.PodList
testPodList.Items = append(testPodList.Items, newPodWithStatus("bestpod", core.PodStatus{Phase: tt.podPhase}, "test"))
tf.Client = &fake.RESTClient{
NegotiatedSerializer: ns,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
p, m := req.URL.Path, req.Method
actions[p] = m
switch {
case p == "/namespaces/test/pods/bestpod" && m == "GET":
return newResponse(200, &testPodList.Items[0])
case p == "/namespaces/test/pods" && m == "GET":
event := watch.Event{Type: watch.Added, Object: &testPodList.Items[0]}
return newEventResponse(200, &event)
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
c := newTestClient()
defer c.Cleanup()
infos, err := c.Build(tt.namespace, tt.reader)
if err != nil && err.Error() != tt.errMessage {
t.Errorf("Error while building manifests: %v", err)
}
c := newTestClient(f)
err = perform(infos, fn)
if (err != nil) != tt.err {
t.Errorf("expected error: %v, got %v", tt.err, err)
}
if err != nil && err.Error() != tt.errMessage {
t.Errorf("expected error message: %v, got %v", tt.errMessage, err)
}
phase, err := c.WaitAndGetCompletedPodPhase("test", objBody(codec, &testPodList), 1*time.Second)
if (err != nil) != tt.err {
t.Fatalf("Expected error but there was none.")
}
if err != nil && err.Error() != tt.errMessage {
t.Fatalf("Expected error %s, got %s", tt.errMessage, err.Error())
}
if phase != tt.expectedPhase {
t.Fatalf("Expected pod phase %s, got %s", tt.expectedPhase, phase)
}
if len(results) != tt.count {
t.Errorf("expected %d result objects, got %d", tt.count, len(results))
}
})
}
}

@ -89,17 +89,20 @@ func (Hook_Event) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []i
type Hook_DeletePolicy int32
const (
Hook_SUCCEEDED Hook_DeletePolicy = 0
Hook_FAILED Hook_DeletePolicy = 1
Hook_SUCCEEDED Hook_DeletePolicy = 0
Hook_FAILED Hook_DeletePolicy = 1
Hook_BEFORE_HOOK_CREATION Hook_DeletePolicy = 2
)
var Hook_DeletePolicy_name = map[int32]string{
0: "SUCCEEDED",
1: "FAILED",
2: "BEFORE_HOOK_CREATION",
}
var Hook_DeletePolicy_value = map[string]int32{
"SUCCEEDED": 0,
"FAILED": 1,
"SUCCEEDED": 0,
"FAILED": 1,
"BEFORE_HOOK_CREATION": 2,
}
func (x Hook_DeletePolicy) String() string {
@ -196,32 +199,33 @@ func init() {
func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 428 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xd1, 0x6e, 0xda, 0x30,
0x14, 0x86, 0x9b, 0x12, 0x12, 0x38, 0x50, 0xea, 0x59, 0xd3, 0x66, 0x71, 0x53, 0xc4, 0x15, 0xbb,
0x09, 0x53, 0xa7, 0x3d, 0x40, 0x4a, 0xce, 0xd6, 0xaa, 0x11, 0x20, 0x27, 0x68, 0xd2, 0x6e, 0xa2,
0x74, 0xb8, 0x10, 0x11, 0xe2, 0x88, 0x98, 0x4d, 0x7b, 0xa6, 0xbd, 0xce, 0x1e, 0x68, 0xb2, 0x09,
0x59, 0xa5, 0xed, 0xee, 0x9c, 0xef, 0x7c, 0x76, 0xce, 0x1f, 0xc3, 0xdb, 0x6d, 0x5a, 0x66, 0xd3,
0x83, 0xc8, 0x45, 0x5a, 0x89, 0xe9, 0x56, 0xca, 0x9d, 0x57, 0x1e, 0xa4, 0x92, 0xb4, 0xaf, 0x07,
0x5e, 0x3d, 0x18, 0xde, 0x6c, 0xa4, 0xdc, 0xe4, 0x62, 0x6a, 0x66, 0x4f, 0xc7, 0xe7, 0xa9, 0xca,
0xf6, 0xa2, 0x52, 0xe9, 0xbe, 0x3c, 0xe9, 0xe3, 0x5f, 0x36, 0xd8, 0xf7, 0x52, 0xee, 0x28, 0x05,
0xbb, 0x48, 0xf7, 0x82, 0x59, 0x23, 0x6b, 0xd2, 0xe5, 0xa6, 0xd6, 0x6c, 0x97, 0x15, 0x6b, 0x76,
0x79, 0x62, 0xba, 0xd6, 0xac, 0x4c, 0xd5, 0x96, 0xb5, 0x4e, 0x4c, 0xd7, 0x74, 0x08, 0x9d, 0x7d,
0x5a, 0x64, 0xcf, 0xa2, 0x52, 0xcc, 0x36, 0xbc, 0xe9, 0xe9, 0x7b, 0x70, 0xc4, 0x77, 0x51, 0xa8,
0x8a, 0xb5, 0x47, 0xad, 0xc9, 0xe0, 0x96, 0x79, 0x2f, 0x17, 0xf4, 0xf4, 0xb7, 0x3d, 0xd4, 0x02,
0xaf, 0x3d, 0xfa, 0x11, 0x3a, 0x79, 0x5a, 0xa9, 0xe4, 0x70, 0x2c, 0x98, 0x33, 0xb2, 0x26, 0xbd,
0xdb, 0xa1, 0x77, 0x8a, 0xe1, 0x9d, 0x63, 0x78, 0xf1, 0x39, 0x06, 0x77, 0xb5, 0xcb, 0x8f, 0x05,
0x7d, 0x03, 0xce, 0x0f, 0x91, 0x6d, 0xb6, 0x8a, 0xb9, 0x23, 0x6b, 0xd2, 0xe6, 0x75, 0x47, 0xef,
0xe1, 0x7a, 0x2d, 0x72, 0xa1, 0x44, 0x52, 0xca, 0x3c, 0xfb, 0x96, 0x89, 0x8a, 0x75, 0xcc, 0x26,
0x37, 0xff, 0xd9, 0x24, 0x30, 0xe6, 0x52, 0x8b, 0x3f, 0xf9, 0x60, 0xfd, 0xb7, 0xcb, 0x44, 0x35,
0xfe, 0x6d, 0x41, 0xdb, 0xac, 0x4a, 0x7b, 0xe0, 0xae, 0xe6, 0x8f, 0xf3, 0xc5, 0x97, 0x39, 0xb9,
0xa0, 0xd7, 0xd0, 0x5b, 0x72, 0x4c, 0x1e, 0xe6, 0x51, 0xec, 0x87, 0x21, 0xb1, 0x28, 0x81, 0xfe,
0x72, 0x11, 0xc5, 0x0d, 0xb9, 0xa4, 0x03, 0x00, 0xad, 0x04, 0x18, 0x62, 0x8c, 0xa4, 0x65, 0x8e,
0x68, 0xa3, 0x06, 0xf6, 0xf9, 0x8e, 0xd5, 0xf2, 0x33, 0xf7, 0x03, 0x24, 0xed, 0xe6, 0x8e, 0x33,
0x71, 0x0c, 0xe1, 0x98, 0xf0, 0x45, 0x18, 0xde, 0xf9, 0xb3, 0x47, 0xe2, 0xd2, 0x57, 0x70, 0x65,
0x9c, 0x06, 0x75, 0x28, 0x83, 0xd7, 0x1c, 0x43, 0xf4, 0x23, 0x4c, 0x62, 0x8c, 0xe2, 0x24, 0x5a,
0xcd, 0x66, 0x18, 0x45, 0xa4, 0xfb, 0xcf, 0xe4, 0x93, 0xff, 0x10, 0xae, 0x38, 0x12, 0x18, 0xbf,
0x83, 0xfe, 0xcb, 0xd8, 0xf4, 0x0a, 0xba, 0xe6, 0x18, 0x06, 0x18, 0x90, 0x0b, 0x0a, 0xe0, 0x68,
0x17, 0x03, 0x62, 0xdd, 0x75, 0xbf, 0xba, 0xf5, 0xef, 0x7a, 0x72, 0xcc, 0x5b, 0x7c, 0xf8, 0x13,
0x00, 0x00, 0xff, 0xff, 0xb9, 0x8a, 0xe1, 0xaf, 0x89, 0x02, 0x00, 0x00,
// 445 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x51, 0x8f, 0x9a, 0x40,
0x10, 0x80, 0x8f, 0x13, 0x41, 0x47, 0xcf, 0xdb, 0x6e, 0x9a, 0x76, 0xe3, 0xcb, 0x19, 0x9f, 0x7c,
0xc2, 0xe6, 0x9a, 0xfe, 0x00, 0x84, 0xb9, 0x6a, 0x24, 0x60, 0x16, 0x4c, 0x93, 0xbe, 0x10, 0xae,
0xee, 0x29, 0x11, 0x81, 0x08, 0xb6, 0xe9, 0x0f, 0xec, 0x3f, 0xe8, 0x0f, 0x6a, 0x76, 0x45, 0x7b,
0x49, 0xfb, 0x36, 0xf3, 0xcd, 0x37, 0xc3, 0x0c, 0x0b, 0xef, 0x77, 0x49, 0x99, 0x4e, 0x8f, 0x22,
0x13, 0x49, 0x25, 0xa6, 0xbb, 0xa2, 0xd8, 0x5b, 0xe5, 0xb1, 0xa8, 0x0b, 0xda, 0x97, 0x05, 0xab,
0x29, 0x0c, 0x1f, 0xb6, 0x45, 0xb1, 0xcd, 0xc4, 0x54, 0xd5, 0x9e, 0x4f, 0x2f, 0xd3, 0x3a, 0x3d,
0x88, 0xaa, 0x4e, 0x0e, 0xe5, 0x59, 0x1f, 0xff, 0xd2, 0x41, 0x9f, 0x17, 0xc5, 0x9e, 0x52, 0xd0,
0xf3, 0xe4, 0x20, 0x98, 0x36, 0xd2, 0x26, 0x5d, 0xae, 0x62, 0xc9, 0xf6, 0x69, 0xbe, 0x61, 0xb7,
0x67, 0x26, 0x63, 0xc9, 0xca, 0xa4, 0xde, 0xb1, 0xd6, 0x99, 0xc9, 0x98, 0x0e, 0xa1, 0x73, 0x48,
0xf2, 0xf4, 0x45, 0x54, 0x35, 0xd3, 0x15, 0xbf, 0xe6, 0xf4, 0x03, 0x18, 0xe2, 0xbb, 0xc8, 0xeb,
0x8a, 0xb5, 0x47, 0xad, 0xc9, 0xe0, 0x91, 0x59, 0xaf, 0x17, 0xb4, 0xe4, 0xb7, 0x2d, 0x94, 0x02,
0x6f, 0x3c, 0xfa, 0x09, 0x3a, 0x59, 0x52, 0xd5, 0xf1, 0xf1, 0x94, 0x33, 0x63, 0xa4, 0x4d, 0x7a,
0x8f, 0x43, 0xeb, 0x7c, 0x86, 0x75, 0x39, 0xc3, 0x8a, 0x2e, 0x67, 0x70, 0x53, 0xba, 0xfc, 0x94,
0xd3, 0x77, 0x60, 0xfc, 0x10, 0xe9, 0x76, 0x57, 0x33, 0x73, 0xa4, 0x4d, 0xda, 0xbc, 0xc9, 0xe8,
0x1c, 0xee, 0x37, 0x22, 0x13, 0xb5, 0x88, 0xcb, 0x22, 0x4b, 0xbf, 0xa5, 0xa2, 0x62, 0x1d, 0xb5,
0xc9, 0xc3, 0x7f, 0x36, 0x71, 0x95, 0xb9, 0x92, 0xe2, 0x4f, 0x3e, 0xd8, 0xfc, 0xcd, 0x52, 0x51,
0x8d, 0x7f, 0x6b, 0xd0, 0x56, 0xab, 0xd2, 0x1e, 0x98, 0x6b, 0x7f, 0xe9, 0x07, 0x5f, 0x7c, 0x72,
0x43, 0xef, 0xa1, 0xb7, 0xe2, 0x18, 0x2f, 0xfc, 0x30, 0xb2, 0x3d, 0x8f, 0x68, 0x94, 0x40, 0x7f,
0x15, 0x84, 0xd1, 0x95, 0xdc, 0xd2, 0x01, 0x80, 0x54, 0x5c, 0xf4, 0x30, 0x42, 0xd2, 0x52, 0x2d,
0xd2, 0x68, 0x80, 0x7e, 0x99, 0xb1, 0x5e, 0x7d, 0xe6, 0xb6, 0x8b, 0xa4, 0x7d, 0x9d, 0x71, 0x21,
0x86, 0x22, 0x1c, 0x63, 0x1e, 0x78, 0xde, 0xcc, 0x76, 0x96, 0xc4, 0xa4, 0x6f, 0xe0, 0x4e, 0x39,
0x57, 0xd4, 0xa1, 0x0c, 0xde, 0x72, 0xf4, 0xd0, 0x0e, 0x31, 0x8e, 0x30, 0x8c, 0xe2, 0x70, 0xed,
0x38, 0x18, 0x86, 0xa4, 0xfb, 0x4f, 0xe5, 0xc9, 0x5e, 0x78, 0x6b, 0x8e, 0x04, 0xc6, 0x0e, 0xf4,
0x5f, 0x9f, 0x4d, 0xef, 0xa0, 0xab, 0xda, 0xd0, 0x45, 0x97, 0xdc, 0x50, 0x00, 0x43, 0xba, 0xe8,
0x12, 0x4d, 0x0e, 0x99, 0xe1, 0x53, 0xc0, 0x31, 0x9e, 0x07, 0xc1, 0x32, 0x76, 0x38, 0xda, 0xd1,
0x22, 0xf0, 0xc9, 0xed, 0xac, 0xfb, 0xd5, 0x6c, 0x7e, 0xe4, 0xb3, 0xa1, 0x5e, 0xe9, 0xe3, 0x9f,
0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x64, 0x75, 0x6c, 0xa3, 0x02, 0x00, 0x00,
}

@ -196,9 +196,9 @@ func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caF
return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters)
}
// FindChartInRepoURL finds chart in chart repository pointed by repoURL
// without adding repo to repositories.
// Unlike the FindChartInRepoURL function, this function also receives credentials for the chart repository.
// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL
// without adding repo to repositories, like FindChartInRepoURL,
// but it also receives credentials for the chart repository.
func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) {
// Download and write the index file to a temporary location

@ -31,7 +31,8 @@ import (
var ErrRepoOutOfDate = errors.New("repository file is out of date")
// RepoFile represents the repositories.yaml file in $HELM_HOME
type RepoFile struct {
// TODO: change type name to File in Helm 3 to resolve linter warning
type RepoFile struct { // nolint
APIVersion string `json:"apiVersion"`
Generated time.Time `json:"generated"`
Repositories []*Entry `json:"repositories"`

@ -36,7 +36,7 @@ func ToYAML(s string) (string, error) {
return "", err
}
d, err := yaml.Marshal(m)
return string(d), err
return strings.TrimSuffix(string(d), "\n"), err
}
// Parse parses a set line.
@ -50,7 +50,7 @@ func Parse(s string) (map[string]interface{}, error) {
return vals, err
}
// Parse parses a set line and forces a string value.
// ParseString parses a set line and forces a string value.
//
// A set line is of the form name1=value1,name2=value2
func ParseString(s string) (map[string]interface{}, error) {

@ -365,7 +365,7 @@ func TestToYAML(t *testing.T) {
if err != nil {
t.Fatal(err)
}
expect := "name: value\n"
expect := "name: value"
if o != expect {
t.Errorf("Expected %q, got %q", expect, o)
}

@ -46,8 +46,9 @@ var events = map[string]release.Hook_Event{
// deletePolices represents a mapping between the annotation value for a hook delete policy and its real meaning
var deletePolices = map[string]release.Hook_DeletePolicy{
hooks.HookSucceeded: release.Hook_SUCCEEDED,
hooks.HookFailed: release.Hook_FAILED,
hooks.HookSucceeded: release.Hook_SUCCEEDED,
hooks.HookFailed: release.Hook_FAILED,
hooks.BeforeHookCreation: release.Hook_BEFORE_HOOK_CREATION,
}
// Manifest represents a manifest file, which has a name and some content.
@ -189,21 +190,14 @@ func (file *manifestFile) sort(result *result) error {
result.hooks = append(result.hooks, h)
isKnownDeletePolices := false
dps, ok := entry.Metadata.Annotations[hooks.HookDeleteAnno]
if ok {
for _, dp := range strings.Split(dps, ",") {
dp = strings.ToLower(strings.TrimSpace(dp))
p, exist := deletePolices[dp]
if exist {
isKnownDeletePolices = true
h.DeletePolicies = append(h.DeletePolicies, p)
}
operateAnnotationValues(entry, hooks.HookDeleteAnno, func(value string) {
policy, exist := deletePolices[value]
if exist {
h.DeletePolicies = append(h.DeletePolicies, policy)
} else {
log.Printf("info: skipping unknown hook delete policy: %q", value)
}
if !isKnownDeletePolices {
log.Printf("info: skipping unknown hook delete policy: %q", dps)
}
}
})
}
return nil
@ -228,3 +222,12 @@ func calculateHookWeight(entry util.SimpleHead) int32 {
return int32(hw)
}
func operateAnnotationValues(entry util.SimpleHead, annotation string, operate func(p string)) {
if dps, ok := entry.Metadata.Annotations[annotation]; ok {
for _, dp := range strings.Split(dps, ",") {
dp = strings.ToLower(strings.TrimSpace(dp))
operate(dp)
}
}
}

@ -30,6 +30,7 @@ var InstallOrder SortOrder = []string{
"Namespace",
"ResourceQuota",
"LimitRange",
"PodSecurityPolicy",
"Secret",
"ConfigMap",
"StorageClass",
@ -80,6 +81,7 @@ var UninstallOrder SortOrder = []string{
"StorageClass",
"ConfigMap",
"Secret",
"PodSecurityPolicy",
"LimitRange",
"ResourceQuota",
"Namespace",

@ -85,6 +85,10 @@ func TestKindSorter(t *testing.T) {
Name: "o",
Head: &util.SimpleHead{Kind: "Pod"},
},
{
Name: "3",
Head: &util.SimpleHead{Kind: "PodSecurityPolicy"},
},
{
Name: "q",
Head: &util.SimpleHead{Kind: "ReplicaSet"},
@ -136,8 +140,8 @@ func TestKindSorter(t *testing.T) {
order SortOrder
expected string
}{
{"install", InstallOrder, "abcde1fgh2ijklmnopqrstuvw!"},
{"uninstall", UninstallOrder, "wvmutsrqponlkji2hgf1edcba!"},
{"install", InstallOrder, "abc3de1fgh2ijklmnopqrstuvw!"},
{"uninstall", UninstallOrder, "wvmutsrqponlkji2hgf1ed3cba!"},
} {
var buf bytes.Buffer
t.Run(test.description, func(t *testing.T) {

@ -22,7 +22,6 @@ import (
"testing"
"k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
"k8s.io/helm/pkg/version"
@ -32,17 +31,7 @@ func TestInstallRelease(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
req := installRequest()
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
@ -96,18 +85,9 @@ func TestInstallRelease_WithNotes(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText)},
},
},
}
req := installRequest(
withChart(withNotes(notesText)),
)
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
@ -165,18 +145,9 @@ func TestInstallRelease_WithNotesRendered(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText + " {{.Release.Name}}")},
},
},
}
req := installRequest(
withChart(withNotes(notesText + " {{.Release.Name}}")),
)
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
@ -236,17 +207,9 @@ func TestInstallRelease_TillerVersion(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", TillerVersion: ">=2.2.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
req := installRequest(
withChart(withTiller(">=2.2.0")),
)
_, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Expected valid range. Got %q", err)
@ -258,17 +221,9 @@ func TestInstallRelease_WrongTillerVersion(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", TillerVersion: "<2.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
req := installRequest(
withChart(withTiller("<2.0.0")),
)
_, err := rs.InstallRelease(c, req)
if err == nil {
t.Fatalf("Expected to fail because of wrong version")
@ -284,29 +239,10 @@ func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText)},
},
Dependencies: []*chart.Chart{
{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
{Name: "templates/NOTES.txt", Data: []byte(notesText + " child")},
},
},
},
},
}
req := installRequest(withChart(
withNotes(notesText),
withDependency(withNotes(notesText+" child")),
))
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
@ -335,10 +271,9 @@ func TestInstallRelease_DryRun(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
req := &services.InstallReleaseRequest{
Chart: chartStub(),
DryRun: true,
}
req := installRequest(withDryRun(),
withChart(withSampleTemplates()),
)
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Errorf("Failed install: %s", err)
@ -389,10 +324,7 @@ func TestInstallRelease_NoHooks(t *testing.T) {
rs := rsFixture()
rs.env.Releases.Create(releaseStub())
req := &services.InstallReleaseRequest{
Chart: chartStub(),
DisableHooks: true,
}
req := installRequest(withDisabledHooks())
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Errorf("Failed install: %s", err)
@ -409,9 +341,7 @@ func TestInstallRelease_FailedHooks(t *testing.T) {
rs.env.Releases.Create(releaseStub())
rs.env.KubeClient = newHookFailingKubeClient()
req := &services.InstallReleaseRequest{
Chart: chartStub(),
}
req := installRequest()
res, err := rs.InstallRelease(c, req)
if err == nil {
t.Error("Expected failed install")
@ -429,11 +359,10 @@ func TestInstallRelease_ReuseName(t *testing.T) {
rel.Info.Status.Code = release.Status_DELETED
rs.env.Releases.Create(rel)
req := &services.InstallReleaseRequest{
Chart: chartStub(),
ReuseName: true,
Name: rel.Name,
}
req := installRequest(
withReuseName(),
withName(rel.Name),
)
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
@ -457,18 +386,10 @@ func TestInstallRelease_KubeVersion(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", KubeVersion: ">=0.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
req := installRequest(
withChart(withKube(">=0.0.0")),
)
_, err := rs.InstallRelease(c, req)
fmt.Println(err)
if err != nil {
t.Fatalf("Expected valid range. Got %q", err)
}
@ -478,16 +399,10 @@ func TestInstallRelease_WrongKubeVersion(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
// TODO: Refactor this into a mock.
req := &services.InstallReleaseRequest{
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello", KubeVersion: ">=5.0.0"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
}
req := installRequest(
withChart(withKube(">=5.0.0")),
)
_, err := rs.InstallRelease(c, req)
if err == nil {
t.Fatalf("Expected to fail because of wrong version")

@ -18,11 +18,11 @@ package tiller
import (
"fmt"
"regexp"
"github.com/golang/protobuf/proto"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
relutil "k8s.io/helm/pkg/releaseutil"
"regexp"
)
// ListReleases lists the releases found by the server.
@ -107,14 +107,50 @@ func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream s
rels = rels[0:req.Limit]
l = int64(len(rels))
}
res := &services.ListReleasesResponse{
Next: next,
Count: l,
Total: total,
Releases: rels,
Next: next,
Count: l,
Total: total,
}
return stream.Send(res)
chunks := s.partition(rels[:min(len(rels), int(req.Limit))], maxMsgSize-proto.Size(res))
for res.Releases = range chunks {
if err := stream.Send(res); err != nil {
for range chunks { // drain
}
return err
}
}
return nil
}
// partition packs releases into slices up to the capacity cap in bytes.
func (s *ReleaseServer) partition(rels []*release.Release, cap int) <-chan []*release.Release {
chunks := make(chan []*release.Release, 1)
go func() {
var (
fill = 0 // fill is the number of bytes already packed into the current chunk
size int // size is size of a release
)
var chunk []*release.Release
for _, rls := range rels {
if size = proto.Size(rls); size+fill > cap {
// Over-cap, push chunk onto channel to send over gRPC stream
s.Log("partitioned at %d with %d releases (cap=%d)", fill, len(chunk), cap)
chunks <- chunk
// reset partitioning state
chunk = chunk[:0]
fill = 0
}
chunk = append(chunk, rls)
fill += size
}
if len(chunk) > 0 {
// send remaining if any
chunks <- chunk
}
close(chunks)
}()
return chunks
}
func filterByNamespace(namespace string, rels []*release.Release) ([]*release.Release, error) {

@ -135,7 +135,7 @@ func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.R
targetRelease.Info.Status.Code = release.Status_FAILED
targetRelease.Info.Description = msg
s.recordRelease(currentRelease, true)
s.recordRelease(targetRelease, false)
s.recordRelease(targetRelease, true)
return res, err
}

@ -25,6 +25,7 @@ import (
"strings"
"github.com/technosophos/moniker"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@ -135,7 +136,22 @@ func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current
if err != nil {
return err
}
// merge new values with current
req.Values.Raw = current.Config.Raw + "\n" + req.Values.Raw
req.Chart.Values = &chart.Config{Raw: nv}
// yaml unmarshal and marshal to remove duplicate keys
y := map[string]interface{}{}
if err := yaml.Unmarshal([]byte(req.Values.Raw), &y); err != nil {
return err
}
data, err := yaml.Marshal(y)
if err != nil {
return err
}
req.Values.Raw = string(data)
return nil
}
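The round-trip works because, when the concatenated document is unmarshaled, later duplicate keys overwrite earlier ones, so the newly supplied values win, and marshaling the map back yields a single clean document. A standalone sketch of that behavior (not Tiller's actual code path):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// current release config followed by newly supplied values,
	// concatenated the same way reuseValues does
	raw := "replicas: 1\nimage: nginx\n" + "\n" + "replicas: 3\n"

	// plain Unmarshal tolerates duplicate keys; the last value wins
	m := map[string]interface{}{}
	if err := yaml.Unmarshal([]byte(raw), &m); err != nil {
		panic(err)
	}
	data, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data)) // image: nginx, replicas: 3 (duplicates collapsed)
}
```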
@ -347,6 +363,9 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
executingHooks = sortByHookWeight(executingHooks)
for _, h := range executingHooks {
if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.BeforeHookCreation, name, namespace, hook, kubeCli); err != nil {
return err
}
b := bytes.NewBufferString(h.Manifest)
if err := kubeCli.Create(namespace, b, timeout, false); err != nil {
@ -356,18 +375,13 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
// No way to rewind a bytes.Buffer()?
b.Reset()
b.WriteString(h.Manifest)
if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
// If a hook is failed, checkout the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook
if hookShouldBeDeleted(h, hooks.HookFailed) {
b.Reset()
b.WriteString(h.Manifest)
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, hooks.HookFailed)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.HookFailed, name, namespace, hook, kubeCli); err != nil {
return err
}
return err
}
@ -377,13 +391,8 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
// If all hooks are succeeded, checkout the annotation of each hook to determine whether the hook should be deleted
// under succeeded condition. If so, then clear the corresponding resource object in each hook
for _, h := range executingHooks {
b := bytes.NewBufferString(h.Manifest)
if hookShouldBeDeleted(h, hooks.HookSucceeded) {
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, hooks.HookSucceeded)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
if err := s.deleteHookIfShouldBeDeletedByDeletePolicy(h, hooks.HookSucceeded, name, namespace, hook, kubeCli); err != nil {
return err
}
h.LastRun = timeconv.Now()
}
@ -409,11 +418,23 @@ func validateReleaseName(releaseName string) error {
return nil
}
func (s *ReleaseServer) deleteHookIfShouldBeDeletedByDeletePolicy(h *release.Hook, policy string, name, namespace, hook string, kubeCli environment.KubeClient) error {
b := bytes.NewBufferString(h.Manifest)
if hookHasDeletePolicy(h, policy) {
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, policy)
if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
}
return nil
}
// hookHasDeletePolicy determines whether the defined hook deletion policy matches one of the
// hook deletion policies supported by Helm.
func hookShouldBeDeleted(hook *release.Hook, policy string) bool {
func hookHasDeletePolicy(h *release.Hook, policy string) bool {
if dp, ok := deletePolices[policy]; ok {
for _, v := range hook.DeletePolicies {
for _, v := range h.DeletePolicies {
if dp == v {
return true
}
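For reference, hookHasDeletePolicy looks the annotation value up in the package-level deletePolices table and scans the policies recorded on the hook. A self-contained sketch of that matching, with a simplified enum standing in for release.Hook_DeletePolicy; the table contents are inferred from the annotation values exercised in the tests below:

package main

import "fmt"

type DeletePolicy int

const (
	Succeeded DeletePolicy = iota
	Failed
	BeforeHookCreation
)

// deletePolices maps "helm.sh/hook-delete-policy" annotation values to the
// corresponding enum (a simplified stand-in for the real table).
var deletePolices = map[string]DeletePolicy{
	"hook-succeeded":       Succeeded,
	"hook-failed":          Failed,
	"before-hook-creation": BeforeHookCreation,
}

func hookHasDeletePolicy(hookPolicies []DeletePolicy, policy string) bool {
	dp, ok := deletePolices[policy]
	if !ok {
		return false
	}
	for _, v := range hookPolicies {
		if dp == v {
			return true
		}
	}
	return false
}

func main() {
	ps := []DeletePolicy{Succeeded, BeforeHookCreation}
	fmt.Println(hookHasDeletePolicy(ps, "hook-succeeded"))       // true
	fmt.Println(hookHasDeletePolicy(ps, "hook-failed"))          // false
	fmt.Println(hookHasDeletePolicy(ps, "before-hook-creation")) // true
}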

@ -18,18 +18,25 @@ package tiller
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/hooks"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
@ -98,23 +105,128 @@ func rsFixture() *ReleaseServer {
}
}
// chartStub creates a fully stubbed out chart.
func chartStub() *chart.Chart {
return &chart.Chart{
// TODO: This should be more complete.
Metadata: &chart.Metadata{
Name: "hello",
type chartOptions struct {
*chart.Chart
}
type chartOption func(*chartOptions)
func buildChart(opts ...chartOption) *chart.Chart {
c := &chartOptions{
Chart: &chart.Chart{
// TODO: This should be more complete.
Metadata: &chart.Metadata{
Name: "hello",
},
// This adds a basic template and hooks.
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
},
// This adds basic templates, partials, and hooks.
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
}
for _, opt := range opts {
opt(c)
}
return c.Chart
}
func withKube(version string) chartOption {
return func(opts *chartOptions) {
opts.Metadata.KubeVersion = version
}
}
func withTiller(version string) chartOption {
return func(opts *chartOptions) {
opts.Metadata.TillerVersion = version
}
}
func withDependency(dependencyOpts ...chartOption) chartOption {
return func(opts *chartOptions) {
opts.Dependencies = append(opts.Dependencies, buildChart(dependencyOpts...))
}
}
func withNotes(notes string) chartOption {
return func(opts *chartOptions) {
opts.Templates = append(opts.Templates, &chart.Template{
Name: "templates/NOTES.txt",
Data: []byte(notes),
})
}
}
func withSampleTemplates() chartOption {
return func(opts *chartOptions) {
sampleTemplates := []*chart.Template{
// This adds basic templates and partials.
{Name: "templates/goodbye", Data: []byte("goodbye: world")},
{Name: "templates/empty", Data: []byte("")},
{Name: "templates/with-partials", Data: []byte(`hello: {{ template "_planet" . }}`)},
{Name: "templates/partials/_planet", Data: []byte(`{{define "_planet"}}Earth{{end}}`)},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
}
opts.Templates = append(opts.Templates, sampleTemplates...)
}
}
type installOptions struct {
*services.InstallReleaseRequest
}
type installOption func(*installOptions)
func withName(name string) installOption {
return func(opts *installOptions) {
opts.Name = name
}
}
func withDryRun() installOption {
return func(opts *installOptions) {
opts.DryRun = true
}
}
func withDisabledHooks() installOption {
return func(opts *installOptions) {
opts.DisableHooks = true
}
}
func withReuseName() installOption {
return func(opts *installOptions) {
opts.ReuseName = true
}
}
func withChart(chartOpts ...chartOption) installOption {
return func(opts *installOptions) {
opts.Chart = buildChart(chartOpts...)
}
}
func installRequest(opts ...installOption) *services.InstallReleaseRequest {
reqOpts := &installOptions{
&services.InstallReleaseRequest{
Namespace: "spaced",
Chart: buildChart(),
},
}
for _, opt := range opts {
opt(reqOpts)
}
return reqOpts.InstallReleaseRequest
}
// chartStub creates a fully stubbed out chart.
func chartStub() *chart.Chart {
return buildChart(withSampleTemplates())
}
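The helpers above follow the functional options pattern: buildChart and installRequest construct sane defaults, and each with* option mutates the wrapped struct. A usage sketch (hypothetical name and notes text; assumes it sits in the same test package as the helpers):

func exampleFixtures() *services.InstallReleaseRequest {
	return installRequest(
		withName("albatross"), // override the generated release name
		withDryRun(),          // render without installing
		withChart( // swap in a richer chart fixture
			withSampleTemplates(),
			withNotes("Thanks for installing!"),
		),
	)
}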
// releaseStub creates a release stub, complete with the chartStub as its chart.
@ -345,3 +457,460 @@ func (rs mockRunReleaseTestServer) SetTrailer(m metadata.MD) {}
func (rs mockRunReleaseTestServer) SendMsg(v interface{}) error { return nil }
func (rs mockRunReleaseTestServer) RecvMsg(v interface{}) error { return nil }
func (rs mockRunReleaseTestServer) Context() context.Context { return helm.NewContext() }
type mockHooksManifest struct {
Metadata struct {
Name string
Annotations map[string]string
}
}
type mockHooksKubeClient struct {
Resources map[string]*mockHooksManifest
}
var errResourceExists = errors.New("resource already exists")
func (kc *mockHooksKubeClient) makeManifest(r io.Reader) (*mockHooksManifest, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
manifest := &mockHooksManifest{}
err = yaml.Unmarshal(b, manifest)
if err != nil {
return nil, err
}
return manifest, nil
}
func (kc *mockHooksKubeClient) Create(ns string, r io.Reader, timeout int64, shouldWait bool) error {
manifest, err := kc.makeManifest(r)
if err != nil {
return err
}
if _, hasKey := kc.Resources[manifest.Metadata.Name]; hasKey {
return errResourceExists
}
kc.Resources[manifest.Metadata.Name] = manifest
return nil
}
func (kc *mockHooksKubeClient) Get(ns string, r io.Reader) (string, error) {
return "", nil
}
func (kc *mockHooksKubeClient) Delete(ns string, r io.Reader) error {
manifest, err := kc.makeManifest(r)
if err != nil {
return err
}
delete(kc.Resources, manifest.Metadata.Name)
return nil
}
func (kc *mockHooksKubeClient) WatchUntilReady(ns string, r io.Reader, timeout int64, shouldWait bool) error {
paramManifest, err := kc.makeManifest(r)
if err != nil {
return err
}
manifest, hasManifest := kc.Resources[paramManifest.Metadata.Name]
if !hasManifest {
return fmt.Errorf("mockHooksKubeClient.WatchUntilReady: no such resource %s found", paramManifest.Metadata.Name)
}
if manifest.Metadata.Annotations["mockHooksKubeClient/Emulate"] == "hook-failed" {
return fmt.Errorf("mockHooksKubeClient.WatchUntilReady: hook-failed")
}
return nil
}
func (kc *mockHooksKubeClient) Update(ns string, currentReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error {
return nil
}
func (kc *mockHooksKubeClient) Build(ns string, reader io.Reader) (kube.Result, error) {
return []*resource.Info{}, nil
}
func (kc *mockHooksKubeClient) BuildUnstructured(ns string, reader io.Reader) (kube.Result, error) {
return []*resource.Info{}, nil
}
func (kc *mockHooksKubeClient) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (core.PodPhase, error) {
return core.PodUnknown, nil
}
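Because mockHooksKubeClient implements environment.KubeClient by hand, a compile-time assertion is a cheap guard against the interface drifting; this line is not in the diff, just a common Go idiom that would fit alongside the mock:

// Fails to compile if mockHooksKubeClient stops satisfying KubeClient.
var _ environment.KubeClient = &mockHooksKubeClient{}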
func deletePolicyStub(kubeClient *mockHooksKubeClient) *ReleaseServer {
e := environment.New()
e.Releases = storage.Init(driver.NewMemory())
e.KubeClient = kubeClient
clientset := fake.NewSimpleClientset()
return &ReleaseServer{
ReleaseModule: &LocalReleaseModule{
clientset: clientset,
},
env: e,
clientset: clientset,
Log: func(_ string, _ ...interface{}) {},
}
}
func deletePolicyHookStub(hookName string, extraAnnotations map[string]string, deletePolicies []release.Hook_DeletePolicy) *release.Hook {
extraAnnotationsStr := ""
for k, v := range extraAnnotations {
extraAnnotationsStr += fmt.Sprintf(" \"%s\": \"%s\"\n", k, v)
}
return &release.Hook{
Name: hookName,
Kind: "Job",
Path: hookName,
Manifest: fmt.Sprintf(`kind: Job
metadata:
name: %s
annotations:
"helm.sh/hook": pre-install,pre-upgrade
%sdata:
name: value`, hookName, extraAnnotationsStr),
Events: []release.Hook_Event{
release.Hook_PRE_INSTALL,
release.Hook_PRE_UPGRADE,
},
DeletePolicies: deletePolicies,
}
}
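The %sdata: line in the format string above is easy to misread: the extra annotation lines, each pre-indented and newline-terminated by the loop, are spliced in between the fixed helm.sh/hook annotation and the data block. A standalone sketch of that splice, with the indentation reconstructed:

package main

import "fmt"

func main() {
	hookName := "migration-job"
	extraAnnotationsStr := fmt.Sprintf("    %q: %q\n",
		"helm.sh/hook-delete-policy", "hook-succeeded")

	// The second %s lands flush against "data:", so each extra annotation
	// line must carry its own indentation and trailing newline.
	fmt.Printf(`kind: Job
metadata:
  name: %s
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
%sdata:
  name: value
`, hookName, extraAnnotationsStr)
}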
func execHookShouldSucceed(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err != nil {
return fmt.Errorf("expected hook %s to be successful: %s", hook.Name, err)
}
return nil
}
func execHookShouldFail(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err == nil {
return fmt.Errorf("expected hook %s to be failed", hook.Name)
}
return nil
}
func execHookShouldFailWithError(rs *ReleaseServer, hook *release.Hook, releaseName string, namespace string, hookType string, expectedError error) error {
err := rs.execHook([]*release.Hook{hook}, releaseName, namespace, hookType, 600)
if err != expectedError {
return fmt.Errorf("expected hook %s to fail with error %v, got %v", hook.Name, expectedError, err)
}
return nil
}
type deletePolicyContext struct {
ReleaseServer *ReleaseServer
ReleaseName string
Namespace string
HookName string
KubeClient *mockHooksKubeClient
}
func newDeletePolicyContext() *deletePolicyContext {
kubeClient := &mockHooksKubeClient{
Resources: make(map[string]*mockHooksManifest),
}
return &deletePolicyContext{
KubeClient: kubeClient,
ReleaseServer: deletePolicyStub(kubeClient),
ReleaseName: "flying-carp",
Namespace: "river",
HookName: "migration-job",
}
}
func TestSuccessfulHookWithoutDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName, nil, nil)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be created by kube client", hook.Name)
}
}
func TestFailedHookWithoutDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"mockHooksKubeClient/Emulate": "hook-failed"},
nil,
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be created by kube client", hook.Name)
}
}
func TestSuccessfulHookWithSucceededDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "hook-succeeded"},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestSuccessfulHookWithFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "hook-failed"},
[]release.Hook_DeletePolicy{release.Hook_FAILED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
}
func TestFailedHookWithSucceededDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
}
func TestFailedHookWithFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_FAILED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook failed", hook.Name)
}
}
func TestSuccessfulHookWithSucceededOrFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_FAILED},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestFailedHookWithSucceededOrFailedDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_FAILED},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook failed", hook.Name)
}
}
func TestHookAlreadyExists(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName, nil, nil)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
err = execHookShouldFailWithError(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade, errResourceExists)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after already exists error", hook.Name)
}
}
func TestHookDeletingWithBeforeHookCreationDeletePolicy(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{"helm.sh/hook-delete-policy": "before-hook-creation"},
[]release.Hook_DeletePolicy{release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook succeeded", hook.Name)
}
}
func TestSuccessfulHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}
func TestFailedHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
err = execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
}
func TestFailedThenSuccessfulHookWithMixedDeletePolicies(t *testing.T) {
ctx := newDeletePolicyContext()
hook := deletePolicyHookStub(ctx.HookName,
map[string]string{
"mockHooksKubeClient/Emulate": "hook-failed",
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err := execHookShouldFail(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreInstall)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; !hasResource {
t.Errorf("expected resource %s to be existing after hook failed", hook.Name)
}
hook = deletePolicyHookStub(ctx.HookName,
map[string]string{
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation",
},
[]release.Hook_DeletePolicy{release.Hook_SUCCEEDED, release.Hook_BEFORE_HOOK_CREATION},
)
err = execHookShouldSucceed(ctx.ReleaseServer, hook, ctx.ReleaseName, ctx.Namespace, hooks.PreUpgrade)
if err != nil {
t.Error(err)
}
if _, hasResource := ctx.KubeClient.Resources[hook.Name]; hasResource {
t.Errorf("expected resource %s to be unexisting after hook succeeded", hook.Name)
}
}

@ -80,7 +80,7 @@ func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*rele
return nil, nil, err
}
// If new values were not supplied in the upgrade, re-use the existing values.
// determine if values will be reused
if err := s.reuseValues(req, currentRelease); err != nil {
return nil, nil, err
}

@ -17,6 +17,7 @@ limitations under the License.
package tiller
import (
"fmt"
"strings"
"testing"
@ -128,6 +129,107 @@ func TestUpdateRelease_ResetValues(t *testing.T) {
}
}
// This is a regression test for a bug found in issue #3655
func TestUpdateRelease_ComplexReuseValues(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
installReq := &services.InstallReleaseRequest{
Namespace: "spaced",
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithHook)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo: bar"},
}
fmt.Println("Running Install release with foo: bar override")
installResp, err := rs.InstallRelease(c, installReq)
if err != nil {
t.Fatal(err)
}
rel := installResp.Release
req := &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
}
fmt.Println("Running Update release with no overrides and no reuse-values flag")
res, err := rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
expect := "foo: bar"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Config.Raw)
}
rel = res.Release
req = &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo2: bar2"},
ReuseValues: true,
}
fmt.Println("Running Update release with foo2: bar2 override and reuse-values")
res, err = rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
// This should have the newly-passed overrides.
expect = "foo: bar\nfoo2: bar2\n"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected request config to be %q, got %q", expect, res.Release.Config.Raw)
}
rel = res.Release
req = &services.UpdateReleaseRequest{
Name: rel.Name,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/hello", Data: []byte("hello: world")},
{Name: "templates/hooks", Data: []byte(manifestWithUpgradeHooks)},
},
Values: &chart.Config{Raw: "defaultFoo: defaultBar"},
},
Values: &chart.Config{Raw: "foo: baz"},
ReuseValues: true,
}
fmt.Println("Running Update release with foo=baz override with reuse-values flag")
res, err = rs.UpdateRelease(c, req)
if err != nil {
t.Fatalf("Failed updated: %s", err)
}
expect = "foo: baz\nfoo2: bar2\n"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Config.Raw)
}
}
func TestUpdateRelease_ReuseValues(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
@ -157,8 +259,8 @@ func TestUpdateRelease_ReuseValues(t *testing.T) {
if res.Release.Chart.Values != nil && res.Release.Chart.Values.Raw != expect {
t.Errorf("Expected chart values to be %q, got %q", expect, res.Release.Chart.Values.Raw)
}
// This should have the newly-passed overrides.
expect = "name2: val2"
// This should have the newly-passed overrides and any other computed values. `name: value` comes from release Config via releaseStub()
expect = "name: value\nname2: val2\n"
if res.Release.Config != nil && res.Release.Config.Raw != expect {
t.Errorf("Expected request config to be %q, got %q", expect, res.Release.Config.Raw)
}

@ -21,17 +21,20 @@ import (
"crypto/x509"
"fmt"
"io/ioutil"
"k8s.io/helm/pkg/urlutil"
)
// NewClientTLS returns tls.Config appropriate for client auth.
func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
cert, err := CertFromFilePair(certFile, keyFile)
if err != nil {
return nil, err
}
config := tls.Config{
Certificates: []tls.Certificate{*cert},
func newTLSConfigCommon(certFile, keyFile, caFile string) (*tls.Config, error) {
config := tls.Config{}
if certFile != "" && keyFile != "" {
cert, err := CertFromFilePair(certFile, keyFile)
if err != nil {
return nil, err
}
config.Certificates = []tls.Certificate{*cert}
}
if caFile != "" {
cp, err := CertPoolFromFile(caFile)
if err != nil {
@ -39,9 +42,32 @@ func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
}
config.RootCAs = cp
}
return &config, nil
}
// NewClientTLS returns tls.Config appropriate for client auth.
func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
return newTLSConfigCommon(certFile, keyFile, caFile)
}
// NewTLSConfig returns tls.Config appropriate for client and/or server auth.
func NewTLSConfig(url, certFile, keyFile, caFile string) (*tls.Config, error) {
config, err := newTLSConfigCommon(certFile, keyFile, caFile)
if err != nil {
return nil, err
}
config.BuildNameToCertificate()
serverName, err := urlutil.ExtractHostname(url)
if err != nil {
return nil, err
}
config.ServerName = serverName
return config, nil
}
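A usage sketch for the split constructors: clients that only need mutual-TLS auth keep calling NewClientTLS, and with empty cert/key paths the common helper now simply skips loading a key pair. The file paths and Tiller address below are hypothetical:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	"k8s.io/helm/pkg/tlsutil"
)

func main() {
	cfg, err := tlsutil.NewClientTLS("/etc/helm/client.crt", "/etc/helm/client.key", "/etc/helm/ca.crt")
	if err != nil {
		log.Fatalf("building TLS config: %v", err)
	}

	conn, err := grpc.Dial("tiller-deploy:44134",
		grpc.WithTransportCredentials(credentials.NewTLS(cfg)))
	if err != nil {
		log.Fatalf("dialing tiller: %v", err)
	}
	defer conn.Close()
}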
// CertPoolFromFile returns an x509.CertPool containing the certificates
// in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not

@ -80,7 +80,7 @@ checkDesiredVersion() {
if type "curl" > /dev/null; then
TAG=$(curl -SsL $release_url | awk '/\/tag\//' | grep -v no-underline | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
elif type "wget" > /dev/null; then
TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | grep -v no-underline | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
fi
if [ "x$TAG" == "x" ]; then
echo "Cannot determine ${DESIRED_VERSION} tag."
@ -184,7 +184,9 @@ help () {
# cleanup temporary files to avoid https://github.com/kubernetes/helm/issues/2977
cleanup() {
rm -rf "$HELM_TMP_ROOT"
if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
rm -rf "$HELM_TMP_ROOT"
fi
}
# Execution
