Merge branch 'master' into feat/app-version

Signed-off-by: Kevin Labesse <kevin@labesse.me>
commit d43268ce92

@ -6,10 +6,10 @@ environment:
install:
- ps: iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/fishworks/gofish/master/scripts/install.ps1'))
- gofish init
- gofish install dep
- dep ensure -v
- gofish install glide
- glide install --strip-vendor
cache:
- vendor -> Gopkg.lock
- vendor -> glide.lock
build: "off"
deploy: "off"
test_script:

@ -0,0 +1,12 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
1. Make sure to read the Contributing Guide before submitting your PR: https://github.com/helm/helm/blob/master/CONTRIBUTING.md
2. If this PR closes another issue, add 'closes #<issue number>' somewhere in the PR summary. GitHub will automatically close that issue when this PR gets merged. Alternatively, adding 'refs #<issue number>' will not close the issue, but help provide the reviewer more context.-->
**What this PR does / why we need it**:
**Special notes for your reviewer**:
**If applicable**:
- [ ] this PR contains documentation
- [ ] this PR contains unit tests
- [ ] this PR has been tested for backwards compatibility

.gitignore vendored

@ -11,3 +11,4 @@ rootfs/rudder
vendor/
*.exe
.idea/
*.iml

@ -178,7 +178,7 @@ contributing to Helm. All issue types follow the same general lifecycle. Differe
1. Submit a pull request.
Coding conventions and standards are explained in the official developer docs:
https://github.com/helm/helm/blob/master/docs/developers.md
[Developers Guide](docs/developers.md)
The next section contains more information on the workflow followed for PRs

@ -32,6 +32,7 @@ Think of it like apt/yum/homebrew for Kubernetes.
## Install
Binary downloads of the Helm client can be found on [the Releases page](https://github.com/helm/helm/releases/latest).
Unpack the `helm` binary and add it to your PATH and you are good to go!

@ -6,5 +6,5 @@ Packages
- `hapi.chart` Complete serialization of Helm charts
- `hapi.release` Information about installed charts (Releases) such as metadata about when they were installed, their status, and how they were configured.
- `hapi.services.rudder` Definition for the ReleaseModuleService used by Tiller to manipulate releases on a given node
- `hapi.services.tiller` Definition of the ReleaseService provoded by Tiller and used by Helm clients to manipulate releases cluster wide.
- `hapi.services.tiller` Definition of the ReleaseService provided by Tiller and used by Helm clients to manipulate releases cluster wide.
- `hapi.version` Version meta-data used by tiller to express its version

@ -76,7 +76,7 @@ service ReleaseService {
rpc RollbackRelease(RollbackReleaseRequest) returns (RollbackReleaseResponse) {
}
// ReleaseHistory retrieves a releasse's history.
// ReleaseHistory retrieves a release's history.
rpc GetHistory(GetHistoryRequest) returns (GetHistoryResponse) {
}
@ -212,6 +212,8 @@ message UpdateReleaseRequest {
bool force = 11;
// Description, if set, will set the description for the updated release
string description = 12;
// Render subchart notes if enabled
bool subNotes = 13;
}
// UpdateReleaseResponse is the response to an update request.
@ -281,6 +283,9 @@ message InstallReleaseRequest {
// Description, if set, will set the description for the installed release
string description = 11;
bool subNotes = 12;
}
// InstallReleaseResponse is the response from a release installation.
@ -298,7 +303,7 @@ message UninstallReleaseRequest {
bool purge = 3;
// timeout specifies the max amount of time any kubernetes client command can run.
int64 timeout = 4;
// Description, if set, will set the description for the uninnstalled release
// Description, if set, will set the description for the uninstalled release
string description = 5;
}
@ -339,6 +344,8 @@ message TestReleaseRequest {
int64 timeout = 2;
// cleanup specifies whether or not to attempt pod deletion after test completes
bool cleanup = 3;
// parallel specifies whether or not to run test pods in parallel
bool parallel = 4;
}
// TestReleaseResponse represents a message from executing a test

@ -75,6 +75,9 @@ func newCreateCmd(out io.Writer) *cobra.Command {
if len(args) == 0 {
return errors.New("the name of the new chart is required")
}
if len(args) > 1 {
return errors.New("command 'create' doesn't support multiple arguments")
}
cc.name = args[0]
return cc.run()
},

@ -143,8 +143,9 @@ func TestCreateStarterCmd(t *testing.T) {
t.Errorf("Wrong API version: %q", c.Metadata.ApiVersion)
}
if l := len(c.Templates); l != 7 {
t.Errorf("Expected 6 templates, got %d", l)
expectedTemplateCount := 7
if l := len(c.Templates); l != expectedTemplateCount {
t.Errorf("Expected %d templates, got %d", expectedTemplateCount, l)
}
found := false

@ -29,6 +29,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/client-go/util/homedir"
"k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/helm/environment"
"k8s.io/helm/pkg/helm/helmpath"
@ -167,7 +168,7 @@ func TestRootCmd(t *testing.T) {
{
name: "defaults",
args: []string{"home"},
home: filepath.Join(os.Getenv("HOME"), "/.helm"),
home: filepath.Join(homedir.HomeDir(), ".helm"),
},
{
name: "with --home set",
@ -236,7 +237,7 @@ func TestTLSFlags(t *testing.T) {
homePath := os.Getenv("HELM_HOME")
if homePath == "" {
homePath = filepath.Join(os.Getenv("HOME"), ".helm")
homePath = filepath.Join(homedir.HomeDir(), ".helm")
}
home := helmpath.Home(homePath)

@ -18,15 +18,13 @@ package main
import (
"fmt"
"io"
"strings"
"github.com/ghodss/yaml"
"github.com/golang/protobuf/ptypes/any"
"github.com/spf13/cobra"
"io"
"strings"
"k8s.io/helm/pkg/chartutil"
"k8s.io/kubernetes/pkg/util/slice"
)
const inspectDesc = `
@ -256,9 +254,23 @@ func (i *inspectCmd) run() error {
func findReadme(files []*any.Any) (file *any.Any) {
for _, file := range files {
if slice.ContainsString(readmeFileNames, strings.ToLower(file.TypeUrl), nil) {
if containsString(readmeFileNames, strings.ToLower(file.TypeUrl), nil) {
return file
}
}
return nil
}
// containsString checks if a given slice of strings contains the provided string.
// If a modifier func is provided, it is called with the slice item before the comparison.
func containsString(slice []string, s string, modifier func(s string) string) bool {
for _, item := range slice {
if item == s {
return true
}
if modifier != nil && modifier(item) == s {
return true
}
}
return false
}
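A self-contained sketch of how the new `containsString` helper behaves. The function body is copied verbatim from the hunk above; the `readmeFileNames` value shown here is only a stand-in for the real variable defined elsewhere in `inspect.go`:

```go
package main

import (
	"fmt"
	"strings"
)

// containsString is copied from the hunk above for illustration.
func containsString(slice []string, s string, modifier func(s string) string) bool {
	for _, item := range slice {
		if item == s {
			return true
		}
		if modifier != nil && modifier(item) == s {
			return true
		}
	}
	return false
}

func main() {
	// Stand-in for the readmeFileNames variable used by findReadme.
	readmeFileNames := []string{"readme.md", "readme.txt", "readme"}

	// Mirrors the findReadme call: the candidate name is lowercased and no modifier is passed.
	fmt.Println(containsString(readmeFileNames, strings.ToLower("README.md"), nil)) // true

	// The optional modifier is applied to each slice item before the comparison.
	mixedCase := []string{"README.MD", "NOTES.txt"}
	fmt.Println(containsString(mixedCase, "readme.md", strings.ToLower)) // true
}
```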

@ -137,6 +137,7 @@ type installCmd struct {
password string
devel bool
depUp bool
subNotes bool
description string
certFile string
@ -221,6 +222,7 @@ func newInstallCmd(c helm.Interface, out io.Writer) *cobra.Command {
f.StringVar(&inst.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
f.BoolVar(&inst.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.")
f.BoolVar(&inst.depUp, "dep-up", false, "run helm dependency update before installing the chart")
f.BoolVar(&inst.subNotes, "render-subchart-notes", false, "render subchart notes along with the parent")
f.StringVar(&inst.description, "description", "", "specify a description for the release")
// set defaults from environment
@ -306,6 +308,7 @@ func (i *installCmd) run() error {
helm.InstallReuseName(i.replace),
helm.InstallDisableHooks(i.disableHooks),
helm.InstallDisableCRDHook(i.disableCRDHook),
helm.InstallSubNotes(i.subNotes),
helm.InstallTimeout(i.timeout),
helm.InstallWait(i.wait),
helm.InstallDescription(i.description))

@ -17,10 +17,12 @@ package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"testing"
"github.com/spf13/cobra"
@ -53,6 +55,13 @@ func TestSetVersion(t *testing.T) {
func TestPackage(t *testing.T) {
statExe := "stat"
statFileMsg := "no such file or directory"
if runtime.GOOS == "windows" {
statExe = "FindFirstFile"
statFileMsg = "The system cannot find the file specified."
}
tests := []struct {
name string
flags map[string]string
@ -106,7 +115,7 @@ func TestPackage(t *testing.T) {
name: "package --destination does-not-exist",
args: []string{"testdata/testcharts/alpine"},
flags: map[string]string{"destination": "does-not-exist"},
expect: "stat does-not-exist: no such file or directory",
expect: fmt.Sprintf("Failed to save: %s does-not-exist: %s", statExe, statFileMsg),
err: true,
},
{

@ -34,11 +34,12 @@ The tests to be run are defined in the chart that was installed.
`
type releaseTestCmd struct {
name string
out io.Writer
client helm.Interface
timeout int64
cleanup bool
name string
out io.Writer
client helm.Interface
timeout int64
cleanup bool
parallel bool
}
func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command {
@ -67,6 +68,7 @@ func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command {
settings.AddFlagsTLS(f)
f.Int64Var(&rlsTest.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&rlsTest.cleanup, "cleanup", false, "delete test pods upon completion")
f.BoolVar(&rlsTest.parallel, "parallel", false, "run test pods in parallel")
// set defaults from environment
settings.InitTLS(f)
@ -79,6 +81,7 @@ func (t *releaseTestCmd) run() (err error) {
t.name,
helm.ReleaseTestTimeout(t.timeout),
helm.ReleaseTestCleanup(t.cleanup),
helm.ReleaseTestParallel(t.parallel),
)
testErr := &testErr{}

@ -154,7 +154,7 @@ func (s *searchCmd) buildIndex() (*search.Index, error) {
f := s.helmhome.CacheIndex(n)
ind, err := repo.LoadIndexFile(f)
if err != nil {
fmt.Fprintf(s.out, "WARNING: Repo %q is corrupt or missing. Try 'helm repo update'.", n)
fmt.Fprintf(s.out, "WARNING: Repo %q is corrupt or missing. Try 'helm repo update'.\n", n)
continue
}

@ -75,7 +75,7 @@ func TestTemplateCmd(t *testing.T) {
{
name: "check_execute_absolute",
desc: "verify --execute single template",
args: []string{subchart1ChartPath, "-x", subchart1AbsChartPath + "/" + "templates/service.yaml", "--set", "service.name=apache"},
args: []string{subchart1ChartPath, "-x", filepath.Join(subchart1AbsChartPath, "templates", "service.yaml"), "--set", "service.name=apache"},
expectKey: "subchart1/templates/service.yaml",
expectValue: "protocol: TCP\n name: apache",
},
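A brief aside on why `filepath.Join` is used here (illustrative, not part of the change): it builds the path with the operating system's separator, so the expected path also matches on Windows, whereas concatenating with "/" only matches on Unix-like systems.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Prints "subchart1/templates/service.yaml" on Linux/macOS and
	// "subchart1\templates\service.yaml" on Windows.
	fmt.Println(filepath.Join("subchart1", "templates", "service.yaml"))
}
```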

@ -110,6 +110,7 @@ type upgradeCmd struct {
username string
password string
devel bool
subNotes bool
description string
certFile string
@ -175,6 +176,7 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command {
f.StringVar(&upgrade.keyFile, "key-file", "", "identify HTTPS client using this SSL key file")
f.StringVar(&upgrade.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
f.BoolVar(&upgrade.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.")
f.BoolVar(&upgrade.subNotes, "render-subchart-notes", false, "render subchart notes along with parent")
f.StringVar(&upgrade.description, "description", "", "specify the description to use for the upgrade, rather than the default")
f.MarkDeprecated("disable-hooks", "use --no-hooks instead")
@ -273,6 +275,7 @@ func (u *upgradeCmd) run() error {
helm.UpgradeTimeout(u.timeout),
helm.ResetValues(u.resetValues),
helm.ReuseValues(u.reuseValues),
helm.UpgradeSubNotes(u.subNotes),
helm.UpgradeWait(u.wait),
helm.UpgradeDescription(u.description))
if err != nil {

@ -28,7 +28,7 @@ func TestVerifyCmd(t *testing.T) {
statPathMsg := "no such file or directory"
statFileMsg := statPathMsg
if runtime.GOOS == "windows" {
statExe = "GetFileAttributesEx"
statExe = "FindFirstFile"
statPathMsg = "The system cannot find the path specified."
statFileMsg = "The system cannot find the file specified."
}

@ -24,6 +24,7 @@
- [Variables](chart_template_guide/variables.md)
- [Named Templates (Partials)](chart_template_guide/named_templates.md)
- [Accessing Files Inside Templates](chart_template_guide/accessing_files.md)
- [Ignoring unwanted files and folders](chart_template_guide/helm_ignore_file.md)
- [Creating a NOTES.txt File](chart_template_guide/notes_files.md)
- [Subcharts and Global Values](chart_template_guide/subcharts_and_globals.md)
- [Debugging Templates](chart_template_guide/debugging.md)

@ -28,10 +28,10 @@ resources that use that CRD in _another_ chart.
In this method, each chart must be installed separately.
### Method 2: Pre-install Hooks
### Method 2: Crd-install Hooks
To package the two together, add a `pre-install` hook to the CRD definition so
To package the two together, add a `crd-install` hook to the CRD definition so
that it is fully installed before the rest of the chart is executed.
Note that if you create the CRD with a `pre-install` hook, that CRD definition
Note that if you create the CRD with a `crd-install` hook, that CRD definition
will not be deleted when `helm delete` is run.

@ -0,0 +1,23 @@
# The .helmignore file
The `.helmignore` file is used to specify files you don't want to include in your helm chart.
If this file exists, the `helm package` command will ignore all the files that match the patterns specified in the `.helmignore` file while packaging your application.
This can help avoid unnecessary or sensitive files or directories being added to your helm chart.
The `.helmignore` file supports Unix shell glob matching, relative path matching, and negation (prefixed with !). Only one pattern per line is considered.
Here is an example `.helmignore` file:
```
# comment
.git
*/temp*
*/*/temp*
temp?
```
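As a rough illustration of the shell-glob matching described above, and not Helm's actual `.helmignore` implementation, the sketch below applies the example patterns with Go's standard `path/filepath.Match`; the file names are made up, and negation (`!`) is omitted:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Patterns from the example .helmignore above (the "# comment" line is skipped).
	patterns := []string{".git", "*/temp*", "*/*/temp*", "temp?"}

	// Hypothetical paths relative to the chart root.
	files := []string{"temp1", "config/tempfile", "templates/service.yaml"}

	for _, f := range files {
		ignored := false
		for _, p := range patterns {
			if ok, _ := filepath.Match(p, f); ok {
				ignored = true
				break
			}
		}
		fmt.Printf("%-30s ignored=%v\n", f, ignored)
	}
}
```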
**We'd love your help** making this document better. To add, correct, or remove
information, [file an issue](https://github.com/helm/helm/issues) or
send us a pull request.

@ -116,15 +116,22 @@ be deprecated. The chart name can later be reused by publishing a newer version
that is not marked as deprecated. The workflow for deprecating charts, as
followed by the [helm/charts](https://github.com/helm/charts)
project is:
- Update chart's `Chart.yaml` to mark the chart as deprecated, bumping the
version
- Release the new chart version in the Chart Repository
- Remove the chart from the source repository (e.g. git)
- Update chart's `Chart.yaml` to mark the chart as deprecated, bumping the version
- Release the new chart version in the Chart Repository
- Remove the chart from the source repository (e.g. git)
## Chart LICENSE, README and NOTES
Charts can also contain files that describe the installation, configuration, usage and license of a
chart. A README for a chart should be formatted in Markdown (README.md), and should generally
chart.
A LICENSE is a plain text file containing the [license](https://en.wikipedia.org/wiki/Software_license)
for the chart. The chart can contain a license as it may have programming logic in the templates and
would therefore not be configuration only. There can also be separate license(s) for the application
installed by the chart, if required.
A README for a chart should be formatted in Markdown (README.md), and should generally
contain:
- A description of the application or service the chart provides
@ -153,7 +160,6 @@ the preferred method of declaring dependencies is by using a
**Note:** The `dependencies:` section of the `Chart.yaml` from Helm
Classic has been completely removed.
### Managing Dependencies with `requirements.yaml`
A `requirements.yaml` file is a simple file for listing your
@ -233,6 +239,7 @@ dependencies:
```
In the above example we will get 3 dependencies in all for `parentchart`
```
subchart
new-subchart-1
@ -259,27 +266,28 @@ Tags - The tags field is a YAML list of labels to associate with this chart.
In the top parent's values, all charts with tags can be enabled or disabled by
specifying the tag and a boolean value.
````
```yaml
# parentchart/requirements.yaml
dependencies:
- name: subchart1
repository: http://localhost:10191
version: 0.1.0
condition: subchart1.enabled,global.subchart1.enabled
tags:
- front-end
- subchart1
- name: subchart2
repository: http://localhost:10191
version: 0.1.0
condition: subchart2.enabled,global.subchart2.enabled
tags:
- back-end
- subchart2
- name: subchart1
repository: http://localhost:10191
version: 0.1.0
condition: subchart1.enabled,global.subchart1.enabled
tags:
- front-end
- subchart1
````
````
- name: subchart2
repository: http://localhost:10191
version: 0.1.0
condition: subchart2.enabled,global.subchart2.enabled
tags:
- back-end
- subchart2
```
```yaml
# parentchart/values.yaml
subchart1:
@ -287,7 +295,7 @@ subchart1:
tags:
front-end: false
back-end: true
````
```
In the above example all charts with the tag `front-end` would be disabled but since the
`subchart1.enabled` path evaluates to 'true' in the parent's values, the condition will override the
@ -307,12 +315,11 @@ helm install --set tags.front-end=true --set subchart2.enabled=false
##### Tags and Condition Resolution
* **Conditions (when set in values) always override tags.** The first condition
path that exists wins and subsequent ones for that chart are ignored.
* Tags are evaluated as 'if any of the chart's tags are true then enable the chart'.
* Tags and conditions values must be set in the top parent's values.
* The `tags:` key in values must be a top level key. Globals and nested `tags:` tables
- **Conditions (when set in values) always override tags.**
- The first condition path that exists wins and subsequent ones for that chart are ignored.
- Tags are evaluated as 'if any of the chart's tags are true then enable the chart'.
- Tags and conditions values must be set in the top parent's values.
- The `tags:` key in values must be a top level key. Globals and nested `tags:` tables
are not currently supported.
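A minimal, self-contained sketch (not Helm's actual resolver) of the rule just described: a condition found in the parent's values always wins, otherwise the chart is enabled if any of its tags evaluates to true:

```go
package main

import "fmt"

// chartEnabled mirrors the resolution rule described above: a condition value,
// when present in the parent's values, always wins; otherwise the chart is
// enabled when any of its tags is true. With no condition and no tag values,
// the chart keeps its default (enabled).
func chartEnabled(condition *bool, tagValues []bool) bool {
	if condition != nil {
		return *condition
	}
	for _, t := range tagValues {
		if t {
			return true
		}
	}
	return len(tagValues) == 0
}

func main() {
	enabled := true

	// subchart1: the subchart1.enabled condition (true) overrides its front-end tag (false).
	fmt.Println(chartEnabled(&enabled, []bool{false})) // true

	// A chart with no condition and only the front-end tag (false) is disabled.
	fmt.Println(chartEnabled(nil, []bool{false})) // false
}
```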
#### Importing Child Values via requirements.yaml
@ -338,6 +345,7 @@ directly into the parent's values by specifying the keys to import as in the exa
import-values:
- data
```
```yaml
# child's values.yaml file
...
@ -381,6 +389,7 @@ dependencies:
- child: default.data
parent: myimports
```
In the above example, values found at `default.data` in the subchart1's values will be imported
to the `myimports` key in the parent chart's values as detailed below:
@ -393,6 +402,7 @@ myimports:
mystring: "helm rocks!"
```
```yaml
# subchart1's values.yaml file
@ -402,6 +412,7 @@ default:
mybool: true
```
The parent chart's resulting values would be:
```yaml
@ -504,10 +515,10 @@ through the template engine.
Values for the templates are supplied two ways:
- Chart developers may supply a file called `values.yaml` inside of a
chart. This file can contain default values.
- Chart users may supply a YAML file that contains values. This can be
provided on the command line with `helm install`.
- Chart developers may supply a file called `values.yaml` inside of a
chart. This file can contain default values.
- Chart users may supply a YAML file that contains values. This can be
provided on the command line with `helm install`.
When a user supplies custom values, these values will override the
values in the chart's `values.yaml` file.

@ -167,7 +167,7 @@ workflow for doing this is as follows:
3. Add your repository as a remote for `$GOPATH/src/k8s.io/helm`
4. Create a new working branch (`git checkout -b feat/my-feature`) and
do your work on that branch.
5. When you are ready for us to review, push your branch to GitHub, and
5. When you are ready for us to review, sign your commit, push your branch to GitHub, and
then open a new pull request with us.
For Git commit messages, we follow the [Semantic Commit Messages](http://karma-runner.github.io/0.13/dev/git-commit-msg.html):

@ -94,6 +94,7 @@ helm install [CHART] [flags]
--no-crd-hook prevent CRD hooks from running, but run other hooks
--no-hooks prevent hooks from running during install
--password string chart repository password where to locate the requested chart
--render-subchart-notes render subchart notes along with the parent
--replace re-use the given name, even if that name is already used. This is unsafe in production
--repo string chart repository url where to locate the requested chart
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)

@ -20,6 +20,7 @@ helm test [RELEASE] [flags]
```
--cleanup delete test pods upon completion
-h, --help help for test
--parallel run test pods in parallel
--timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300)
--tls enable TLS for request
--tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem")
@ -45,4 +46,4 @@ helm test [RELEASE] [flags]
* [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 10-Aug-2018
###### Auto generated by spf13/cobra on 9-Nov-2018

@ -80,6 +80,7 @@ helm upgrade [RELEASE] [CHART] [flags]
--no-hooks disable pre/post upgrade hooks
--password string chart repository password where to locate the requested chart
--recreate-pods performs pods restart for the resource if applicable
--render-subchart-notes render subchart notes along with parent
--repo string chart repository url where to locate the requested chart
--reset-values when upgrading, reset the values to the ones built into the chart
--reuse-values when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.

@ -65,7 +65,6 @@ Tools layered on top of Helm or Tiller.
- [AppsCode Swift](https://github.com/appscode/swift) - Ajax friendly Helm Tiller Proxy using [grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway)
- [Armada](https://github.com/att-comdev/armada) - Manage prefixed releases throughout various Kubernetes namespaces, and removes completed jobs for complex deployments. Used by the [Openstack-Helm](https://github.com/openstack/openstack-helm) team.
- [Autohelm](https://github.com/reactiveops/autohelm) - Autohelm is _another_ simple declarative spec for deploying helm charts. Written in python and supports git urls as a source for helm charts.
- [ChartMuseum](https://github.com/chartmuseum/chartmuseum) - Helm Chart Repository with support for Amazon S3 and Google Cloud Storage
- [Chartify](https://github.com/appscode/chartify) - Generate Helm charts from existing Kubernetes resources.
- [Codefresh](https://codefresh.io) - Kubernetes native CI/CD and management platform with UI dashboards for managing Helm charts and releases
@ -77,10 +76,12 @@ Tools layered on top of Helm or Tiller.
- [Helmsman](https://github.com/Praqma/helmsman) - Helmsman is a helm-charts-as-code tool which enables installing/upgrading/protecting/moving/deleting releases from version controlled desired state files (described in a simple TOML format).
- [Landscaper](https://github.com/Eneco/landscaper/) - "Landscaper takes a set of Helm Chart references with values (a desired state), and realizes this in a Kubernetes cluster."
- [Monocular](https://github.com/helm/monocular) - Web UI for Helm Chart repositories
- [Orca](https://github.com/maorfr/orca) - Advanced CI\CD tool for Kubernetes and Helm made simple.
- [Orca](https://github.com/nuvo/orca) - Advanced CI\CD tool for Kubernetes and Helm made simple.
- [Quay App Registry](https://coreos.com/blog/quay-application-registry-for-kubernetes.html) - Open Kubernetes application registry, including a Helm access client
- [Reckoner](https://github.com/reactiveops/reckoner) - Reckoner (formerly Autohelm) is a tool for declarative management of helm releases. Written in python and supports git urls as a source for helm charts.
- [Rudder](https://github.com/AcalephStorage/rudder) - RESTful (JSON) proxy for Tiller's API
- [Schelm](https://github.com/databus23/schelm) - Render a Helm manifest to a directory
- [Shipper](https://github.com/bookingcom/shipper) - Multi-cluster canary or blue-green rollouts using Helm
- [VIM-Kubernetes](https://github.com/andrewstuart/vim-kubernetes) - VIM plugin for Kubernetes and Helm
## Helm Included

@ -3,8 +3,8 @@
**IMPORTANT**: If your experience deviates from this document, please document the changes to keep it up-to-date.
## Release Meetings
As part of the release process, two of the weekly developer calls will be co-opted
as "release meetings."
As part of the release process, two of the weekly developer calls will be
co-opted as "release meetings."
### Start of the Release Cycle
The first developer call after a release will be used as the release meeting to
@ -17,17 +17,19 @@ identified:
- Any other important details for the community
All of this information should be added to the GitHub milestone for the given
release. This should give the community and maintainers a clear set of guidelines
to follow when choosing whether or not to add issues and PRs to a given release.
release. This should give the community and maintainers a clear set of
guidelines to follow when choosing whether or not to add issues and PRs to a
given release.
### End (almost) of the Release Cycle
The developer call closest to two weeks before the scheduled release date will
be used to review any remaining PRs that should be pulled into the release. This
is the place to debate whether or not we should wait before cutting a release and
any other concerns. At the end of this meeting, if the release date has not been
pushed out, the first RC should be cut. Subsequent developer calls in between this
meeting and the release date should have some time set aside to see if any bugs
were found. Once the release date is reached, the final release can be cut
is the place to debate whether or not we should wait before cutting a release
and any other concerns. At the end of this meeting, if the release date has not
been pushed out, the first RC should be cut. Subsequent developer calls in
between this meeting and the release date should have some time set aside to see
if any bugs were found. Once the release date is reached, the final release can
be cut
## A Maintainer's Guide to Releasing Helm
@ -37,17 +39,28 @@ So you're in charge of a new release for Helm? Cool. Here's what to do...
Just kidding! :trollface:
All releases will be of the form vX.Y.Z where X is the major version number, Y is the minor version number and Z is the patch release number. This project strictly follows [semantic versioning](http://semver.org/) so following this step is critical.
All releases will be of the form vX.Y.Z where X is the major version number, Y
is the minor version number and Z is the patch release number. This project
strictly follows [semantic versioning](http://semver.org/) so following this
step is critical.
It is important to note that this document assumes that the git remote in your repository that corresponds to "https://github.com/helm/helm" is named "upstream". If yours is not (for example, if you've chosen to name it "origin" or something similar instead), be sure to adjust the listed snippets for your local environment accordingly. If you are not sure what your upstream remote is named, use a command like `git remote -v` to find out.
It is important to note that this document assumes that the git remote in your
repository that corresponds to "https://github.com/helm/helm" is named
"upstream". If yours is not (for example, if you've chosen to name it "origin"
or something similar instead), be sure to adjust the listed snippets for your
local environment accordingly. If you are not sure what your upstream remote is
named, use a command like `git remote -v` to find out.
If you don't have an upstream remote, you can add one easily using something like:
If you don't have an upstream remote, you can add one easily using something
like:
```shell
git remote add upstream git@github.com:helm/helm.git
```
In this doc, we are going to reference a few environment variables as well, which you may want to set for convenience. For major/minor releases, use the following:
In this doc, we are going to reference a few environment variables as well,
which you may want to set for convenience. For major/minor releases, use the
following:
```shell
export RELEASE_NAME=vX.Y.0
@ -68,7 +81,10 @@ export RELEASE_CANDIDATE_NAME="$RELEASE_NAME-rc.1"
### Major/Minor Releases
Major releases are for new feature additions and behavioral changes *that break backwards compatibility*. Minor releases are for new feature additions that do not break backwards compatibility. To create a major or minor release, start by creating a `release-vX.Y.0` branch from master.
Major releases are for new feature additions and behavioral changes *that break
backwards compatibility*. Minor releases are for new feature additions that do
not break backwards compatibility. To create a major or minor release, start by
creating a `release-vX.Y.0` branch from master.
```shell
git fetch upstream
@ -76,11 +92,13 @@ git checkout upstream/master
git checkout -b $RELEASE_BRANCH_NAME
```
This new branch is going to be the base for the release, which we are going to iterate upon later.
This new branch is going to be the base for the release, which we are going to
iterate upon later.
### Patch releases
Patch releases are a few critical cherry-picked fixes to existing releases. Start by creating a `release-vX.Y.Z` branch from the latest patch release.
Patch releases are a few critical cherry-picked fixes to existing releases.
Start by creating a `release-vX.Y.Z` branch from the latest patch release.
```shell
git fetch upstream --tags
@ -88,7 +106,8 @@ git checkout $PREVIOUS_PATCH_RELEASE
git checkout -b $RELEASE_BRANCH_NAME
```
From here, we can cherry-pick the commits we want to bring into the patch release:
From here, we can cherry-pick the commits we want to bring into the patch
release:
```shell
# get the commits ids we want to cherry-pick
@ -98,11 +117,13 @@ git cherry-pick -x <commit-id>
git cherry-pick -x <commit-id>
```
This new branch is going to be the base for the release, which we are going to iterate upon later.
This new branch is going to be the base for the release, which we are going to
iterate upon later.
## 2. Change the Version Number in Git
When doing a minor release, make sure to update pkg/version/version.go with the new release version.
When doing a minor release, make sure to update pkg/version/version.go with the
new release version.
```shell
$ git diff pkg/version/version.go
@ -128,28 +149,36 @@ git commit -m "bump version to $RELEASE_CANDIDATE_NAME"
## 3. Commit and Push the Release Branch
In order for others to start testing, we can now push the release branch upstream and start the test process.
In order for others to start testing, we can now push the release branch
upstream and start the test process.
```shell
git push upstream $RELEASE_BRANCH_NAME
```
Make sure to check [helm on CircleCI](https://circleci.com/gh/helm/helm) and make sure the release passed CI before proceeding.
Make sure to check [helm on CircleCI](https://circleci.com/gh/helm/helm) and
make sure the release passed CI before proceeding.
If anyone is available, let others peer-review the branch before continuing to ensure that all the proper changes have been made and all of the commits for the release are there.
If anyone is available, let others peer-review the branch before continuing to
ensure that all the proper changes have been made and all of the commits for the
release are there.
## 4. Create a Release Candidate
Now that the release branch is out and ready, it is time to start creating and iterating on release candidates.
Now that the release branch is out and ready, it is time to start creating and
iterating on release candidates.
```shell
git tag --sign --annotate "${RELEASE_CANDIDATE_NAME}" --message "Helm release ${RELEASE_CANDIDATE_NAME}"
git push upstream $RELEASE_CANDIDATE_NAME
```
CircleCI will automatically create a tagged release image and client binary to test with.
CircleCI will automatically create a tagged release image and client binary to
test with.
For testers, the process to start testing after CircleCI finishes building the artifacts involves the following steps to grab the client from Google Cloud Storage:
For testers, the process to start testing after CircleCI finishes building the
artifacts involves the following steps to grab the client from Google Cloud
Storage:
linux/amd64, using /bin/bash:
@ -169,21 +198,35 @@ windows/amd64, using PowerShell:
PS C:\> Invoke-WebRequest -Uri "https://kubernetes-helm.storage.googleapis.com/helm-$RELEASE_CANDIDATE_NAME-windows-amd64.zip" -OutFile "helm-$ReleaseCandidateName-windows-amd64.zip"
```
Then, unpack and move the binary to somewhere on your $PATH, or move it somewhere and add it to your $PATH (e.g. /usr/local/bin/helm for linux/macOS, C:\Program Files\helm\helm.exe for Windows).
Then, unpack and move the binary to somewhere on your $PATH, or move it
somewhere and add it to your $PATH (e.g. /usr/local/bin/helm for linux/macOS,
C:\Program Files\helm\helm.exe for Windows).
## 5. Iterate on Successive Release Candidates
Spend several days explicitly investing time and resources to try and break helm in every possible way, documenting any findings pertinent to the release. This time should be spent testing and finding ways in which the release might have caused various features or upgrade environments to have issues, not coding. During this time, the release is in code freeze, and any additional code changes will be pushed out to the next release.
Spend several days explicitly investing time and resources to try and break helm
in every possible way, documenting any findings pertinent to the release. This
time should be spent testing and finding ways in which the release might have
caused various features or upgrade environments to have issues, not coding.
During this time, the release is in code freeze, and any additional code changes
will be pushed out to the next release.
During this phase, the $RELEASE_BRANCH_NAME branch will keep evolving as you will produce new release candidates. The frequency of new candidates is up to the release manager: use your best judgement taking into account the severity of reported issues, testers' availability, and the release deadline date. Generally speaking, it is better to let a release roll over the deadline than to ship a broken release.
During this phase, the $RELEASE_BRANCH_NAME branch will keep evolving as you
will produce new release candidates. The frequency of new candidates is up to
the release manager: use your best judgement taking into account the severity of
reported issues, testers' availability, and the release deadline date. Generally
speaking, it is better to let a release roll over the deadline than to ship a
broken release.
Each time you'll want to produce a new release candidate, you will start by adding commits to the branch by cherry-picking from master:
Each time you'll want to produce a new release candidate, you will start by
adding commits to the branch by cherry-picking from master:
```shell
git cherry-pick -x <commit_id>
```
You will also want to update the release version number and the CHANGELOG as we did in steps 2 and 3 as separate commits.
You will also want to update the release version number and the CHANGELOG as we
did in steps 2 and 3 as separate commits.
After that, tag it and notify users of the new release candidate:
@ -197,7 +240,9 @@ From here on just repeat this process, continuously testing until you're happy w
## 6. Finalize the Release
When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure everything is in order, then finally push the release tag.
When you're finally happy with the quality of a release candidate, you can move
on and create the real thing. Double-check one last time to make sure everything
is in order, then finally push the release tag.
```shell
git checkout $RELEASE_BRANCH_NAME
@ -207,9 +252,13 @@ git push upstream $RELEASE_NAME
## 7. Write the Release Notes
We will auto-generate a changelog based on the commits that occurred during a release cycle, but it is usually more beneficial to the end-user if the release notes are hand-written by a human being/marketing team/dog.
We will auto-generate a changelog based on the commits that occurred during a
release cycle, but it is usually more beneficial to the end-user if the release
notes are hand-written by a human being/marketing team/dog.
If you're releasing a major/minor release, listing notable user-facing features is usually sufficient. For patch releases, do the same, but make note of the symptoms and who is affected.
If you're releasing a major/minor release, listing notable user-facing features
is usually sufficient. For patch releases, do the same, but make note of the
symptoms and who is affected.
An example release note for a minor release would look like this:
@ -226,6 +275,13 @@ The community keeps growing, and we'd love to see you there!
- Hang out at the Public Developer Call: Thursday, 9:30 Pacific via [Zoom](https://zoom.us/j/696660622)
- Test, debug, and contribute charts: [GitHub/helm/charts](https://github.com/helm/charts)
## Features and Changes
- Major
- features
- list
- here
## Installation and Upgrading
Download Helm X.Y. The common platform binaries are here:
@ -250,23 +306,45 @@ The [Quickstart Guide](https://docs.helm.sh/using_helm/#quickstart-guide) will g
## Changelog
- chore(*): bump version to v2.7.0 08c1144f5eb3e3b636d9775617287cc26e53dba4 (Adam Reese)
### Features
- ref(*): kubernetes v1.11 support efadbd88035654b2951f3958167afed014c46bc6 (Adam Reese)
- feat(helm): add $HELM_KEY_PASSPHRASE environment variable for signing helm charts (#4778) 1e26b5300b5166fabb90002535aacd2f9cc7d787
### Bug fixes
- fix circle not building tags f4f932fabd197f7e6d608c8672b33a483b4b76fa (Matthew Fisher)
### Code cleanup
- ref(kube): Gets rid of superfluous Sprintf call 3071a16f5eb3a2b646d9795617287cc26e53dba4 (Taylor Thomas)
- chore(*): bump version to v2.7.0 08c1144f5eb3e3b636d9775617287cc26e53dba4 (Adam Reese)
### Documentation Changes
- docs(release_checklist): fix changelog generation command (#4694) 8442851a5c566a01d9b4c69b368d64daa04f6a7f (Matthew Fisher)
```
The changelog at the bottom of the release notes can be generated with this command:
The changelog at the bottom of the release notes can be generated with this
command:
```shell
PREVIOUS_RELEASE=vX.Y.Z
git log --no-merges --pretty=format:'- %s %H (%aN)' $PREVIOUS_RELEASE..$RELEASE_NAME
```
Once finished, go into GitHub and edit the release notes for the tagged release with the notes written here.
After generating the changelog, you will need to categorize the changes as shown
in the example above.
Once finished, go into GitHub and edit the release notes for the tagged release
with the notes written here.
## 8. Evangelize
Congratulations! You're done. Go grab yourself a $DRINK_OF_CHOICE. You've earned it.
Congratulations! You're done. Go grab yourself a $DRINK_OF_CHOICE. You've earned
it.
After enjoying a nice $DRINK_OF_CHOICE, go forth and announce the glad tidings of the new release in Slack and on Twitter. You should also notify any key partners in the helm community such as the homebrew formula maintainers, the owners of incubator projects (e.g. ChartMuseum) and any other interested parties.
After enjoying a nice $DRINK_OF_CHOICE, go forth and announce the glad tidings
of the new release in Slack and on Twitter. You should also notify any key
partners in the helm community such as the homebrew formula maintainers, the
owners of incubator projects (e.g. ChartMuseum) and any other interested
parties.
Optionally, write a blog post about the new release and showcase some of the new features on there!
Optionally, write a blog post about the new release and showcase some of the new
features on there!

glide.lock generated

@ -1,5 +1,5 @@
hash: fcbba2207c6511df365dfe355dfe601a862d340bbf15db47938a404fd0ec58d0
updated: 2018-11-11T19:26:29.631232-05:00
hash: 2af9a5c4f891a0f44109a929a494b5aeaaffa3a87cd1f3881f25f79845703d5b
updated: 2018-12-14T21:39:31.112097Z
imports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
@ -15,12 +15,13 @@ imports:
subpackages:
- winterm
- name: github.com/Azure/go-autorest
version: bca49d5b51a50dc5bb17bbf6204c711c6dbded06
version: ea233b6412b0421a65dc6160e16c893364664a95
subpackages:
- autorest
- autorest/adal
- autorest/azure
- autorest/date
- logger
- version
- name: github.com/beorn7/perks
version: 3ac7bf7a47d159a033b107610db8a1b6575507a4
@ -81,14 +82,6 @@ imports:
- pkg/sysinfo
- pkg/term
- pkg/term/windows
- name: github.com/docker/go-connections
version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d
subpackages:
- nat
- sockets
- tlsconfig
- name: github.com/docker/go-units
version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
- name: github.com/docker/spdystream
version: 449fdfce4d962303d702fec724ef0ad181c92528
subpackages:
@ -102,13 +95,13 @@ imports:
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
version: 8483a886a90412cd6858df4ea3483dce9c8e35a3
- name: github.com/go-openapi/spec
version: 1de3e0542de65ad8d75452a595886fdd0befb363
version: 5bae59e25b21498baea7f9d46e9c147ec106a42e
- name: github.com/go-openapi/swag
version: f3f9494671f93fcff853e3c6e9e948b3eb71e590
version: 5899d5c5e619fda5fa86e14795a835f473ca284c
- name: github.com/gobwas/glob
version: 5ccd90ef52e1e632236f7326478d4faa74f99438
subpackages:
@ -120,12 +113,10 @@ imports:
- util/runes
- util/strings
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
subpackages:
- proto
- sortkeys
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/golang/groupcache
version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
subpackages:
@ -181,7 +172,7 @@ imports:
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/json-iterator/go
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
version: ab8a2e0c74be9d3be70b3184d9acc634935ded82
- name: github.com/mailru/easyjson
version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d
subpackages:
@ -207,14 +198,9 @@ imports:
- name: github.com/modern-go/concurrent
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
- name: github.com/modern-go/reflect2
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
version: 94122c33edd36123c84d5368cfb2b69df93a0ec8
- name: github.com/opencontainers/go-digest
version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
- name: github.com/opencontainers/image-spec
version: 372ad780f63454fbbbbcc7cf80e5b90245c13e13
subpackages:
- specs-go
- specs-go/v1
- name: github.com/peterbourgon/diskv
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
- name: github.com/pkg/errors
@ -273,7 +259,7 @@ imports:
- scrypt
- ssh/terminal
- name: golang.org/x/net
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
version: 0ed95abb35c445290478a5348a7b38bb154135fd
subpackages:
- context
- context/ctxhttp
@ -290,6 +276,10 @@ imports:
- internal
- jws
- jwt
- name: golang.org/x/sync
version: 1d60e4601c6fd243af51cc01ddf169918a5407ca
subpackages:
- semaphore
- name: golang.org/x/sys
version: 95c6576299259db960f6c5b9b69ea52422860fce
subpackages:
@ -366,7 +356,7 @@ imports:
- name: gopkg.in/yaml.v2
version: 670d4cfef0544295bc27a114dbac37980d83185a
- name: k8s.io/api
version: fd83cbc87e7632ccd8bbab63d2b673d4e0c631cc
version: 05914d821849570fba9eacfb29466f2d8d3cd229
subpackages:
- admission/v1beta1
- admissionregistration/v1alpha1
@ -374,6 +364,7 @@ imports:
- apps/v1
- apps/v1beta1
- apps/v1beta2
- auditregistration/v1alpha1
- authentication/v1
- authentication/v1beta1
- authorization/v1
@ -402,11 +393,11 @@ imports:
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apiextensions-apiserver
version: 05e89e265cc594459a3d33a63e779d94e6614c63
version: 0fe22c71c47604641d9aa352c785b7912c200562
subpackages:
- pkg/features
- name: k8s.io/apimachinery
version: 6dd46049f39503a1fc8d65de4bd566829e95faff
version: 2b1284ed4c93a43499e781493253e2ac5959c4fd
subpackages:
- pkg/api/equality
- pkg/api/errors
@ -414,7 +405,6 @@ imports:
- pkg/api/meta/testrestmapper
- pkg/api/resource
- pkg/api/validation
- pkg/api/validation/path
- pkg/apis/meta/internalversion
- pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured
@ -463,23 +453,21 @@ imports:
- third_party/forked/golang/netutil
- third_party/forked/golang/reflect
- name: k8s.io/apiserver
version: e85ad7b666fef0476185731329f4cff1536efff8
version: 3ccfe8365421eb08e334b195786a2973460741d8
subpackages:
- pkg/apis/audit
- pkg/authentication/authenticator
- pkg/authentication/serviceaccount
- pkg/authentication/user
- pkg/endpoints/request
- pkg/features
- pkg/util/feature
- name: k8s.io/cli-runtime
version: 79bf4e0b64544d8c490247abae089bea572ddae6
version: 835b10687cb6556f6b113099ef925146a56d5981
subpackages:
- pkg/genericclioptions
- pkg/genericclioptions/printers
- pkg/genericclioptions/resource
- name: k8s.io/client-go
version: 1638f8970cefaa404ff3a62950f88b08292b2696
version: 8d9ed539ba3134352c586810e749e58df4e94e4f
subpackages:
- discovery
- discovery/fake
@ -498,6 +486,8 @@ imports:
- kubernetes/typed/apps/v1beta1/fake
- kubernetes/typed/apps/v1beta2
- kubernetes/typed/apps/v1beta2/fake
- kubernetes/typed/auditregistration/v1alpha1
- kubernetes/typed/auditregistration/v1alpha1/fake
- kubernetes/typed/authentication/v1
- kubernetes/typed/authentication/v1/fake
- kubernetes/typed/authentication/v1beta1
@ -598,30 +588,20 @@ imports:
- util/integer
- util/jsonpath
- util/retry
- name: k8s.io/klog
version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
- name: k8s.io/kube-openapi
version: 0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803
version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d
subpackages:
- pkg/util/proto
- pkg/util/proto/testing
- pkg/util/proto/validation
- name: k8s.io/kubernetes
version: 54a352dda957bce0f88e49b65a6ee8bba8c0ba74
version: f2c8f1cadf1808ec28476682e49a3cce2b09efbf
subpackages:
- pkg/api/events
- pkg/api/legacyscheme
- pkg/api/pod
- pkg/api/ref
- pkg/api/resource
- pkg/api/service
- pkg/api/testapi
- pkg/api/v1/pod
- pkg/apis/admission
- pkg/apis/admission/install
- pkg/apis/admission/v1beta1
- pkg/apis/admissionregistration
- pkg/apis/admissionregistration/install
- pkg/apis/admissionregistration/v1alpha1
- pkg/apis/admissionregistration/v1beta1
- pkg/apis/apps
- pkg/apis/apps/install
- pkg/apis/apps/v1
@ -653,7 +633,6 @@ imports:
- pkg/apis/coordination/v1beta1
- pkg/apis/core
- pkg/apis/core/helper
- pkg/apis/core/helper/qos
- pkg/apis/core/install
- pkg/apis/core/pods
- pkg/apis/core/v1
@ -665,12 +644,7 @@ imports:
- pkg/apis/extensions
- pkg/apis/extensions/install
- pkg/apis/extensions/v1beta1
- pkg/apis/imagepolicy
- pkg/apis/imagepolicy/install
- pkg/apis/imagepolicy/v1alpha1
- pkg/apis/networking
- pkg/apis/networking/install
- pkg/apis/networking/v1
- pkg/apis/policy
- pkg/apis/policy/install
- pkg/apis/policy/v1beta1
@ -693,45 +667,36 @@ imports:
- pkg/apis/storage/v1alpha1
- pkg/apis/storage/v1beta1
- pkg/capabilities
- pkg/client/clientset_generated/internalclientset
- pkg/client/clientset_generated/internalclientset/scheme
- pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion
- pkg/client/clientset_generated/internalclientset/typed/apps/internalversion
- pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion
- pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion
- pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion
- pkg/client/clientset_generated/internalclientset/typed/batch/internalversion
- pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion
- pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion
- pkg/client/clientset_generated/internalclientset/typed/core/internalversion
- pkg/client/clientset_generated/internalclientset/typed/events/internalversion
- pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion
- pkg/client/clientset_generated/internalclientset/typed/networking/internalversion
- pkg/client/clientset_generated/internalclientset/typed/policy/internalversion
- pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion
- pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion
- pkg/client/clientset_generated/internalclientset/typed/settings/internalversion
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion
- pkg/controller
- pkg/controller/deployment/util
- pkg/credentialprovider
- pkg/features
- pkg/fieldpath
- pkg/generated
- pkg/kubectl
- pkg/kubectl/apps
- pkg/kubectl/cmd/get
- pkg/kubectl/cmd/templates
- pkg/kubectl/cmd/testing
- pkg/kubectl/cmd/util
- pkg/kubectl/cmd/util/openapi
- pkg/kubectl/cmd/util/openapi/testing
- pkg/kubectl/cmd/util/openapi/validation
- pkg/kubectl/describe
- pkg/kubectl/describe/versioned
- pkg/kubectl/generated
- pkg/kubectl/scheme
- pkg/kubectl/util
- pkg/kubectl/util/hash
- pkg/kubectl/util/certificate
- pkg/kubectl/util/deployment
- pkg/kubectl/util/event
- pkg/kubectl/util/fieldpath
- pkg/kubectl/util/i18n
- pkg/kubectl/util/podutils
- pkg/kubectl/util/printers
- pkg/kubectl/util/qos
- pkg/kubectl/util/rbac
- pkg/kubectl/util/resource
- pkg/kubectl/util/slice
- pkg/kubectl/util/storage
- pkg/kubectl/util/templates
- pkg/kubectl/util/term
- pkg/kubectl/validation
- pkg/kubelet/apis
@ -739,12 +704,7 @@ imports:
- pkg/master/ports
- pkg/printers
- pkg/printers/internalversion
- pkg/registry/rbac/validation
- pkg/scheduler/algorithm
- pkg/scheduler/algorithm/priorities/util
- pkg/scheduler/api
- pkg/scheduler/cache
- pkg/scheduler/util
- pkg/security/apparmor
- pkg/serviceaccount
- pkg/util/file
@ -754,7 +714,6 @@ imports:
- pkg/util/net/sets
- pkg/util/node
- pkg/util/parsers
- pkg/util/slice
- pkg/util/taints
- pkg/version
- name: k8s.io/utils
@ -764,6 +723,8 @@ imports:
- exec
- exec/testing
- pointer
- name: sigs.k8s.io/yaml
version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480
- name: vbom.ml/util
version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394
subpackages:

@ -3,6 +3,9 @@ import:
- package: golang.org/x/net
subpackages:
- context
- package: golang.org/x/sync
subpackages:
- semaphore
- package: github.com/spf13/cobra
version: fe5e611709b0c57fa4a89136deaa8e1d4004d053
- package: github.com/spf13/pflag
@ -41,19 +44,20 @@ import:
- package: github.com/prometheus/client_golang
version: 0.8.0
- package: github.com/grpc-ecosystem/go-grpc-prometheus
- package: k8s.io/kubernetes
version: release-1.12
version: release-1.13
- package: k8s.io/client-go
version: kubernetes-1.12.0
version: kubernetes-1.13.1
- package: k8s.io/api
version: kubernetes-1.12.0
version: kubernetes-1.13.1
- package: k8s.io/apimachinery
version: kubernetes-1.12.0
version: kubernetes-1.13.1
- package: k8s.io/apiserver
version: kubernetes-1.12.0
version: kubernetes-1.13.1
- package: k8s.io/cli-runtime
version: kubernetes-1.12.0
version: kubernetes-1.13.1
- package: k8s.io/apiextensions-apiserver
version: kubernetes-1.13.1
- package: github.com/cyphar/filepath-securejoin
version: ^0.2.1

@ -89,11 +89,11 @@ resources: {}
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
@ -158,12 +158,12 @@ spec:
- host: {{ . | quote }}
http:
paths:
{{- range $ingressPaths }}
{{- range $ingressPaths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}
`
@ -255,7 +255,7 @@ const defaultNotes = `1. Get the application URL by running these commands:
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "<CHARTNAME>.fullname" . }}'
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "<CHARTNAME>.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "<CHARTNAME>.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}

@ -25,7 +25,7 @@ A chart can be represented on the file system in one of two ways:
- As a tarred gzipped file containing a directory that then contains a
Chart.yaml file.
This package provides utilitites for working with those file formats.
This package provides utilities for working with those file formats.
The preferred way of loading a chart is using 'chartutil.Load`:

@ -17,58 +17,60 @@ limitations under the License.
package chartutil
import (
"archive/tar"
"compress/gzip"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
securejoin "github.com/cyphar/filepath-securejoin"
)
// Expand uncompresses and extracts a chart into the specified directory.
func Expand(dir string, r io.Reader) error {
gr, err := gzip.NewReader(r)
files, err := loadArchiveFiles(r)
if err != nil {
return err
}
defer gr.Close()
tr := tar.NewReader(gr)
for {
header, err := tr.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
//split header name and create missing directories
d, _ := filepath.Split(header.Name)
fullDir := filepath.Join(dir, d)
_, err = os.Stat(fullDir)
if err != nil && d != "" {
if err := os.MkdirAll(fullDir, 0700); err != nil {
// Get the name of the chart
var chartName string
for _, file := range files {
if file.Name == "Chart.yaml" {
ch, err := UnmarshalChartfile(file.Data)
if err != nil {
return err
}
chartName = ch.GetName()
}
}
if chartName == "" {
return errors.New("chart name not specified")
}
path := filepath.Clean(filepath.Join(dir, header.Name))
info := header.FileInfo()
if info.IsDir() {
if err = os.MkdirAll(path, info.Mode()); err != nil {
return err
}
continue
}
// Find the base directory
chartdir, err := securejoin.SecureJoin(dir, chartName)
if err != nil {
return err
}
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
// Copy all files verbatim. We don't parse these files because parsing can remove
// comments.
for _, file := range files {
outpath, err := securejoin.SecureJoin(chartdir, file.Name)
if err != nil {
return err
}
_, err = io.Copy(file, tr)
if err != nil {
file.Close()
// Make sure the necessary subdirs get created.
basedir := filepath.Dir(outpath)
if err := os.MkdirAll(basedir, 0755); err != nil {
return err
}
if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil {
return err
}
file.Close()
}
return nil
}

@ -0,0 +1,121 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package chartutil
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func TestExpand(t *testing.T) {
dest, err := ioutil.TempDir("", "helm-testing-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dest)
reader, err := os.Open("testdata/frobnitz-1.2.3.tgz")
if err != nil {
t.Fatal(err)
}
if err := Expand(dest, reader); err != nil {
t.Fatal(err)
}
expectedChartPath := filepath.Join(dest, "frobnitz")
fi, err := os.Stat(expectedChartPath)
if err != nil {
t.Fatal(err)
}
if !fi.IsDir() {
t.Fatalf("expected a chart directory at %s", expectedChartPath)
}
dir, err := os.Open(expectedChartPath)
if err != nil {
t.Fatal(err)
}
fis, err := dir.Readdir(0)
if err != nil {
t.Fatal(err)
}
expectLen := 12
if len(fis) != expectLen {
t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
}
for _, fi := range fis {
expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
if err != nil {
t.Fatal(err)
}
if fi.Size() != expect.Size() {
t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
}
}
}
func TestExpandFile(t *testing.T) {
dest, err := ioutil.TempDir("", "helm-testing-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dest)
if err := ExpandFile(dest, "testdata/frobnitz-1.2.3.tgz"); err != nil {
t.Fatal(err)
}
expectedChartPath := filepath.Join(dest, "frobnitz")
fi, err := os.Stat(expectedChartPath)
if err != nil {
t.Fatal(err)
}
if !fi.IsDir() {
t.Fatalf("expected a chart directory at %s", expectedChartPath)
}
dir, err := os.Open(expectedChartPath)
if err != nil {
t.Fatal(err)
}
fis, err := dir.Readdir(0)
if err != nil {
t.Fatal(err)
}
expectLen := 12
if len(fis) != expectLen {
t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
}
for _, fi := range fis {
expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
if err != nil {
t.Fatal(err)
}
if fi.Size() != expect.Size() {
t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
}
}
}

@ -25,7 +25,9 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/golang/protobuf/ptypes/any"
@ -43,6 +45,7 @@ import (
// If a .helmignore file is present, the directory loader will skip loading any files
// matching it. But .helmignore is not evaluated when reading out of an archive.
func Load(name string) (*chart.Chart, error) {
name = filepath.FromSlash(name)
fi, err := os.Stat(name)
if err != nil {
return nil, err
@ -62,11 +65,13 @@ type BufferedFile struct {
Data []byte
}
var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)

// loadArchiveFiles loads files out of an archive
func loadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
unzipped, err := gzip.NewReader(in)
if err != nil {
return nil, err
}
defer unzipped.Close()
@ -79,7 +84,7 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) {
break
}
if err != nil {
return nil, err
}
if hd.FileInfo().IsDir() {
@ -100,12 +105,33 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) {
// Normalize the path to the / delimiter
n = strings.Replace(n, delimiter, "/", -1)
if path.IsAbs(n) {
return nil, errors.New("chart illegally contains absolute paths")
}
n = path.Clean(n)
if n == "." {
// In this case, the original path was relative when it should have been absolute.
return nil, errors.New("chart illegally contains empty path")
}
if strings.HasPrefix(n, "..") {
return nil, errors.New("chart illegally references parent directory")
}
// In some particularly arcane acts of path creativity, it is possible to intermix
// UNIX and Windows style paths in such a way that you produce a result of the form
// c:/foo even after all the built-in absolute path checks. So we explicitly check
// for this condition.
if drivePathPattern.MatchString(n) {
return nil, errors.New("chart contains illegally named files")
}
if parts[0] == "Chart.yaml" {
return nil, errors.New("chart yaml not in base directory")
}
if _, err := io.Copy(b, tr); err != nil {
return files, err
}
files = append(files, &BufferedFile{Name: n, Data: b.Bytes()})
@ -115,7 +141,15 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) {
if len(files) == 0 {
return nil, errors.New("no files in chart archive")
}
return files, nil
}
// LoadArchive loads from a reader containing a compressed tar archive.
func LoadArchive(in io.Reader) (*chart.Chart, error) {
files, err := loadArchiveFiles(in)
if err != nil {
return nil, err
}
return LoadFiles(files)
}
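
The sanitization performed while reading the archive is easier to follow in isolation. The sketch below is not part of this change: checkEntry is a hypothetical reduction of the same checks, applied to an entry name after the leading chart directory has been stripped and the delimiter normalized to "/", as in the loop above.

package main

import (
	"fmt"
	"path"
	"regexp"
	"strings"
)

var drivePattern = regexp.MustCompile(`^[a-zA-Z]:/`)

// checkEntry mirrors the checks above for a single, already-normalized tar
// entry name (leading chart directory stripped, delimiter is "/").
func checkEntry(n string) error {
	if path.IsAbs(n) {
		return fmt.Errorf("absolute path")
	}
	n = path.Clean(n)
	if n == "." {
		// An entry at the archive root leaves n empty after stripping,
		// and Clean("") is ".".
		return fmt.Errorf("empty path")
	}
	if strings.HasPrefix(n, "..") {
		return fmt.Errorf("references parent directory")
	}
	if drivePattern.MatchString(n) {
		return fmt.Errorf("looks like a Windows drive path")
	}
	return nil
}

func main() {
	for _, n := range []string{
		"templates/deployment.yaml", // ok
		"../../etc/passwd",          // parent directory
		"/etc/passwd",               // absolute
		"c:/windows/system32",       // drive path
	} {
		fmt.Printf("%-28s -> %v\n", n, checkEntry(n))
	}
}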

@ -17,8 +17,14 @@ limitations under the License.
package chartutil
import (
"archive/tar"
"compress/gzip"
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"
"time"
"k8s.io/helm/pkg/proto/hapi/chart"
)
@ -43,6 +49,97 @@ func TestLoadFile(t *testing.T) {
verifyRequirements(t, c)
}
func TestLoadArchive_InvalidArchive(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "helm-test-")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpdir)
writeTar := func(filename, internalPath string, body []byte) {
dest, err := os.Create(filename)
if err != nil {
t.Fatal(err)
}
zipper := gzip.NewWriter(dest)
tw := tar.NewWriter(zipper)
h := &tar.Header{
Name: internalPath,
Mode: 0755,
Size: int64(len(body)),
ModTime: time.Now(),
}
if err := tw.WriteHeader(h); err != nil {
t.Fatal(err)
}
if _, err := tw.Write(body); err != nil {
t.Fatal(err)
}
tw.Close()
zipper.Close()
dest.Close()
}
for _, tt := range []struct {
chartname string
internal string
expectError string
}{
{"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
{"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
{"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
{"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
{"illegal-name.tgz", "./.", "chart illegally contains empty path"},
{"illegal-name2.tgz", "/./.", "chart illegally contains empty path"},
{"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains empty path"},
{"illegal-name4.tgz", "/missing-leading-slash", "chart metadata (Chart.yaml) missing"},
{"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
{"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
{"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
{"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},
// Under special circumstances, this can get normalized to things that look like absolute Windows paths
{"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
{"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
{"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
} {
illegalChart := filepath.Join(tmpdir, tt.chartname)
writeTar(illegalChart, tt.internal, []byte("hello: world"))
_, err = Load(illegalChart)
if err == nil {
t.Fatal("expected error when unpacking illegal files")
}
if err.Error() != tt.expectError {
t.Errorf("Expected %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
}
}
// Make sure that absolute path gets interpreted as relative
illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
_, err = Load(illegalChart)
if err.Error() != "invalid chart (Chart.yaml): name must not be empty" {
t.Error(err)
}
// And just to validate that the above was not spurious
illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
_, err = Load(illegalChart)
if err.Error() != "chart metadata (Chart.yaml) missing" {
t.Error(err)
}
// Finally, test that drive letter gets stripped off on Windows
illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
_, err = Load(illegalChart)
if err.Error() != "invalid chart (Chart.yaml): name must not be empty" {
t.Error(err)
}
}
func TestLoadFiles(t *testing.T) {
goodFiles := []*BufferedFile{
{

@ -428,7 +428,7 @@ func processImportValues(c *chart.Chart) error {
}
// create value map from child to be merged into parent
vm := pathToMap(nm["parent"], vv.AsMap())
b = coalesceTables(cvals, vm)
b = coalesceTables(cvals, vm, c.Metadata.Name)
case string:
nm := map[string]string{
"child": "exports." + iv,
@ -441,14 +441,14 @@ func processImportValues(c *chart.Chart) error {
log.Printf("Warning: ImportValues missing table: %v", err)
continue
}
b = coalesceTables(b, vm.AsMap())
b = coalesceTables(b, vm.AsMap(), c.Metadata.Name)
}
}
// set our formatted import values
r.ImportValues = outiv
}
}
b = coalesceTables(b, cvals)
b = coalesceTables(b, cvals, c.Metadata.Name)
y, err := yaml.Marshal(b)
if err != nil {
return err

@ -15,6 +15,8 @@ limitations under the License.
package chartutil
import (
"os"
"path/filepath"
"sort"
"testing"
@ -426,7 +428,12 @@ func TestDependentChartWithSubChartsHelmignore(t *testing.T) {
}
func TestDependentChartsWithSubChartsSymlink(t *testing.T) {
joonix := "testdata/joonix"
if err := os.Symlink(filepath.Join("..", "..", "frobnitz"), filepath.Join(joonix, "charts", "frobnitz")); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(filepath.Join(joonix, "charts", "frobnitz"))
c, err := Load(joonix)
if err != nil {
t.Fatalf("Failed to load testdata: %s", err)
}

@ -203,7 +203,7 @@ func coalesceDeps(chrt *chart.Chart, dest map[string]interface{}) (map[string]in
dvmap := dv.(map[string]interface{})
// Get globals out of dest and merge them into dvmap.
coalesceGlobals(dvmap, dest)
coalesceGlobals(dvmap, dest, chrt.Metadata.Name)
var err error
// Now coalesce the rest of the values.
@ -219,20 +219,20 @@ func coalesceDeps(chrt *chart.Chart, dest map[string]interface{}) (map[string]in
// coalesceGlobals copies the globals out of src and merges them into dest.
//
// For convenience, returns dest.
func coalesceGlobals(dest, src map[string]interface{}) map[string]interface{} {
func coalesceGlobals(dest, src map[string]interface{}, chartName string) map[string]interface{} {
var dg, sg map[string]interface{}
if destglob, ok := dest[GlobalKey]; !ok {
dg = map[string]interface{}{}
} else if dg, ok = destglob.(map[string]interface{}); !ok {
log.Printf("warning: skipping globals because destination %s is not a table.", GlobalKey)
log.Printf("Warning: Skipping globals for chart '%s' because destination '%s' is not a table.", chartName, GlobalKey)
return dg
}
if srcglob, ok := src[GlobalKey]; !ok {
sg = map[string]interface{}{}
} else if sg, ok = srcglob.(map[string]interface{}); !ok {
log.Printf("warning: skipping globals because source %s is not a table.", GlobalKey)
log.Printf("Warning: skipping globals for chart '%s' because source '%s' is not a table.", chartName, GlobalKey)
return dg
}
@ -247,11 +247,11 @@ func coalesceGlobals(dest, src map[string]interface{}) map[string]interface{} {
if destvmap, ok := destv.(map[string]interface{}); ok {
// Basically, we reverse order of coalesce here to merge
// top-down.
coalesceTables(vv, destvmap)
coalesceTables(vv, destvmap, chartName)
dg[key] = vv
continue
} else {
log.Printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key)
log.Printf("Warning: For chart '%s', cannot merge map onto non-map for key '%q'. Skipping.", chartName, key)
}
} else {
// Here there is no merge. We're just adding.
@ -259,7 +259,7 @@ func coalesceGlobals(dest, src map[string]interface{}) map[string]interface{} {
}
} else if dv, ok := dg[key]; ok && istable(dv) {
// It's not clear if this condition can actually ever trigger.
log.Printf("key %s is table. Skipping", key)
log.Printf("Warning: For chart '%s', key '%s' is a table. Skipping.", chartName, key)
continue
}
// TODO: Do we need to do any additional checking on the value?
@ -291,7 +291,7 @@ func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interf
// On error, we return just the overridden values.
// FIXME: We should log this error. It indicates that the YAML data
// did not parse.
return v, fmt.Errorf("error reading default values (%s): %s", c.Values.Raw, err)
return v, fmt.Errorf("Error: Reading chart '%s' default values (%s): %s", c.Metadata.Name, c.Values.Raw, err)
}
for key, val := range nv {
@ -305,12 +305,12 @@ func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interf
// if v[key] is a table, merge nv's val table into v[key].
src, ok := val.(map[string]interface{})
if !ok {
log.Printf("warning: skipped value for %s: Not a table.", key)
log.Printf("Warning: Building values map for chart '%s'. Skipped value (%+v) for '%s', as it is not a table.", c.Metadata.Name, src, key)
continue
}
// Because v has higher precedence than nv, dest values override src
// values.
coalesceTables(dest, src)
coalesceTables(dest, src, c.Metadata.Name)
}
} else {
// If the key is not in v, copy it from nv.
@ -323,7 +323,7 @@ func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interf
// coalesceTables merges a source map into a destination map.
//
// dest is considered authoritative.
func coalesceTables(dst, src map[string]interface{}) map[string]interface{} {
func coalesceTables(dst, src map[string]interface{}, chartName string) map[string]interface{} {
// Because dest has higher precedence than src, dest values override src
// values.
for key, val := range src {
@ -331,13 +331,13 @@ func coalesceTables(dst, src map[string]interface{}) map[string]interface{} {
if innerdst, ok := dst[key]; !ok {
dst[key] = val
} else if istable(innerdst) {
coalesceTables(innerdst.(map[string]interface{}), val.(map[string]interface{}))
coalesceTables(innerdst.(map[string]interface{}), val.(map[string]interface{}), chartName)
} else {
log.Printf("warning: cannot overwrite table with non table for %s (%v)", key, val)
log.Printf("Warning: Merging destination map for chart '%s'. Cannot overwrite table item '%s', with non table value: %v", chartName, key, val)
}
continue
} else if dv, ok := dst[key]; ok && istable(dv) {
log.Printf("warning: destination for %s is a table. Ignoring non-table value %v", key, val)
log.Printf("Warning: Merging destination map for chart '%s'. The destination item '%s' is a table and ignoring the source '%s' as it has a non-table value of: %v", chartName, key, key, val)
continue
} else if !ok { // <- ok is still in scope from preceding conditional.
dst[key] = val
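
The dst-wins rule that coalesceTables implements is easiest to see on concrete data. In the sketch below, merge is a hypothetical, simplified stand-in for the unexported coalesceTables (warnings omitted): destination values survive, missing keys are copied from the source, and nested tables merge recursively.

package main

import "fmt"

// merge is a simplified stand-in for coalesceTables: destination keys win,
// missing keys are copied from the source, nested tables merge recursively,
// and type conflicts are silently skipped (the real code logs them).
func merge(dst, src map[string]interface{}) map[string]interface{} {
	for k, v := range src {
		if sv, ok := v.(map[string]interface{}); ok {
			if dv, ok := dst[k].(map[string]interface{}); ok {
				merge(dv, sv)
				continue
			}
		}
		if _, ok := dst[k]; !ok {
			dst[k] = v
		}
	}
	return dst
}

func main() {
	dst := map[string]interface{}{ // e.g. user-supplied overrides
		"name":   "Ishmael",
		"nested": map[string]interface{}{"a": 1},
	}
	src := map[string]interface{}{ // e.g. chart defaults
		"name":   "default",
		"port":   8080,
		"nested": map[string]interface{}{"a": 99, "b": 2},
	}
	fmt.Println(merge(dst, src))
	// name stays "Ishmael", port is copied from src, nested ends up as map[a:1 b:2]
}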

@ -386,7 +386,7 @@ func TestCoalesceTables(t *testing.T) {
// What we expect is that anything in dst overrides anything in src, but that
// otherwise the values are coalesced.
coalesceTables(dst, src)
coalesceTables(dst, src, "")
if dst["name"] != "Ishmael" {
t.Errorf("Unexpected name: %s", dst["name"])

@ -159,9 +159,10 @@ func (e *Engine) alterFuncMap(t *template.Template, referenceTpls map[string]ren
if e.LintMode {
// Don't fail on missing required values when linting
log.Printf("[INFO] Missing required value: %s", warn)
return val, nil
return "", nil
}
return val, fmt.Errorf(warn)
// Convert nil to "" in case required is piped into other functions
return "", fmt.Errorf(warn)
} else if _, ok := val.(string); ok {
if val == "" {
if e.LintMode {
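
The reason for returning "" rather than nil in lint mode is that chart authors routinely pipe required into other template functions, for example {{ required "msg" .Values.x | upper }}; a nil result would make the downstream function fail even though linting should tolerate missing values. A self-contained sketch with text/template and a stand-in required helper (the wiring here is illustrative, not the engine's actual FuncMap):

package main

import (
	"fmt"
	"strings"
	"text/template"
)

func main() {
	lintMode := true

	// Stand-in for the engine's "required" helper: in lint mode it degrades to
	// an empty string instead of nil, so downstream pipe functions such as
	// upper still receive a string.
	funcs := template.FuncMap{
		"required": func(warn string, val interface{}) (interface{}, error) {
			if val == nil {
				if lintMode {
					return "", nil
				}
				return "", fmt.Errorf(warn)
			}
			return val, nil
		},
		"upper": strings.ToUpper,
	}

	tpl := template.Must(template.New("t").Funcs(funcs).Parse(
		`{{ required "who is required" .who | upper }}`))

	var out strings.Builder
	// .who is deliberately missing; with lintMode = true this renders "".
	if err := tpl.Execute(&out, map[string]interface{}{}); err != nil {
		fmt.Println("render error:", err)
		return
	}
	fmt.Printf("rendered: %q\n", out.String()) // rendered: ""
}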

@ -466,7 +466,6 @@ func TestAlterFuncMap(t *testing.T) {
if err != nil {
t.Fatal(err)
}
expectStr := "All your base are belong to us"
if gotStr := outReq["conan/templates/quote"]; gotStr != expectStr {
t.Errorf("Expected %q, got %q (%v)", expectStr, gotStr, outReq)
@ -476,6 +475,32 @@ func TestAlterFuncMap(t *testing.T) {
t.Errorf("Expected %q, got %q (%v)", expectNum, gotNum, outReq)
}
// test required without passing in needed values with lint mode on
// verifies lint replaces required with an empty string (should not fail)
lintValues := chartutil.Values{
"Values": chartutil.Values{
"who": "us",
},
"Chart": reqChart.Metadata,
"Release": chartutil.Values{
"Name": "That 90s meme",
},
}
e := New()
e.LintMode = true
outReq, err = e.Render(reqChart, lintValues)
if err != nil {
t.Fatal(err)
}
expectStr = "All your base are belong to us"
if gotStr := outReq["conan/templates/quote"]; gotStr != expectStr {
t.Errorf("Expected %q, got %q (%v)", expectStr, gotStr, outReq)
}
expectNum = "All of them!"
if gotNum := outReq["conan/templates/bases"]; gotNum != expectNum {
t.Errorf("Expected %q, got %q (%v)", expectNum, gotNum, outReq)
}
tplChart := &chart.Chart{
Metadata: &chart.Metadata{Name: "TplFunction"},
Templates: []*chart.Template{

@ -18,6 +18,7 @@ package getter
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
@ -67,6 +68,10 @@ func TestCollectPlugins(t *testing.T) {
}
func TestPluginGetter(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("TODO: refactor this test to work on windows")
}
oldhh := os.Getenv("HELM_HOME")
defer os.Setenv("HELM_HOME", oldhh)
os.Setenv("HELM_HOME", "")

@ -227,6 +227,13 @@ func ReleaseTestCleanup(cleanup bool) ReleaseTestOption {
}
}
// ReleaseTestParallel is a boolean value representing whether to run test pods in parallel
func ReleaseTestParallel(parallel bool) ReleaseTestOption {
return func(opts *options) {
opts.testReq.Parallel = parallel
}
}
// RollbackTimeout specifies the number of seconds before kubernetes calls timeout
func RollbackTimeout(timeout int64) RollbackOption {
return func(opts *options) {
@ -339,6 +346,20 @@ func InstallReuseName(reuse bool) InstallOption {
}
}
// InstallSubNotes will (if true) instruct Tiller to render SubChart Notes
func InstallSubNotes(enable bool) InstallOption {
return func(opts *options) {
opts.instReq.SubNotes = enable
}
}
// UpgradeSubNotes will (if true) instruct Tiller to render SubChart Notes
func UpgradeSubNotes(enable bool) UpdateOption {
return func(opts *options) {
opts.updateReq.SubNotes = enable
}
}
// RollbackDisableHooks will disable hooks for a rollback operation
func RollbackDisableHooks(disable bool) RollbackOption {
return func(opts *options) {
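
These are functional options: each returns a closure that sets a field on the request before it is sent to Tiller. A hedged sketch of how a caller might use the new SubNotes options, assuming the package's usual client entry points (NewClient with a Host option, InstallReleaseFromChart, UpdateReleaseFromChart), which are not part of this diff; the Tiller address, chart path, and release name are placeholders.

package main

import (
	"fmt"

	"k8s.io/helm/pkg/chartutil"
	"k8s.io/helm/pkg/helm"
)

func main() {
	// Placeholders: Tiller address, chart path, and release name are illustrative.
	client := helm.NewClient(helm.Host("127.0.0.1:44134"))

	ch, err := chartutil.Load("./mychart")
	if err != nil {
		panic(err)
	}

	// Ask Tiller to render subchart NOTES.txt files on install...
	if _, err := client.InstallReleaseFromChart(ch, "default",
		helm.ReleaseName("demo"),
		helm.InstallSubNotes(true),
	); err != nil {
		fmt.Println("install failed:", err)
	}

	// ...and keep doing so on upgrade.
	if _, err := client.UpdateReleaseFromChart("demo", ch,
		helm.UpgradeSubNotes(true),
	); err != nil {
		fmt.Println("upgrade failed:", err)
	}
}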

@ -85,3 +85,54 @@ func TestGetFirstPod(t *testing.T) {
}
}
}
func TestGetTillerPodImage(t *testing.T) {
tests := []struct {
name string
podSpec v1.PodSpec
expected string
err bool
}{
{
name: "pod with tiller container image",
podSpec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "tiller",
Image: "gcr.io/kubernetes-helm/tiller:v2.0.0",
},
},
},
expected: "gcr.io/kubernetes-helm/tiller:v2.0.0",
err: false,
},
{
name: "pod without tiller container image",
podSpec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "not_tiller",
Image: "gcr.io/kubernetes-helm/not_tiller:v1.0.0",
},
},
},
expected: "",
err: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mockPod := mockTillerPod()
mockPod.Spec = tt.podSpec
client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{mockPod}})
imageName, err := GetTillerPodImage(client.CoreV1(), v1.NamespaceDefault)
if (err != nil) != tt.err {
t.Errorf("%q. expected error: %v, got %v", tt.name, tt.err, err)
}
if imageName != tt.expected {
t.Errorf("%q. expected %q, got %q", tt.name, tt.expected, imageName)
}
})
}
}

@ -48,6 +48,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/cmd/get"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/validation"
@ -179,7 +180,11 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
vk := gvk.Version + "/" + gvk.Kind
internalObj, err := asInternal(info)
if err != nil {
c.Log("Warning: conversion to internal type failed: %v", err)
// If the problem is just that the resource is not registered, don't print any
// error. This is normal for custom resources.
if !runtime.IsNotRegisteredError(err) {
c.Log("Warning: conversion to internal type failed: %v", err)
}
// Add the unstructured object in this situation. It will still get listed, just
// with less information.
objs[vk] = append(objs[vk], info.Object)
@ -202,7 +207,9 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
//here, we will add the objPods to the objs
for key, podItems := range objPods {
for i := range podItems {
objs[key+"(related)"] = append(objs[key+"(related)"], &podItems[i])
pod := &core.Pod{}
legacyscheme.Scheme.Convert(&podItems[i], pod, nil)
objs[key+"(related)"] = append(objs[key+"(related)"], pod)
}
}
@ -211,18 +218,14 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
// track of tab widths.
buf := new(bytes.Buffer)
	printFlags := get.NewHumanPrintFlags()
	for t, ot := range objs {
		if _, err = fmt.Fprintf(buf, "==> %s\n", t); err != nil {
			return "", err
		}
		typePrinter, _ := printFlags.ToPrinter("")
		for _, o := range ot {
			if err := typePrinter.PrintObj(o, buf); err != nil {
c.Log("failed to print object type %s, object: %q :\n %v", t, o, err)
return "", err
}
@ -230,7 +233,6 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
if _, err := buf.WriteString("\n"); err != nil {
return "", err
}
}
if len(missing) > 0 {
buf.WriteString(MissingGetHeader)
@ -358,7 +360,7 @@ func (c *Client) watchTimeout(t time.Duration) ResourceActorFunc {
//
// Handling for other kinds will be added as necessary.
func (c *Client) WatchUntilReady(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
infos, err := c.Build(namespace, reader)
infos, err := c.BuildUnstructured(namespace, reader)
if err != nil {
return err
}
@ -605,12 +607,13 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) err
//
// This operates on an event returned from a watcher.
func (c *Client) waitForJob(e watch.Event, name string) (bool, error) {
	job := &batch.Job{}
	err := legacyscheme.Scheme.Convert(e.Object, job, nil)
	if err != nil {
		return true, err
	}

	for _, c := range job.Status.Conditions {
if c.Type == batch.JobComplete && c.Status == v1.ConditionTrue {
return true, nil
} else if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
@ -618,7 +621,7 @@ func (c *Client) waitForJob(e watch.Event, name string) (bool, error) {
}
}
c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, job.Status.Active, job.Status.Failed, job.Status.Succeeded)
return false, nil
}

@ -29,8 +29,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
@ -203,7 +201,7 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error {
func (c *Client) podsReady(pods []v1.Pod) bool {
for _, pod := range pods {
if !podutil.IsPodReady(&pod) {
if !isPodReady(&pod) {
c.Log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
return false
}
@ -219,7 +217,7 @@ func (c *Client) servicesReady(svc []v1.Service) bool {
}
// Make sure the service is not explicitly set to "None" before checking the IP
if s.Spec.ClusterIP != v1.ClusterIPNone && !helper.IsServiceIPSet(&s) {
if s.Spec.ClusterIP != v1.ClusterIPNone && s.Spec.ClusterIP == "" {
c.Log("Service is not ready: %s/%s", s.GetNamespace(), s.GetName())
return false
}
@ -259,3 +257,15 @@ func getPods(client kubernetes.Interface, namespace string, selector map[string]
})
return list.Items, err
}
func isPodReady(pod *v1.Pod) bool {
if &pod.Status != nil && len(pod.Status.Conditions) > 0 {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady &&
condition.Status == v1.ConditionTrue {
return true
}
}
}
return false
}
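
isPodReady replaces the heavier podutil dependency with a direct scan of the pod's conditions. A minimal sketch of what it accepts, with the helper copied here (and the always-true pointer guard dropped) purely for illustration:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// isPodReady as above, slightly condensed, for illustration only.
func isPodReady(pod *v1.Pod) bool {
	for _, condition := range pod.Status.Conditions {
		if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	ready := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{
		{Type: v1.PodReady, Status: v1.ConditionTrue},
	}}}
	pending := &v1.Pod{} // no conditions reported yet

	fmt.Println(isPodReady(ready), isPodReady(pending)) // true false
}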

@ -46,7 +46,8 @@ func Chartfile(linter *support.Linter) {
return
}
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile))
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartNamePresence(chartFile))
linter.RunLinterRule(support.WarningSev, chartFileName, validateChartNameFormat(chartFile))
linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartNameDirMatch(linter.ChartDir, chartFile))
// Chart metadata
@ -74,13 +75,20 @@ func validateChartYamlFormat(chartFileError error) error {
return nil
}
func validateChartName(cf *chart.Metadata) error {
func validateChartNamePresence(cf *chart.Metadata) error {
if cf.Name == "" {
return errors.New("name is required")
}
return nil
}
func validateChartNameFormat(cf *chart.Metadata) error {
if strings.Contains(cf.Name, ".") {
return errors.New("name should be lower case letters and numbers. Words may be separated with dashes")
}
return nil
}
func validateChartNameDirMatch(chartDir string, cf *chart.Metadata) error {
if cf.Name != filepath.Base(chartDir) {
return fmt.Errorf("directory name (%s) and chart name (%s) must be the same", filepath.Base(chartDir), cf.Name)

@ -29,17 +29,20 @@ import (
)
const (
badChartDir = "testdata/badchartfile"
goodChartDir = "testdata/goodone"
badChartDir = "testdata/badchartfile"
badNameChartDir = "testdata/badnamechart"
goodChartDir = "testdata/goodone"
)
var (
badChartFilePath = filepath.Join(badChartDir, "Chart.yaml")
badNameChartFilePath = filepath.Join(badNameChartDir, "Chart.yaml")
goodChartFilePath = filepath.Join(goodChartDir, "Chart.yaml")
nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml")
)
var badChart, chatLoadRrr = chartutil.LoadChartfile(badChartFilePath)
var badNameChart, _ = chartutil.LoadChartfile(badNameChartFilePath)
var goodChart, _ = chartutil.LoadChartfile(goodChartFilePath)
// Validation functions Test
@ -66,12 +69,19 @@ func TestValidateChartYamlFormat(t *testing.T) {
}
func TestValidateChartName(t *testing.T) {
err := validateChartName(badChart)
err := validateChartNamePresence(badChart)
if err == nil {
t.Errorf("validateChartName to return a linter error, got no error")
}
}
func TestValidateChartNameFormat(t *testing.T) {
err := validateChartNameFormat(badNameChart)
if err == nil {
t.Errorf("validateChartNameFormat to return a linter error, got no error")
}
}
func TestValidateChartNameDirMatch(t *testing.T) {
err := validateChartNameDirMatch(goodChartDir, goodChart)
if err != nil {

@ -0,0 +1,4 @@
name: bad.chart.name
description: A Helm chart for Kubernetes
version: 0.1.0
icon: http://riverrun.io

@ -0,0 +1 @@
# Default values for badchartname.

@ -381,6 +381,8 @@ type UpdateReleaseRequest struct {
Force bool `protobuf:"varint,11,opt,name=force" json:"force,omitempty"`
// Description, if set, will set the description for the updated release
Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"`
// Render subchart notes if enabled
SubNotes bool `protobuf:"varint,13,opt,name=subNotes" json:"subNotes,omitempty"`
}
func (m *UpdateReleaseRequest) Reset() { *m = UpdateReleaseRequest{} }
@ -465,6 +467,13 @@ func (m *UpdateReleaseRequest) GetForce() bool {
return false
}
func (m *UpdateReleaseRequest) GetSubNotes() bool {
if m != nil {
return m.SubNotes
}
return false
}
func (m *UpdateReleaseRequest) GetDescription() string {
if m != nil {
return m.Description
@ -624,6 +633,7 @@ type InstallReleaseRequest struct {
DisableCrdHook bool `protobuf:"varint,10,opt,name=disable_crd_hook,json=disableCrdHook" json:"disable_crd_hook,omitempty"`
// Description, if set, will set the description for the installed release
Description string `protobuf:"bytes,11,opt,name=description" json:"description,omitempty"`
SubNotes bool `protobuf:"varint,12,opt,name=subNotes" json:"subNotes,omitempty"`
}
func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} }
@ -701,6 +711,13 @@ func (m *InstallReleaseRequest) GetDisableCrdHook() bool {
return false
}
func (m *InstallReleaseRequest) GetSubNotes() bool {
if m != nil {
return m.SubNotes
}
return false
}
func (m *InstallReleaseRequest) GetDescription() string {
if m != nil {
return m.Description
@ -883,6 +900,8 @@ type TestReleaseRequest struct {
Timeout int64 `protobuf:"varint,2,opt,name=timeout" json:"timeout,omitempty"`
// cleanup specifies whether or not to attempt pod deletion after test completes
Cleanup bool `protobuf:"varint,3,opt,name=cleanup" json:"cleanup,omitempty"`
// parallel specifies whether or not to run test pods in parallel
Parallel bool `protobuf:"varint,4,opt,name=parallel" json:"parallel,omitempty"`
}
func (m *TestReleaseRequest) Reset() { *m = TestReleaseRequest{} }
@ -911,6 +930,13 @@ func (m *TestReleaseRequest) GetCleanup() bool {
return false
}
func (m *TestReleaseRequest) GetParallel() bool {
if m != nil {
return m.Parallel
}
return false
}
// TestReleaseResponse represents a message from executing a test
type TestReleaseResponse struct {
Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"`
@ -1415,85 +1441,86 @@ var _ReleaseService_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("hapi/services/tiller.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1276 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xdd, 0x6e, 0xe3, 0x44,
0x14, 0x6e, 0xe2, 0xfc, 0x9e, 0x74, 0x43, 0x76, 0x36, 0xdb, 0x7a, 0xcd, 0x82, 0x82, 0x11, 0x6c,
0x76, 0x61, 0x53, 0x08, 0xdc, 0x20, 0x21, 0xa4, 0x6e, 0x36, 0x6a, 0x0b, 0xa5, 0x2b, 0x39, 0xed,
0x22, 0x21, 0x50, 0xe4, 0x26, 0x93, 0xd6, 0xac, 0x63, 0x07, 0xcf, 0xb8, 0x6c, 0x1f, 0x00, 0x24,
0xde, 0x83, 0x07, 0xe1, 0x3d, 0x78, 0x0e, 0xee, 0x91, 0xe7, 0xc7, 0xf5, 0x38, 0x76, 0x6a, 0x7a,
0xd3, 0x78, 0xe6, 0x9c, 0x39, 0x3f, 0xdf, 0x37, 0xe7, 0xcc, 0x29, 0x18, 0x97, 0xf6, 0xca, 0xd9,
0x23, 0x38, 0xb8, 0x72, 0x66, 0x98, 0xec, 0x51, 0xc7, 0x75, 0x71, 0x30, 0x58, 0x05, 0x3e, 0xf5,
0x51, 0x37, 0x92, 0x0d, 0xa4, 0x6c, 0xc0, 0x65, 0xc6, 0x0e, 0x3b, 0x31, 0xbb, 0xb4, 0x03, 0xca,
0xff, 0x72, 0x6d, 0x63, 0x37, 0xb9, 0xef, 0x7b, 0x0b, 0xe7, 0x42, 0x08, 0xb8, 0x8b, 0x00, 0xbb,
0xd8, 0x26, 0x58, 0xfe, 0x2a, 0x87, 0xa4, 0xcc, 0xf1, 0x16, 0xbe, 0x10, 0xbc, 0xab, 0x08, 0x28,
0x26, 0x74, 0x1a, 0x84, 0x9e, 0x10, 0x3e, 0x52, 0x84, 0x84, 0xda, 0x34, 0x24, 0x8a, 0xb3, 0x2b,
0x1c, 0x10, 0xc7, 0xf7, 0xe4, 0x2f, 0x97, 0x99, 0x7f, 0x97, 0xe1, 0xc1, 0xb1, 0x43, 0xa8, 0xc5,
0x0f, 0x12, 0x0b, 0xff, 0x1a, 0x62, 0x42, 0x51, 0x17, 0xaa, 0xae, 0xb3, 0x74, 0xa8, 0x5e, 0xea,
0x95, 0xfa, 0x9a, 0xc5, 0x17, 0x68, 0x07, 0x6a, 0xfe, 0x62, 0x41, 0x30, 0xd5, 0xcb, 0xbd, 0x52,
0xbf, 0x69, 0x89, 0x15, 0xfa, 0x06, 0xea, 0xc4, 0x0f, 0xe8, 0xf4, 0xfc, 0x5a, 0xd7, 0x7a, 0xa5,
0x7e, 0x7b, 0xf8, 0xd1, 0x20, 0x0b, 0xa7, 0x41, 0xe4, 0x69, 0xe2, 0x07, 0x74, 0x10, 0xfd, 0x79,
0x71, 0x6d, 0xd5, 0x08, 0xfb, 0x8d, 0xec, 0x2e, 0x1c, 0x97, 0xe2, 0x40, 0xaf, 0x70, 0xbb, 0x7c,
0x85, 0x0e, 0x00, 0x98, 0x5d, 0x3f, 0x98, 0xe3, 0x40, 0xaf, 0x32, 0xd3, 0xfd, 0x02, 0xa6, 0x5f,
0x45, 0xfa, 0x56, 0x93, 0xc8, 0x4f, 0xf4, 0x35, 0x6c, 0x73, 0x48, 0xa6, 0x33, 0x7f, 0x8e, 0x89,
0x5e, 0xeb, 0x69, 0xfd, 0xf6, 0xf0, 0x11, 0x37, 0x25, 0xe1, 0x9f, 0x70, 0xd0, 0x46, 0xfe, 0x1c,
0x5b, 0x2d, 0xae, 0x1e, 0x7d, 0x13, 0xf4, 0x18, 0x9a, 0x9e, 0xbd, 0xc4, 0x64, 0x65, 0xcf, 0xb0,
0x5e, 0x67, 0x11, 0xde, 0x6c, 0x98, 0x1e, 0x34, 0xa4, 0x73, 0xf3, 0x05, 0xd4, 0x78, 0x6a, 0xa8,
0x05, 0xf5, 0xb3, 0x93, 0xef, 0x4e, 0x5e, 0xfd, 0x70, 0xd2, 0xd9, 0x42, 0x0d, 0xa8, 0x9c, 0xec,
0x7f, 0x3f, 0xee, 0x94, 0xd0, 0x7d, 0xb8, 0x77, 0xbc, 0x3f, 0x39, 0x9d, 0x5a, 0xe3, 0xe3, 0xf1,
0xfe, 0x64, 0xfc, 0xb2, 0x53, 0x46, 0x6d, 0x80, 0xd1, 0xe1, 0xbe, 0x75, 0x3a, 0x65, 0x2a, 0x9a,
0xf9, 0x3e, 0x34, 0xe3, 0x1c, 0x50, 0x1d, 0xb4, 0xfd, 0xc9, 0x88, 0x9b, 0x78, 0x39, 0x9e, 0x8c,
0x3a, 0x25, 0xf3, 0xcf, 0x12, 0x74, 0x55, 0xca, 0xc8, 0xca, 0xf7, 0x08, 0x8e, 0x38, 0x9b, 0xf9,
0xa1, 0x17, 0x73, 0xc6, 0x16, 0x08, 0x41, 0xc5, 0xc3, 0x6f, 0x25, 0x63, 0xec, 0x3b, 0xd2, 0xa4,
0x3e, 0xb5, 0x5d, 0xc6, 0x96, 0x66, 0xf1, 0x05, 0xfa, 0x1c, 0x1a, 0x02, 0x0a, 0xa2, 0x57, 0x7a,
0x5a, 0xbf, 0x35, 0x7c, 0xa8, 0x02, 0x24, 0x3c, 0x5a, 0xb1, 0x9a, 0x79, 0x00, 0xbb, 0x07, 0x58,
0x46, 0xc2, 0xf1, 0x93, 0x37, 0x28, 0xf2, 0x6b, 0x2f, 0x31, 0x0b, 0x26, 0xf2, 0x6b, 0x2f, 0x31,
0xd2, 0xa1, 0x2e, 0xae, 0x1f, 0x0b, 0xa7, 0x6a, 0xc9, 0xa5, 0x49, 0x41, 0x5f, 0x37, 0x24, 0xf2,
0xca, 0xb2, 0xf4, 0x31, 0x54, 0xa2, 0xca, 0x60, 0x66, 0x5a, 0x43, 0xa4, 0xc6, 0x79, 0xe4, 0x2d,
0x7c, 0x8b, 0xc9, 0x55, 0xea, 0xb4, 0x34, 0x75, 0x87, 0x49, 0xaf, 0x23, 0xdf, 0xa3, 0xd8, 0xa3,
0x77, 0x8b, 0xff, 0x18, 0x1e, 0x65, 0x58, 0x12, 0x09, 0xec, 0x41, 0x5d, 0x84, 0xc6, 0xac, 0xe5,
0xe2, 0x2a, 0xb5, 0xcc, 0xdf, 0x35, 0xe8, 0x9e, 0xad, 0xe6, 0x36, 0xc5, 0x52, 0xb4, 0x21, 0xa8,
0x27, 0x50, 0x65, 0x1d, 0x46, 0x60, 0x71, 0x9f, 0xdb, 0xe6, 0x6d, 0x68, 0x14, 0xfd, 0xb5, 0xb8,
0x1c, 0x3d, 0x83, 0xda, 0x95, 0xed, 0x86, 0x98, 0x30, 0x20, 0x62, 0xd4, 0x84, 0x26, 0x6b, 0x4f,
0x96, 0xd0, 0x40, 0xbb, 0x50, 0x9f, 0x07, 0xd7, 0x51, 0x7f, 0x61, 0x25, 0xd9, 0xb0, 0x6a, 0xf3,
0xe0, 0xda, 0x0a, 0x3d, 0xf4, 0x21, 0xdc, 0x9b, 0x3b, 0xc4, 0x3e, 0x77, 0xf1, 0xf4, 0xd2, 0xf7,
0xdf, 0x10, 0x56, 0x95, 0x0d, 0x6b, 0x5b, 0x6c, 0x1e, 0x46, 0x7b, 0xc8, 0x88, 0x6e, 0xd2, 0x2c,
0xc0, 0x36, 0xc5, 0x7a, 0x8d, 0xc9, 0xe3, 0x75, 0x84, 0x21, 0x75, 0x96, 0xd8, 0x0f, 0x29, 0x2b,
0x25, 0xcd, 0x92, 0x4b, 0xf4, 0x01, 0x6c, 0x07, 0x98, 0x60, 0x3a, 0x15, 0x51, 0x36, 0xd8, 0xc9,
0x16, 0xdb, 0x7b, 0xcd, 0xc3, 0x42, 0x50, 0xf9, 0xcd, 0x76, 0xa8, 0xde, 0x64, 0x22, 0xf6, 0xcd,
0x8f, 0x85, 0x04, 0xcb, 0x63, 0x20, 0x8f, 0x85, 0x04, 0x8b, 0x63, 0x5d, 0xa8, 0x2e, 0xfc, 0x60,
0x86, 0xf5, 0x16, 0x93, 0xf1, 0x05, 0xea, 0x41, 0x6b, 0x8e, 0xc9, 0x2c, 0x70, 0x56, 0x34, 0x62,
0x74, 0x9b, 0x61, 0x9a, 0xdc, 0x32, 0x0f, 0xe1, 0x61, 0x8a, 0x86, 0xbb, 0x32, 0xfa, 0x47, 0x19,
0x76, 0x2c, 0xdf, 0x75, 0xcf, 0xed, 0xd9, 0x9b, 0x02, 0x9c, 0x26, 0xe0, 0x2f, 0x6f, 0x86, 0x5f,
0xcb, 0x80, 0x3f, 0x71, 0x4d, 0x2b, 0xca, 0x35, 0x55, 0x88, 0xa9, 0xe6, 0x13, 0x53, 0x53, 0x89,
0x91, 0xa8, 0xd7, 0x13, 0xa8, 0xc7, 0x90, 0x36, 0x36, 0x40, 0xda, 0x5c, 0x87, 0xf4, 0x5b, 0xd8,
0x5d, 0xc3, 0xe1, 0xae, 0xa0, 0xfe, 0x5b, 0x86, 0x87, 0x47, 0x1e, 0xa1, 0xb6, 0xeb, 0xa6, 0x30,
0x8d, 0x6b, 0xa2, 0x54, 0xb8, 0x26, 0xca, 0xff, 0xa7, 0x26, 0x34, 0x85, 0x14, 0xc9, 0x60, 0x25,
0xc1, 0x60, 0xa1, 0x3a, 0x51, 0xba, 0x53, 0x2d, 0xd5, 0x9d, 0xd0, 0x7b, 0x00, 0xfc, 0x62, 0x33,
0xe3, 0x1c, 0xfc, 0x26, 0xdb, 0x39, 0x11, 0xcd, 0x48, 0xf2, 0xd5, 0xc8, 0xe6, 0x2b, 0x59, 0x25,
0x7d, 0xe8, 0xc8, 0x78, 0x66, 0xc1, 0x9c, 0xc5, 0x24, 0x2a, 0xa5, 0x2d, 0xf6, 0x47, 0xc1, 0x3c,
0x8a, 0x2a, 0xcd, 0x61, 0x6b, 0x9d, 0xc3, 0x23, 0xd8, 0x49, 0xc3, 0x7e, 0x57, 0x0a, 0xff, 0x2a,
0xc1, 0xee, 0x99, 0xe7, 0x64, 0x92, 0x98, 0x55, 0x18, 0x6b, 0xb0, 0x96, 0x33, 0x60, 0xed, 0x42,
0x75, 0x15, 0x06, 0x17, 0x58, 0xd0, 0xc4, 0x17, 0x49, 0xbc, 0x2a, 0x2a, 0x5e, 0xa9, 0x8c, 0xab,
0xeb, 0x19, 0x4f, 0x41, 0x5f, 0x8f, 0xf2, 0x8e, 0x39, 0x47, 0x79, 0xc5, 0x6f, 0x57, 0x93, 0xbf,
0x53, 0xe6, 0x03, 0xb8, 0x7f, 0x80, 0xe9, 0x6b, 0x5e, 0xa6, 0x02, 0x00, 0x73, 0x0c, 0x28, 0xb9,
0x79, 0xe3, 0x4f, 0x6c, 0xa9, 0xfe, 0xe4, 0x60, 0x27, 0xf5, 0xa5, 0x96, 0xf9, 0x15, 0xb3, 0x7d,
0xe8, 0x10, 0xea, 0x07, 0xd7, 0x9b, 0xc0, 0xed, 0x80, 0xb6, 0xb4, 0xdf, 0x8a, 0xa7, 0x2d, 0xfa,
0x34, 0x0f, 0x58, 0x04, 0xf1, 0x51, 0x11, 0x41, 0x72, 0x50, 0x28, 0x15, 0x1b, 0x14, 0x7e, 0x02,
0x74, 0x8a, 0xe3, 0x99, 0xe5, 0x96, 0x37, 0x56, 0xd2, 0x54, 0x56, 0x69, 0xd2, 0xa1, 0x3e, 0x73,
0xb1, 0xed, 0x85, 0x2b, 0x41, 0xac, 0x5c, 0x9a, 0x3f, 0xc3, 0x03, 0xc5, 0xba, 0x88, 0x33, 0xca,
0x87, 0x5c, 0x08, 0xeb, 0xd1, 0x27, 0xfa, 0x12, 0x6a, 0x7c, 0xb0, 0x63, 0xb6, 0xdb, 0xc3, 0xc7,
0x6a, 0xdc, 0xcc, 0x48, 0xe8, 0x89, 0x49, 0xd0, 0x12, 0xba, 0xc3, 0x7f, 0x1a, 0xd0, 0x96, 0xa3,
0x09, 0x1f, 0x3b, 0x91, 0x03, 0xdb, 0xc9, 0x19, 0x0c, 0x3d, 0xcd, 0x9f, 0x4a, 0x53, 0xa3, 0xb5,
0xf1, 0xac, 0x88, 0x2a, 0xcf, 0xc0, 0xdc, 0xfa, 0xac, 0x84, 0x08, 0x74, 0xd2, 0xa3, 0x11, 0x7a,
0x9e, 0x6d, 0x23, 0x67, 0x16, 0x33, 0x06, 0x45, 0xd5, 0xa5, 0x5b, 0x74, 0xc5, 0xee, 0x8c, 0x3a,
0xcf, 0xa0, 0x5b, 0xcd, 0xa8, 0x23, 0x94, 0xb1, 0x57, 0x58, 0x3f, 0xf6, 0xfb, 0x0b, 0xdc, 0x53,
0x5e, 0x5c, 0x94, 0x83, 0x56, 0xd6, 0x74, 0x64, 0x7c, 0x52, 0x48, 0x37, 0xf6, 0xb5, 0x84, 0xb6,
0xda, 0xc6, 0x50, 0x8e, 0x81, 0xcc, 0x37, 0xc6, 0xf8, 0xb4, 0x98, 0x72, 0xec, 0x8e, 0x40, 0x27,
0xdd, 0x43, 0xf2, 0x78, 0xcc, 0xe9, 0x88, 0x79, 0x3c, 0xe6, 0xb5, 0x26, 0x73, 0x0b, 0xd9, 0x00,
0x37, 0x2d, 0x04, 0x3d, 0xc9, 0x25, 0x44, 0xed, 0x3c, 0x46, 0xff, 0x76, 0xc5, 0xd8, 0xc5, 0x0a,
0xde, 0x49, 0xbd, 0xe8, 0x28, 0x07, 0x9a, 0xec, 0x01, 0xc8, 0x78, 0x5e, 0x50, 0x3b, 0x95, 0x94,
0xe8, 0x4a, 0x1b, 0x92, 0x52, 0x5b, 0xde, 0x86, 0xa4, 0x52, 0x0d, 0xce, 0xdc, 0x42, 0x0e, 0xb4,
0xad, 0xd0, 0x13, 0xae, 0xa3, 0xb6, 0x80, 0x72, 0x4e, 0xaf, 0x77, 0x35, 0xe3, 0x69, 0x01, 0xcd,
0x9b, 0xfa, 0x7e, 0x01, 0x3f, 0x36, 0xa4, 0xea, 0x79, 0x8d, 0xfd, 0x57, 0xfe, 0xc5, 0x7f, 0x01,
0x00, 0x00, 0xff, 0xff, 0x38, 0x07, 0x4c, 0x12, 0x83, 0x10, 0x00, 0x00,
// 1289 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xed, 0x72, 0xdb, 0x44,
0x17, 0x8e, 0x2d, 0x7f, 0x1e, 0xa7, 0x7e, 0xdd, 0x6d, 0x9a, 0xa8, 0x7a, 0x0b, 0x63, 0xc4, 0x40,
0xdd, 0x42, 0x1d, 0x30, 0xfc, 0x61, 0x86, 0x61, 0x26, 0x75, 0x3d, 0x49, 0x21, 0xa4, 0x33, 0x72,
0x5b, 0x66, 0x98, 0x61, 0x3c, 0x8a, 0xbd, 0x6e, 0x45, 0x65, 0xc9, 0x68, 0x57, 0xa1, 0xb9, 0x00,
0x98, 0xe1, 0x3e, 0xb8, 0x10, 0xee, 0x83, 0xeb, 0xe0, 0x3f, 0xb3, 0x5f, 0x8a, 0x56, 0x96, 0x1c,
0x91, 0x3f, 0xb1, 0x76, 0xcf, 0xd9, 0xf3, 0xf1, 0x3c, 0x7b, 0xce, 0x9e, 0x80, 0xf5, 0xc6, 0x5d,
0x7b, 0x87, 0x04, 0x47, 0x17, 0xde, 0x1c, 0x93, 0x43, 0xea, 0xf9, 0x3e, 0x8e, 0x86, 0xeb, 0x28,
0xa4, 0x21, 0xda, 0x63, 0xb2, 0xa1, 0x92, 0x0d, 0x85, 0xcc, 0xda, 0xe7, 0x27, 0xe6, 0x6f, 0xdc,
0x88, 0x8a, 0xbf, 0x42, 0xdb, 0x3a, 0x48, 0xef, 0x87, 0xc1, 0xd2, 0x7b, 0x2d, 0x05, 0xc2, 0x45,
0x84, 0x7d, 0xec, 0x12, 0xac, 0x7e, 0xb5, 0x43, 0x4a, 0xe6, 0x05, 0xcb, 0x50, 0x0a, 0xfe, 0xaf,
0x09, 0x28, 0x26, 0x74, 0x16, 0xc5, 0x81, 0x14, 0xde, 0xd3, 0x84, 0x84, 0xba, 0x34, 0x26, 0x9a,
0xb3, 0x0b, 0x1c, 0x11, 0x2f, 0x0c, 0xd4, 0xaf, 0x90, 0xd9, 0x7f, 0x55, 0xe1, 0xce, 0xa9, 0x47,
0xa8, 0x23, 0x0e, 0x12, 0x07, 0xff, 0x12, 0x63, 0x42, 0xd1, 0x1e, 0xd4, 0x7d, 0x6f, 0xe5, 0x51,
0xb3, 0xd2, 0xaf, 0x0c, 0x0c, 0x47, 0x2c, 0xd0, 0x3e, 0x34, 0xc2, 0xe5, 0x92, 0x60, 0x6a, 0x56,
0xfb, 0x95, 0x41, 0xdb, 0x91, 0x2b, 0xf4, 0x0d, 0x34, 0x49, 0x18, 0xd1, 0xd9, 0xf9, 0xa5, 0x69,
0xf4, 0x2b, 0x83, 0xee, 0xe8, 0xa3, 0x61, 0x1e, 0x4e, 0x43, 0xe6, 0x69, 0x1a, 0x46, 0x74, 0xc8,
0xfe, 0x3c, 0xb9, 0x74, 0x1a, 0x84, 0xff, 0x32, 0xbb, 0x4b, 0xcf, 0xa7, 0x38, 0x32, 0x6b, 0xc2,
0xae, 0x58, 0xa1, 0x63, 0x00, 0x6e, 0x37, 0x8c, 0x16, 0x38, 0x32, 0xeb, 0xdc, 0xf4, 0xa0, 0x84,
0xe9, 0xe7, 0x4c, 0xdf, 0x69, 0x13, 0xf5, 0x89, 0xbe, 0x86, 0x5d, 0x01, 0xc9, 0x6c, 0x1e, 0x2e,
0x30, 0x31, 0x1b, 0x7d, 0x63, 0xd0, 0x1d, 0xdd, 0x13, 0xa6, 0x14, 0xfc, 0x53, 0x01, 0xda, 0x38,
0x5c, 0x60, 0xa7, 0x23, 0xd4, 0xd9, 0x37, 0x41, 0xf7, 0xa1, 0x1d, 0xb8, 0x2b, 0x4c, 0xd6, 0xee,
0x1c, 0x9b, 0x4d, 0x1e, 0xe1, 0xd5, 0x86, 0x1d, 0x40, 0x4b, 0x39, 0xb7, 0x9f, 0x40, 0x43, 0xa4,
0x86, 0x3a, 0xd0, 0x7c, 0x79, 0xf6, 0xdd, 0xd9, 0xf3, 0x1f, 0xce, 0x7a, 0x3b, 0xa8, 0x05, 0xb5,
0xb3, 0xa3, 0xef, 0x27, 0xbd, 0x0a, 0xba, 0x0d, 0xb7, 0x4e, 0x8f, 0xa6, 0x2f, 0x66, 0xce, 0xe4,
0x74, 0x72, 0x34, 0x9d, 0x3c, 0xed, 0x55, 0x51, 0x17, 0x60, 0x7c, 0x72, 0xe4, 0xbc, 0x98, 0x71,
0x15, 0xc3, 0x7e, 0x1f, 0xda, 0x49, 0x0e, 0xa8, 0x09, 0xc6, 0xd1, 0x74, 0x2c, 0x4c, 0x3c, 0x9d,
0x4c, 0xc7, 0xbd, 0x8a, 0xfd, 0x47, 0x05, 0xf6, 0x74, 0xca, 0xc8, 0x3a, 0x0c, 0x08, 0x66, 0x9c,
0xcd, 0xc3, 0x38, 0x48, 0x38, 0xe3, 0x0b, 0x84, 0xa0, 0x16, 0xe0, 0x77, 0x8a, 0x31, 0xfe, 0xcd,
0x34, 0x69, 0x48, 0x5d, 0x9f, 0xb3, 0x65, 0x38, 0x62, 0x81, 0x3e, 0x87, 0x96, 0x84, 0x82, 0x98,
0xb5, 0xbe, 0x31, 0xe8, 0x8c, 0xee, 0xea, 0x00, 0x49, 0x8f, 0x4e, 0xa2, 0x66, 0x1f, 0xc3, 0xc1,
0x31, 0x56, 0x91, 0x08, 0xfc, 0xd4, 0x0d, 0x62, 0x7e, 0xdd, 0x15, 0xe6, 0xc1, 0x30, 0xbf, 0xee,
0x0a, 0x23, 0x13, 0x9a, 0xf2, 0xfa, 0xf1, 0x70, 0xea, 0x8e, 0x5a, 0xda, 0x14, 0xcc, 0x4d, 0x43,
0x32, 0xaf, 0x3c, 0x4b, 0x1f, 0x43, 0x8d, 0x55, 0x06, 0x37, 0xd3, 0x19, 0x21, 0x3d, 0xce, 0x67,
0xc1, 0x32, 0x74, 0xb8, 0x5c, 0xa7, 0xce, 0xc8, 0x52, 0x77, 0x92, 0xf6, 0x3a, 0x0e, 0x03, 0x8a,
0x03, 0x7a, 0xb3, 0xf8, 0x4f, 0xe1, 0x5e, 0x8e, 0x25, 0x99, 0xc0, 0x21, 0x34, 0x65, 0x68, 0xdc,
0x5a, 0x21, 0xae, 0x4a, 0xcb, 0xfe, 0xcd, 0x80, 0xbd, 0x97, 0xeb, 0x85, 0x4b, 0xb1, 0x12, 0x6d,
0x09, 0xea, 0x01, 0xd4, 0x79, 0x87, 0x91, 0x58, 0xdc, 0x16, 0xb6, 0x45, 0x1b, 0x1a, 0xb3, 0xbf,
0x8e, 0x90, 0xa3, 0x47, 0xd0, 0xb8, 0x70, 0xfd, 0x18, 0x13, 0x0e, 0x44, 0x82, 0x9a, 0xd4, 0xe4,
0xed, 0xc9, 0x91, 0x1a, 0xe8, 0x00, 0x9a, 0x8b, 0xe8, 0x92, 0xf5, 0x17, 0x5e, 0x92, 0x2d, 0xa7,
0xb1, 0x88, 0x2e, 0x9d, 0x38, 0x40, 0x1f, 0xc2, 0xad, 0x85, 0x47, 0xdc, 0x73, 0x1f, 0xcf, 0xde,
0x84, 0xe1, 0x5b, 0xc2, 0xab, 0xb2, 0xe5, 0xec, 0xca, 0xcd, 0x13, 0xb6, 0x87, 0x2c, 0x76, 0x93,
0xe6, 0x11, 0x76, 0x29, 0x36, 0x1b, 0x5c, 0x9e, 0xac, 0x19, 0x86, 0xd4, 0x5b, 0xe1, 0x30, 0xa6,
0xbc, 0x94, 0x0c, 0x47, 0x2d, 0xd1, 0x07, 0xb0, 0x1b, 0x61, 0x82, 0xe9, 0x4c, 0x46, 0xd9, 0xe2,
0x27, 0x3b, 0x7c, 0xef, 0x95, 0x08, 0x0b, 0x41, 0xed, 0x57, 0xd7, 0xa3, 0x66, 0x9b, 0x8b, 0xf8,
0xb7, 0x38, 0x16, 0x13, 0xac, 0x8e, 0x81, 0x3a, 0x16, 0x13, 0x2c, 0x8f, 0xed, 0x41, 0x7d, 0x19,
0x46, 0x73, 0x6c, 0x76, 0xb8, 0x4c, 0x2c, 0x50, 0x1f, 0x3a, 0x0b, 0x4c, 0xe6, 0x91, 0xb7, 0xa6,
0x8c, 0xd1, 0x5d, 0x8e, 0x69, 0x7a, 0xcb, 0x3e, 0x81, 0xbb, 0x19, 0x1a, 0x6e, 0xca, 0xe8, 0xef,
0x55, 0xd8, 0x77, 0x42, 0xdf, 0x3f, 0x77, 0xe7, 0x6f, 0x4b, 0x70, 0x9a, 0x82, 0xbf, 0xba, 0x1d,
0x7e, 0x23, 0x07, 0xfe, 0xd4, 0x35, 0xad, 0x69, 0xd7, 0x54, 0x23, 0xa6, 0x5e, 0x4c, 0x4c, 0x43,
0x27, 0x46, 0xa1, 0xde, 0x4c, 0xa1, 0x9e, 0x40, 0xda, 0xda, 0x02, 0x69, 0x7b, 0x13, 0xd2, 0x6f,
0xe1, 0x60, 0x03, 0x87, 0x9b, 0x82, 0xfa, 0x4f, 0x15, 0xee, 0x3e, 0x0b, 0x08, 0x75, 0x7d, 0x3f,
0x83, 0x69, 0x52, 0x13, 0x95, 0xd2, 0x35, 0x51, 0xfd, 0x2f, 0x35, 0x61, 0x68, 0xa4, 0x28, 0x06,
0x6b, 0x29, 0x06, 0x4b, 0xd5, 0x89, 0xd6, 0x9d, 0x1a, 0x99, 0xee, 0x84, 0xde, 0x03, 0x10, 0x17,
0x9b, 0x1b, 0x17, 0xe0, 0xb7, 0xf9, 0xce, 0x99, 0x6c, 0x46, 0x8a, 0xaf, 0x56, 0x3e, 0x5f, 0xe9,
0x2a, 0x19, 0x40, 0x4f, 0xc5, 0x33, 0x8f, 0x16, 0x3c, 0x26, 0x59, 0x29, 0x5d, 0xb9, 0x3f, 0x8e,
0x16, 0x2c, 0xaa, 0x2c, 0x87, 0x9d, 0x4d, 0x0e, 0x9f, 0xc1, 0x7e, 0x16, 0xf6, 0x9b, 0x52, 0xf8,
0x67, 0x05, 0x0e, 0x5e, 0x06, 0x5e, 0x2e, 0x89, 0x79, 0x85, 0xb1, 0x01, 0x6b, 0x35, 0x07, 0xd6,
0x3d, 0xa8, 0xaf, 0xe3, 0xe8, 0x35, 0x96, 0x34, 0x89, 0x45, 0x1a, 0xaf, 0x9a, 0x8e, 0x57, 0x26,
0xe3, 0xfa, 0x66, 0xc6, 0x33, 0x30, 0x37, 0xa3, 0xbc, 0x61, 0xce, 0x2c, 0xaf, 0xe4, 0xed, 0x6a,
0x8b, 0x77, 0xca, 0xbe, 0x03, 0xb7, 0x8f, 0x31, 0x7d, 0x25, 0xca, 0x54, 0x02, 0x60, 0x4f, 0x00,
0xa5, 0x37, 0xaf, 0xfc, 0xc9, 0x2d, 0xdd, 0x9f, 0x1a, 0xec, 0x94, 0xbe, 0xd2, 0xb2, 0xbf, 0xe2,
0xb6, 0x4f, 0x3c, 0x42, 0xc3, 0xe8, 0x72, 0x1b, 0xb8, 0x3d, 0x30, 0x56, 0xee, 0x3b, 0xf9, 0xb4,
0xb1, 0x4f, 0xfb, 0x98, 0x47, 0x90, 0x1c, 0x95, 0x11, 0xa4, 0x07, 0x85, 0x4a, 0xb9, 0x41, 0xe1,
0x1d, 0xa0, 0x17, 0x38, 0x99, 0x59, 0xae, 0x79, 0x63, 0x15, 0x4d, 0x55, 0x9d, 0x26, 0x13, 0x9a,
0x73, 0x1f, 0xbb, 0x41, 0xbc, 0x96, 0xc4, 0xaa, 0x25, 0x6b, 0x6b, 0x6b, 0x37, 0x72, 0x7d, 0x1f,
0xfb, 0xf2, 0xb9, 0x4a, 0xd6, 0xf6, 0x4f, 0x70, 0x47, 0xf3, 0x2c, 0x73, 0x60, 0xb9, 0x92, 0xd7,
0xd2, 0x33, 0xfb, 0x44, 0x5f, 0x42, 0x43, 0x0c, 0x7d, 0xdc, 0x6f, 0x77, 0x74, 0x5f, 0xcf, 0x89,
0x1b, 0x89, 0x03, 0x39, 0x25, 0x3a, 0x52, 0x77, 0xf4, 0x77, 0x0b, 0xba, 0x6a, 0x6c, 0x11, 0x23,
0x29, 0xf2, 0x60, 0x37, 0x3d, 0x9f, 0xa1, 0x87, 0xc5, 0x13, 0x6b, 0x66, 0xec, 0xb6, 0x1e, 0x95,
0x51, 0x15, 0x19, 0xd8, 0x3b, 0x9f, 0x55, 0x10, 0x81, 0x5e, 0x76, 0x6c, 0x42, 0x8f, 0xf3, 0x6d,
0x14, 0xcc, 0x69, 0xd6, 0xb0, 0xac, 0xba, 0x72, 0x8b, 0x2e, 0xf8, 0x7d, 0xd2, 0x67, 0x1d, 0x74,
0xad, 0x19, 0x7d, 0xbc, 0xb2, 0x0e, 0x4b, 0xeb, 0x27, 0x7e, 0x7f, 0x86, 0x5b, 0xda, 0x6b, 0x8c,
0x0a, 0xd0, 0xca, 0x9b, 0x9c, 0xac, 0x4f, 0x4a, 0xe9, 0x26, 0xbe, 0x56, 0xd0, 0xd5, 0x5b, 0x1c,
0x2a, 0x30, 0x90, 0xfb, 0xfe, 0x58, 0x9f, 0x96, 0x53, 0x4e, 0xdc, 0x11, 0xe8, 0x65, 0xfb, 0x4b,
0x11, 0x8f, 0x05, 0xdd, 0xb2, 0x88, 0xc7, 0xa2, 0xb6, 0x65, 0xef, 0x20, 0x17, 0xe0, 0xaa, 0xbd,
0xa0, 0x07, 0x85, 0x84, 0xe8, 0x5d, 0xc9, 0x1a, 0x5c, 0xaf, 0x98, 0xb8, 0x58, 0xc3, 0xff, 0x32,
0xaf, 0x3d, 0x2a, 0x80, 0x26, 0x7f, 0x38, 0xb2, 0x1e, 0x97, 0xd4, 0xce, 0x24, 0x25, 0x3b, 0xd6,
0x96, 0xa4, 0xf4, 0x76, 0xb8, 0x25, 0xa9, 0x4c, 0xf3, 0xb3, 0x77, 0x90, 0x07, 0x5d, 0x27, 0x0e,
0xa4, 0x6b, 0xd6, 0x16, 0x50, 0xc1, 0xe9, 0xcd, 0x8e, 0x67, 0x3d, 0x2c, 0xa1, 0x79, 0x55, 0xdf,
0x4f, 0xe0, 0xc7, 0x96, 0x52, 0x3d, 0x6f, 0xf0, 0xff, 0xd8, 0xbf, 0xf8, 0x37, 0x00, 0x00, 0xff,
0xff, 0xb6, 0x48, 0x98, 0x76, 0x9f, 0x10, 0x00, 0x00,
}

@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"log"
"sync"
"time"
"k8s.io/api/core/v1"
@ -31,10 +32,13 @@ import (
// Environment encapsulates information about where test suite executes and returns results
type Environment struct {
	Namespace   string
	KubeClient  environment.KubeClient
	Stream      services.ReleaseService_RunReleaseTestServer
	Timeout     int64
	Parallel    bool
	Parallelism uint32
	streamLock  sync.Mutex
}
func (env *Environment) createTestPod(test *test) error {
@ -108,6 +112,8 @@ func (env *Environment) streamUnknown(name, info string) error {
func (env *Environment) streamMessage(msg string, status release.TestRun_Status) error {
resp := &services.TestReleaseResponse{Msg: msg, Status: status}
env.streamLock.Lock()
defer env.streamLock.Unlock()
return env.Stream.Send(resp)
}
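
The new streamLock matters because, with parallel test pods, several goroutines can call streamMessage at the same time, and a gRPC server stream does not allow concurrent Send calls. A self-contained sketch of the same pattern with a fake stream; all names here are illustrative.

package main

import (
	"fmt"
	"sync"
)

// fakeStream stands in for the gRPC server stream; real streams must not be
// written to from multiple goroutines at once.
type fakeStream struct{ sent []string }

func (f *fakeStream) Send(msg string) error {
	f.sent = append(f.sent, msg)
	return nil
}

type env struct {
	stream     *fakeStream
	streamLock sync.Mutex
}

func (e *env) streamMessage(msg string) error {
	e.streamLock.Lock()
	defer e.streamLock.Unlock()
	return e.stream.Send(msg)
}

func main() {
	e := &env{stream: &fakeStream{}}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			e.streamMessage(fmt.Sprintf("test %d running", i))
		}(i)
	}
	wg.Wait()
	fmt.Println(len(e.stream.sent), "messages sent") // 5 messages sent
}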

@ -121,10 +121,11 @@ func newMockTestingEnvironment() *MockTestingEnvironment {
return &MockTestingEnvironment{
Environment: &Environment{
			Namespace:   "default",
			KubeClient:  tEnv.KubeClient,
			Timeout:     5,
			Stream:      &mockStream{},
			Parallelism: 20,
},
}
}

@ -17,7 +17,9 @@ limitations under the License.
package releasetesting
import (
"context"
"fmt"
"golang.org/x/sync/semaphore"
"strings"
"github.com/ghodss/yaml"
@ -69,53 +71,91 @@ func (ts *TestSuite) Run(env *Environment) error {
env.streamMessage("No Tests Found", release.TestRun_UNKNOWN)
}
	var tests []*test
	for _, testManifest := range ts.TestManifests {
		test, err := newTest(testManifest)
		if err != nil {
			return err
		}
		tests = append(tests, test)
	}

	if env.Parallel {
		c := make(chan error, len(tests))

		// Use a semaphore to restrict the number of tests running in parallel.
		sem := semaphore.NewWeighted(int64(env.Parallelism))
		ctx := context.Background()

		for _, t := range tests {
			sem.Acquire(ctx, 1)
			go func(t *test, sem *semaphore.Weighted) {
				defer sem.Release(1)
				c <- t.run(env)
			}(t, sem)
		}

		for range tests {
			if err := <-c; err != nil {
				return err
			}
		}
	} else {
		for _, t := range tests {
			if err := t.run(env); err != nil {
				return err
			}
		}
	}

	for _, t := range tests {
		ts.Results = append(ts.Results, t.result)
	}

	ts.CompletedAt = timeconv.Now()
	return nil
}

func (t *test) run(env *Environment) error {
	t.result.StartedAt = timeconv.Now()
	if err := env.streamRunning(t.result.Name); err != nil {
		return err
	}
	t.result.Status = release.TestRun_RUNNING

	resourceCreated := true
	if err := env.createTestPod(t); err != nil {
		resourceCreated = false
		if streamErr := env.streamError(t.result.Info); streamErr != nil {
			return err
		}
	}

	resourceCleanExit := true
	status := v1.PodUnknown
	if resourceCreated {
		var err error
		status, err = env.getTestPodStatus(t)
		if err != nil {
			resourceCleanExit = false
			if streamErr := env.streamError(t.result.Info); streamErr != nil {
				return streamErr
			}
		}
	}

	if resourceCreated && resourceCleanExit {
		if err := t.assignTestResult(status); err != nil {
			return err
		}
		if err := env.streamResult(t.result); err != nil {
			return err
		}
	}

	t.result.CompletedAt = timeconv.Now()
	return nil
}
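
The bounded-parallelism pattern used above (acquire a weighted semaphore before launching each goroutine, release it when the goroutine finishes, and collect one result per test on a channel) can be seen in isolation below. Job count, limit, and sleep are arbitrary, and Acquire's error is ignored because context.Background is never cancelled, mirroring the code above.

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	const jobs = 6
	const limit = 2

	sem := semaphore.NewWeighted(limit)
	ctx := context.Background()
	done := make(chan error, jobs)

	var inFlight int64
	for i := 0; i < jobs; i++ {
		// Acquire blocks once `limit` goroutines hold the semaphore.
		sem.Acquire(ctx, 1)
		go func(i int) {
			defer sem.Release(1)
			n := atomic.AddInt64(&inFlight, 1)
			fmt.Printf("job %d running, %d in flight (never more than %d)\n", i, n, limit)
			time.Sleep(10 * time.Millisecond)
			atomic.AddInt64(&inFlight, -1)
			done <- nil
		}(i)
	}

	// Collect one result per job, as Run does with its error channel.
	for i := 0; i < jobs; i++ {
		<-done
	}
}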

@ -220,6 +220,107 @@ func TestExtractTestManifestsFromHooks(t *testing.T) {
}
}
func TestParallelTestRun(t *testing.T) {
ts := testSuiteFixture([]string{manifestWithTestSuccessHook, manifestWithTestSuccessHook})
env := testEnvFixture()
env.Parallel = true
env.KubeClient = newSleepOnWaitKubeClient()
if err := ts.Run(env); err != nil {
t.Errorf("%s", err)
}
if len(ts.Results) != 2 {
t.Errorf("Expected 2 test result. Got %v", len(ts.Results))
}
stream := env.Stream.(*mockStream)
if len(stream.messages) != 4 {
t.Errorf("Expected four messages, Got: %v", len(stream.messages))
}
if stream.messages[0].Status != release.TestRun_RUNNING {
t.Errorf("Expected first message status to be RUNNING, Got: %v", stream.messages[0].Status)
}
if stream.messages[1].Status != release.TestRun_RUNNING {
t.Errorf("Expected second message status to be RUNNING, Got: %v", stream.messages[1].Status)
}
if stream.messages[2].Status != release.TestRun_SUCCESS {
t.Errorf("Expected third message status to be SUCCESS, Got: %v", stream.messages[2].Status)
}
if stream.messages[3].Status != release.TestRun_SUCCESS {
t.Errorf("Expected fourth message status to be SUCCESS, Got: %v", stream.messages[3].Status)
}
}
func TestParallelTestRunFailure(t *testing.T) {
ts := testSuiteFixture([]string{manifestWithTestSuccessHook, manifestWithTestFailureHook})
env := testEnvFixture()
env.Parallel = true
env.KubeClient = newSleepOnWaitKubeClient()
if err := ts.Run(env); err != nil {
t.Errorf("%s", err)
}
if len(ts.Results) != 2 {
t.Errorf("Expected 2 test result. Got %v", len(ts.Results))
}
stream := env.Stream.(*mockStream)
if len(stream.messages) != 4 {
t.Errorf("Expected four messages, Got: %v", len(stream.messages))
}
if stream.messages[0].Status != release.TestRun_RUNNING {
t.Errorf("Expected first message status to be RUNNING, Got: %v", stream.messages[0].Status)
}
if stream.messages[1].Status != release.TestRun_RUNNING {
t.Errorf("Expected second message status to be RUNNING, Got: %v", stream.messages[1].Status)
}
if ts.Results[0].Status != release.TestRun_SUCCESS {
t.Errorf("Expected first test result to be successful, got: %v", ts.Results[0].Status)
}
if ts.Results[1].Status != release.TestRun_FAILURE {
t.Errorf("Expected second test result to be failure, got: %v", ts.Results[1].Status)
}
}
func TestParallelism(t *testing.T) {
ts := testSuiteFixture([]string{manifestWithTestSuccessHook, manifestWithTestSuccessHook, manifestWithTestFailureHook})
env := testEnvFixture()
env.Parallel = true
env.Parallelism = 2
env.KubeClient = newSleepOnWaitKubeClient()
if err := ts.Run(env); err != nil {
t.Errorf("%s", err)
}
stream := env.Stream.(*mockStream)
if stream.messages[0].Status != release.TestRun_RUNNING {
t.Errorf("Expected first message status to be RUNNING, Got: %v", stream.messages[0].Status)
}
if stream.messages[1].Status != release.TestRun_RUNNING {
t.Errorf("Expected second message status to be RUNNING, Got: %v", stream.messages[1].Status)
}
if stream.messages[2].Status == release.TestRun_RUNNING {
t.Errorf("Expected third message status to not be RUNNING")
}
if ts.Results[0].Status != release.TestRun_SUCCESS {
t.Errorf("Expected first test result to be successful, got: %v", ts.Results[0].Status)
}
if ts.Results[1].Status != release.TestRun_SUCCESS {
t.Errorf("Expected second test result to be successful, got: %v", ts.Results[1].Status)
}
if ts.Results[2].Status != release.TestRun_FAILURE {
t.Errorf("Expected third test result to be failure, got: %v", ts.Results[2].Status)
}
}
func chartStub() *chart.Chart {
return &chart.Chart{
Metadata: &chart.Metadata{
@ -328,6 +429,26 @@ func (p *podSucceededKubeClient) WaitAndGetCompletedPodPhase(ns string, r io.Rea
return v1.PodSucceeded, nil
}
// For testing parallelism, this kube client
// will sleep for 1ms before returning completed pod
// phase.
type sleepOnWaitKubeClient struct {
tillerEnv.PrintingKubeClient
firstWait bool
}
func newSleepOnWaitKubeClient() *sleepOnWaitKubeClient {
return &sleepOnWaitKubeClient{
PrintingKubeClient: tillerEnv.PrintingKubeClient{Out: ioutil.Discard},
}
}
func (p *sleepOnWaitKubeClient) WaitAndGetCompletedPodPhase(ns string, r io.Reader, timeout time.Duration) (v1.PodPhase, error) {
time.Sleep(1 * time.Millisecond)
return v1.PodSucceeded, nil
}
type podFailedKubeClient struct {
tillerEnv.PrintingKubeClient
}

@ -270,10 +270,12 @@ func ResolveReferenceURL(baseURL, refURL string) (string, error) {
return "", fmt.Errorf("failed to parse %s as URL: %v", refURL, err)
}
// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/"
resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL)
// if the base URL contains query string parameters,
// propagate them to the child URL but only if the
// refURL is relative to baseURL
resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL)
if (resolvedURL.Hostname() == parsedBaseURL.Hostname()) && (resolvedURL.Port() == parsedBaseURL.Port()) {
resolvedURL.RawQuery = parsedBaseURL.RawQuery
}
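
Two details of net/url drive this hunk: ResolveReference replaces the last path segment unless the base path ends in "/", and it takes the reference's (empty) query rather than the base's, which is why RawQuery is copied back explicitly when the result stays on the same host and port. A small sketch of the trailing-slash point:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	ref, _ := url.Parse("nginx-0.2.0.tgz")

	// Without a trailing slash the last path segment is replaced...
	base, _ := url.Parse("http://localhost:8123/charts")
	fmt.Println(base.ResolveReference(ref)) // http://localhost:8123/nginx-0.2.0.tgz

	// ...with one, the reference is appended, which is what we want here.
	baseSlash, _ := url.Parse("http://localhost:8123/charts/")
	fmt.Println(baseSlash.ResolveReference(ref)) // http://localhost:8123/charts/nginx-0.2.0.tgz
}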

@ -287,6 +287,14 @@ func TestResolveReferenceURL(t *testing.T) {
t.Errorf("%s", chartURL)
}
chartURL, err = ResolveReferenceURL("http://localhost:8123/charts", "nginx-0.2.0.tgz")
if err != nil {
t.Errorf("%s", err)
}
if chartURL != "http://localhost:8123/charts/nginx-0.2.0.tgz" {
t.Errorf("%s", chartURL)
}
chartURL, err = ResolveReferenceURL("http://localhost:8123/charts/?st=2018-08-06T22%3A59%3A04Z&se=2018-08-07T22%3A59%3A04Z&sp=rl&sv=2018-03-28&sr=c&sig=cyqM4%2F5G7HNk%2F3faaHSDMaWxFxefCglvZlYSnmQBwiY%3D", "nginx-0.2.0.tgz")
if err != nil {
t.Errorf("%s", err)

@ -22,6 +22,7 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
@ -110,7 +111,7 @@ func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) {
_, file := filepath.Split(filename)
u, err = urlutil.URLJoin(baseURL, file)
if err != nil {
u = filepath.Join(baseURL, file)
u = path.Join(baseURL, file)
}
}
cr := &ChartVersion{
@ -246,9 +247,11 @@ func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
var parentDir string
parentDir, fname = filepath.Split(fname)
// filepath.Split appends an extra slash to the end of parentDir. We want to strip that out.
parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator))
parentURL, err := urlutil.URLJoin(baseURL, parentDir)
if err != nil {
parentURL = filepath.Join(baseURL, parentDir)
parentURL = path.Join(baseURL, parentDir)
}
c, err := chartutil.Load(arch)
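
The fallback switches from filepath.Join to path.Join because the value being built is a URL, not a filesystem path: path.Join always joins with "/", while filepath.Join would use backslashes on Windows. A quick illustration; the URL and segments are arbitrary.

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always uses "/", which is what a URL needs. Note that it also
	// collapses "//" (so "http://" becomes "http:/"), which is why URLJoin is
	// tried first and Join is only the fallback.
	fmt.Println(path.Join("http://localhost:8080/charts", "sub", "nginx-0.2.0.tgz"))
	// http:/localhost:8080/charts/sub/nginx-0.2.0.tgz

	// filepath.Join uses the OS separator; on Windows it would produce
	// backslashes, which would corrupt the URL.
	fmt.Println(filepath.Join("charts", "sub", "nginx-0.2.0.tgz"))
	// charts/sub/nginx-0.2.0.tgz on Unix, charts\sub\nginx-0.2.0.tgz on Windows
}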

@ -272,7 +272,7 @@ func verifyLocalIndex(t *testing.T, i *IndexFile) {
}
func TestIndexDirectory(t *testing.T) {
dir := "testdata/repository"
dir := filepath.Join("testdata", "repository")
index, err := IndexDirectory(dir, "http://localhost:8080")
if err != nil {
t.Fatal(err)

@ -25,6 +25,7 @@ import (
"k8s.io/helm/pkg/storage/driver"
)
// NoReleasesErr indicates that a given release cannot be found
const NoReleasesErr = "has no deployed releases"
// Storage represents a storage engine for a Release.
@ -181,21 +182,37 @@ func (s *Storage) removeLeastRecent(name string, max int) error {
if len(h) <= max {
return nil
}
	// We want oldest to newest
	relutil.SortByRevision(h)

	lastDeployed, err := s.Deployed(name)
	if err != nil {
		return err
	}

	var toDelete []*rspb.Release
	for _, rel := range h {
		// once we have enough releases to delete to reach the max, stop
		if len(h)-len(toDelete) == max {
			break
		}
		if lastDeployed != nil {
			if rel.GetVersion() != lastDeployed.GetVersion() {
				toDelete = append(toDelete, rel)
			}
		} else {
			toDelete = append(toDelete, rel)
		}
	}

	// Delete as many as possible. In the case of API throughput limitations,
	// multiple invocations of this function will eventually delete them all.
	errors := []error{}
	for _, rel := range toDelete {
		err = s.deleteReleaseVersion(name, rel.GetVersion())
		if err != nil {
			errors = append(errors, err)
		}
}
}
@ -210,6 +227,16 @@ func (s *Storage) removeLeastRecent(name string, max int) error {
}
}
func (s *Storage) deleteReleaseVersion(name string, version int32) error {
key := makeKey(name, version)
_, err := s.Delete(name, version)
if err != nil {
s.Log("error pruning %s from release history: %s", key, err)
return err
}
return nil
}
// Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) {
s.Log("getting last revision of %q", name)

@ -293,6 +293,57 @@ func TestStorageRemoveLeastRecent(t *testing.T) {
}
}
func TestStorageDontDeleteDeployed(t *testing.T) {
storage := Init(driver.NewMemory())
storage.Log = t.Logf
storage.MaxHistory = 3
const name = "angry-bird"
// setup storage with test releases
setup := func() {
// release records
rls0 := ReleaseTestData{Name: name, Version: 1, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls1 := ReleaseTestData{Name: name, Version: 2, Status: rspb.Status_DEPLOYED}.ToRelease()
rls2 := ReleaseTestData{Name: name, Version: 3, Status: rspb.Status_FAILED}.ToRelease()
rls3 := ReleaseTestData{Name: name, Version: 4, Status: rspb.Status_FAILED}.ToRelease()
// create the release records in the storage
assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
}
setup()
rls5 := ReleaseTestData{Name: name, Version: 5, Status: rspb.Status_FAILED}.ToRelease()
assertErrNil(t.Fatal, storage.Create(rls5), "Storing release 'angry-bird' (v5)")
// On inserting the 5th record, we expect a total of 3 releases, but we expect version 2
// (the only deployed release) to still exist
hist, err := storage.History(name)
if err != nil {
t.Fatal(err)
} else if len(hist) != storage.MaxHistory {
for _, item := range hist {
t.Logf("%s %v", item.Name, item.Version)
}
t.Fatalf("expected %d items in history, got %d", storage.MaxHistory, len(hist))
}
expectedVersions := map[int32]bool{
2: true,
4: true,
5: true,
}
for _, item := range hist {
if !expectedVersions[item.GetVersion()] {
t.Errorf("Release version %d found when not expected", item.GetVersion())
}
}
}
func TestStorageLast(t *testing.T) {
storage := Init(driver.NewMemory())

@ -84,7 +84,7 @@ func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*re
return nil, err
}
hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, req.SubNotes, caps.APIVersions)
if err != nil {
// Return a release with partial data so that client can show debugging
// information.

@ -268,7 +268,7 @@ func TestInstallRelease_WrongTillerVersion(t *testing.T) {
}
}
func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) {
func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
@ -300,6 +300,39 @@ func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) {
}
}
func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
req := installRequest(withSubNotes(),
withChart(
withNotes(notesText),
withDependency(withNotes(notesText+" child")),
))
res, err := rs.InstallRelease(c, req)
if err != nil {
t.Fatalf("Failed install: %s", err)
}
if res.Release.Name == "" {
t.Errorf("Expected release name.")
}
rel, err := rs.env.Releases.Get(res.Release.Name, res.Release.Version)
if err != nil {
t.Errorf("Expected release for %s (%v).", res.Release.Name, rs.env.Releases)
}
t.Logf("rel: %v", rel)
if !strings.Contains(rel.Info.Status.Notes, notesText) || !strings.Contains(rel.Info.Status.Notes, notesText+" child") {
t.Fatalf("Expected '%s', got '%s'", notesText+"\n"+notesText+" child", rel.Info.Status.Notes)
}
if rel.Info.Description != "Install complete" {
t.Errorf("unexpected description: %s", rel.Info.Description)
}
}
func TestInstallRelease_DryRun(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()

@ -278,7 +278,7 @@ func GetVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet
return chartutil.NewVersionSet(versions...), nil
}
func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) {
func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, subNotes bool, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) {
// Guard to make sure Tiller is at the right version to handle this chart.
sver := version.GetVersion()
if ch.Metadata.TillerVersion != "" &&
@ -307,18 +307,23 @@ func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values
// text file. We have to spin through this map because the keys carry path information, so we
// look for a terminating NOTES.txt. We also remove it from the files map so that we don't have
// to skip it in sortHooks.
notes := ""
var notesBuffer bytes.Buffer
for k, v := range files {
if strings.HasSuffix(k, notesFileSuffix) {
// Apply the notes only if they belong to the parent chart, or to any chart when subchart notes are enabled
// Note: do not use filepath.Join here; on Windows it builds the path with "\" separators, which will not match the rendered file keys
if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) {
notes = v
if subNotes || (k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix)) {
// If buffer contains data, add newline before adding more
if notesBuffer.Len() > 0 {
notesBuffer.WriteString("\n")
}
notesBuffer.WriteString(v)
}
delete(files, k)
}
}
notes := notesBuffer.String()
// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
// as partials are not used after renderer.Render. Empty manifests are also
// removed here.

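With the new subNotes flag every rendered NOTES.txt, parent and subchart alike, is concatenated into one newline-separated notes string; without it only the parent's `<chart>/templates/NOTES.txt` key matches. A self-contained sketch of that accumulation over a rendered-files map (the chart name and contents are invented):

```go
package main

import (
	"bytes"
	"fmt"
	"path"
	"strings"
)

func main() {
	const notesFileSuffix = "NOTES.txt"
	parent := "mychart" // hypothetical parent chart name
	subNotes := true    // mirrors the new flag on the request

	// Rendered templates keyed by path, as the renderer would produce (invented).
	files := map[string]string{
		"mychart/templates/NOTES.txt":                 "parent notes",
		"mychart/charts/subchart/templates/NOTES.txt": "child notes",
		"mychart/templates/deployment.yaml":           "kind: Deployment",
	}

	var notesBuffer bytes.Buffer
	for k, v := range files {
		if !strings.HasSuffix(k, notesFileSuffix) {
			continue
		}
		// Parent notes always apply; subchart notes only when subNotes is set.
		if subNotes || k == path.Join(parent, "templates", notesFileSuffix) {
			if notesBuffer.Len() > 0 {
				notesBuffer.WriteString("\n")
			}
			notesBuffer.WriteString(v)
		}
		// NOTES.txt entries are not manifests, so drop them from the map.
		delete(files, k)
	}
	// Order follows map iteration, so parent and child notes may print in either order.
	fmt.Println(notesBuffer.String())
}
```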
@ -221,6 +221,12 @@ func withChart(chartOpts ...chartOption) installOption {
}
}
func withSubNotes() installOption {
return func(opts *installOptions) {
opts.SubNotes = true
}
}
func installRequest(opts ...installOption) *services.InstallReleaseRequest {
reqOpts := &installOptions{
&services.InstallReleaseRequest{

@ -22,6 +22,8 @@ import (
reltesting "k8s.io/helm/pkg/releasetesting"
)
const maxParallelism = 20
// RunReleaseTest runs pre-defined tests stored as hooks on a given release
func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream services.ReleaseService_RunReleaseTestServer) error {
@ -37,10 +39,12 @@ func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream
}
testEnv := &reltesting.Environment{
Namespace: rel.Namespace,
KubeClient: s.env.KubeClient,
Timeout: req.Timeout,
Stream: stream,
Namespace: rel.Namespace,
KubeClient: s.env.KubeClient,
Timeout: req.Timeout,
Stream: stream,
Parallel: req.Parallel,
Parallelism: maxParallelism,
}
s.Log("running tests for release %s", rel.Name)
tSuite, err := reltesting.NewTestSuite(rel)

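The test environment now carries the request's Parallel flag plus a hard Parallelism cap of 20. Presumably the runner uses that cap to bound how many release tests execute at once; a sketch of that bounded-concurrency idea with a counting semaphore (illustrative only, not the releasetesting package's actual code):

```go
package main

import (
	"fmt"
	"sync"
)

// runParallel runs fn for every test, but at most parallelism at a time.
func runParallel(tests []string, parallelism int, fn func(string)) {
	sem := make(chan struct{}, parallelism) // counting semaphore
	var wg sync.WaitGroup
	for _, name := range tests {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func(n string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fn(n)
		}(name)
	}
	wg.Wait()
}

func main() {
	const maxParallelism = 20 // mirrors the cap introduced above
	tests := []string{"smoke-test", "db-test", "ingress-test"}
	runParallel(tests, maxParallelism, func(name string) {
		fmt.Println("running", name)
	})
}
```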
@ -113,7 +113,7 @@ func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*rele
return nil, nil, err
}
hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, req.SubNotes, caps.APIVersions)
if err != nil {
return nil, nil, err
}

@ -26,7 +26,7 @@ var (
// Increment major number for new feature additions and behavioral changes.
// Increment minor number for bug fixes and performance enhancements.
// Increment patch number for critical fixes to existing releases.
Version = "v2.11"
Version = "v2.12"
// BuildMetadata is extra build time data
BuildMetadata = "unreleased"
