diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 40d02927d..000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: "{build}" -clone_folder: c:\go\src\k8s.io\helm -environment: - GOPATH: c:\go - PATH: c:\ProgramData\bin;$(PATH) -install: - - ps: iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/fishworks/gofish/master/scripts/install.ps1')) - - gofish init - - gofish install glide - - glide install --strip-vendor -cache: - - vendor -> glide.lock -build: "off" -deploy: "off" -test_script: - - go build .\cmd\... - - go test .\... diff --git a/.circleci/config.yml b/.circleci/config.yml index 96f53ee40..14c050746 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,13 +4,13 @@ jobs: working_directory: /go/src/k8s.io/helm parallelism: 3 docker: - - image: golang:1.11 + - image: golang:1.12.5 environment: PROJECT_NAME: "kubernetes-helm" steps: - checkout - setup_remote_docker: - version: 17.09.0-ce + version: 18.06.0-ce - restore_cache: keys: - glide-{{ checksum "glide.yaml" }}-{{ checksum "glide.lock" }} diff --git a/.circleci/deploy.sh b/.circleci/deploy.sh index 08adad568..bdaef92ca 100755 --- a/.circleci/deploy.sh +++ b/.circleci/deploy.sh @@ -22,6 +22,8 @@ fi : ${GCLOUD_SERVICE_KEY:?"GCLOUD_SERVICE_KEY environment variable is not set"} : ${PROJECT_NAME:?"PROJECT_NAME environment variable is not set"} +: ${AZURE_STORAGE_CONNECTION_STRING:?"AZURE_STORAGE_CONNECTION_STRING environment variable is not set"} +: ${AZURE_STORAGE_CONTAINER_NAME:?"AZURE_STORAGE_CONTAINER_NAME environment variable is not set"} VERSION= if [[ -n "${CIRCLE_TAG:-}" ]]; then @@ -50,6 +52,14 @@ ${HOME}/google-cloud-sdk/bin/gcloud auth activate-service-account --key-file "${ ${HOME}/google-cloud-sdk/bin/gcloud config set project "${PROJECT_NAME}" docker login -u _json_key -p "$(cat ${HOME}/gcloud-service-key.json)" https://gcr.io +echo "Installing Azure CLI" +apt update +apt install -y apt-transport-https +echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ stretch main" | tee /etc/apt/sources.list.d/azure-cli.list +curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add +apt update +apt install -y azure-cli + echo "Building the tiller image" make docker-build VERSION="${VERSION}" @@ -62,3 +72,9 @@ make dist checksum VERSION="${VERSION}" echo "Pushing binaries to gs bucket" ${HOME}/google-cloud-sdk/bin/gsutil cp ./_dist/* "gs://${PROJECT_NAME}" + +echo "Pushing binaries to Azure" +az storage blob upload-batch -s _dist/ -d "$AZURE_STORAGE_CONTAINER_NAME" --pattern 'helm-*' --connection-string "$AZURE_STORAGE_CONNECTION_STRING" + +echo "Pushing KEYS file to Azure" +az storage blob upload -f "KEYS" -n "KEYS" -c "$AZURE_STORAGE_CONTAINER_NAME" --connection-string "$AZURE_STORAGE_CONNECTION_STRING" \ No newline at end of file diff --git a/.gitignore b/.gitignore index 7fdfdcf2a..d1ec13265 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,9 @@ rootfs/rudder vendor/ *.exe .idea/ +*.iml +*.swp +*~ +.classpath +.project +.settings/** diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index aba3388a6..aee515e37 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing Guidelines -The Kubernetes Helm project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted. +The Helm project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted. 
## Reporting a Security Issue @@ -15,7 +15,7 @@ us a chance to try to fix the issue before it is exploited in the wild. The sign-off is a simple line at the end of the explanation for a commit. All commits needs to be signed. Your signature certifies that you wrote the patch or otherwise have the right to contribute the material. The rules are pretty simple, -if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): +if you can certify the below (from [developercertificate.org](https://developercertificate.org/)): ``` Developer Certificate of Origin @@ -84,12 +84,12 @@ your PR will be rejected by the automated DCO check. Whether you are a user or contributor, official support channels include: -- GitHub [issues](https://github.com/helm/helm/issues/new) -- Slack [Kubernetes Slack](http://slack.kubernetes.io/): - - User: #helm-users - - Contributor: #helm-dev +- [Issues](https://github.com/helm/helm/issues) +- Slack: + - User: [#helm-users](https://kubernetes.slack.com/messages/C0NH30761/details/) + - Contributor: [#helm-dev](https://kubernetes.slack.com/messages/C51E88VDG/) -Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of. +Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of. It is also worth asking on the Slack channels. ## Milestones @@ -172,41 +172,75 @@ contributing to Helm. All issue types follow the same general lifecycle. Differe ## How to Contribute a Patch -1. Fork the repo, develop and test your code changes. -1. Use sign-off when making each of your commits (see [above](#sign-your-work)). +1. **Fork** the repo [helm](https://github.com/helm/helm) + +Go to https://github.com/helm/helm then hit the `Fork` button to fork your own copy of repository **helm** to your github account. + +2. **Clone** the forked repo to your local working directory. +```sh +$ git clone https://github.com/$your_github_account/helm.git +``` +3. Add an `upstream` remote to keep your fork in sync with the main repo. +```sh +$ cd helm +$ git remote add upstream https://github.com/helm/helm.git +$ git remote -v + +origin https://github.com/$your_github_account/helm.git (fetch) +origin https://github.com/$your_github_account/helm.git (push) +upstream https://github.com/helm/helm.git (fetch) +upstream https://github.com/helm/helm.git (push) +``` +4. Sync your local `master` branch. +```sh +$ git pull upstream master +``` +5. Create a branch to add a new feature or fix issues. +```sh +$ git checkout -b new-feature +``` +6. Make any change on the branch `new-feature` then build and test your codes. +7. Include in what will be committed. +```sh +$ git add +``` +8. Use sign-off when making each of your commits (see [above](#sign-your-work)). If you forgot to sign some commits that are part of the contribution, you can ask [git to rewrite your commit history](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History). -1. Submit a pull request. +```sh +$ git commit --signoff +``` +9. Submit a pull request. 
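Putting the last few steps together: before opening the pull request, the feature branch usually needs to be rebased on the latest upstream `master` and pushed to your fork. A minimal sketch, reusing the `upstream`, `origin` and `new-feature` names from the steps above:

```sh
# Re-sync with upstream before publishing the branch
$ git fetch upstream
$ git rebase upstream/master

# Commit with the DCO sign-off, then push the branch to your fork
$ git commit --signoff -m "Short description of the change"
$ git push origin new-feature
# Open the pull request from the pushed branch on GitHub
```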
Coding conventions and standards are explained in the official developer docs: [Developers Guide](docs/developers.md) -The next section contains more information on the workflow followed for PRs +The next section contains more information on the workflow followed for Pull Requests. ## Pull Requests -Like any good open source project, we use Pull Requests to track code changes +Like any good open source project, we use Pull Requests (PRs) to track code changes. ### PR Lifecycle 1. PR creation + - PRs are usually created to fix or else be a subset of other PRs that fix a particular issue. - We more than welcome PRs that are currently in progress. They are a great way to keep track of important work that is in-flight, but useful for others to see. If a PR is a work in progress, it **must** be prefaced with "WIP: [title]". Once the PR is ready for review, remove "WIP" from the title. - - It is preferred, but not required, to have a PR tied to a specific issue. + - It is preferred, but not required, to have a PR tied to a specific issue. There can be + circumstances where if it is a quick fix then an issue might be overkill. The details provided + in the PR description would suffice in this case. 2. Triage - The maintainer in charge of triaging will apply the proper labels for the issue. This should include at least a size label, `bug` or `feature`, and `awaiting review` once all labels are applied. - See the [Labels section](#labels) for full details on the definitions of labels + See the [Labels section](#labels) for full details on the definitions of labels. - Add the PR to the correct milestone. This should be the same as the issue the PR closes. 3. Assigning reviews - Once a review has the `awaiting review` label, maintainers will review them as schedule permits. The maintainer who takes the issue should self-request a review. - - Reviews from others in the community, especially those who have encountered a bug or have - requested a feature, are highly encouraged, but not required. Maintainer reviews **are** required - before any merge - Any PR with the `size/large` label requires 2 review approvals from maintainers before it can be - merged. Those with `size/medium` are per the judgement of the maintainers + merged. Those with `size/medium` or `size/small` are per the judgement of the maintainers. 4. Reviewing/Discussion - Once a maintainer begins reviewing a PR, they will remove the `awaiting review` label and add the `in progress` label so the person submitting knows that it is being worked on. This is @@ -214,17 +248,26 @@ Like any good open source project, we use Pull Requests to track code changes - All reviews will be completed using Github review tool. - A "Comment" review should be used when there are questions about the code that should be answered, but that don't involve code changes. This type of review does not count as approval. - - A "Changes Requested" review indicates that changes to the code need to be made before they will be merged. - - Reviewers should update labels as needed (such as `needs rebase`) -5. Address comments by answering questions or changing code + - A "Changes Requested" review indicates that changes to the code need to be made before they will be + merged. + - Reviewers (maintainers) should update labels as needed (such as `needs rebase`). + - Reviews are also welcome from others in the community, especially those who have encountered a bug or + have requested a feature. 
In the code review, a message can be added, as well as `LGTM` if the PR is + good to merge. It’s also possible to add comments to specific lines in a file, for giving context + to the comment. +5. PR owner should try to be responsive to comments by answering questions or changing code. If the + owner is unsure of any comment, reach out to the person who added the comment in + [#helm-dev](https://kubernetes.slack.com/messages/C51E88VDG/). Once all comments have been addressed, + the PR is ready to be merged. 6. Merge or close - PRs should stay open until merged or if they have not been active for more than 30 days. This will help keep the PR queue to a manageable size and reduce noise. Should the PR need to stay open (like in the case of a WIP), the `keep open` label can be added. - - If the owner of the PR is listed in `OWNERS`, that user **must** merge their own PRs - or explicitly request another OWNER do that for them. - - If the owner of a PR is _not_ listed in `OWNERS`, any core committer may - merge the PR once it is approved. + - Before merging a PR, refer to the topic on [Size Labels](#size-labels) below to determine if + the PR requires more than one LGTM to merge. + - If the owner of the PR is listed in `OWNERS`, that user **must** merge their own PRs or explicitly + request another OWNER do that for them. + - If the owner of a PR is _not_ listed in `OWNERS`, any maintainer may merge the PR once it is approved. #### Documentation PRs @@ -235,7 +278,7 @@ Documentation PRs will follow the same lifecycle as other PRs. They will also be ## The Triager Each week, one of the core maintainers will serve as the designated "triager" starting after the -public standup meetings on Thursday. This person will be in charge triaging new PRs and issues +public stand-up meetings on Thursday. This person will be in charge triaging new PRs and issues throughout the work week. ## Labels @@ -261,7 +304,7 @@ The following tables define all label types used for Helm. It is split up by cat | `help wanted` | This issue is one the core maintainers cannot get to right now and would appreciate help with | | `proposal` | This issue is a proposal | | `question/support` | This issue is a support request or question | -| `starter` | This issue is a good for someone new to contributing to Helm | +| `good first issue` | This issue is a good for someone new to contributing to Helm | | `wont fix` | The issue has been discussed and will not be implemented (or accepted in the case of a proposal) | ### PR Specific @@ -279,13 +322,20 @@ The following tables define all label types used for Helm. It is split up by cat Size labels are used to indicate how "dangerous" a PR is. The guidelines below are used to assign the labels, but ultimately this can be changed by the maintainers. For example, even if a PR only makes -30 lines of changes in 1 file, but it changes key functionality, it will likely be labeled as `size/large` +30 lines of changes in 1 file, but it changes key functionality, it will likely be labeled as `size/L` because it requires sign off from multiple people. Conversely, a PR that adds a small feature, but requires -another 150 lines of tests to cover all cases, could be labeled as `size/small` even though the number +another 150 lines of tests to cover all cases, could be labeled as `size/S` even though the number lines is greater than defined below. +PRs submitted by a core maintainer, regardless of size, only requires approval from one additional +maintainer. 
This ensures there are at least two maintainers who are aware of any significant PRs +introduced to the codebase. + | Label | Description | | ----- | ----------- | -| `size/small` | Anything less than or equal to 4 files and 150 lines. Only small amounts of manual testing may be required | -| `size/medium` | Anything greater than `size/small` and less than or equal to 8 files and 300 lines. Manual validation should be required. | -| `size/large` | Anything greater than `size/medium`. This should be thoroughly tested before merging and always requires 2 approvals. This also should be applied to anything that is a significant logic change. | +| `size/XS` | Anything less than or equal to 9 lines ignoring generated files. Only small amounts of manual testing may be required. | +| `size/S` | Anything greater than `size/XS` less than or equal to 29 lines ignoring the generated files. Only small amounts of manual testing may be required. | +| `size/M` | Anything greater than `size/S` less than or equal to 99 lines ignoring the generated files. Manual validation should be required. | +| `size/L` | Anything greater than `size/M` less than or equal to 499 lines ignoring the generated files. This should be thoroughly tested before merging and always requires 2 approvals. This also should be applied to anything that is a significant logic change. | +| `size/XL` | Anything greater than `size/L` less than or equal to 999 lines ignoring the generated files. This should be thoroughly tested before merging and always requires 2 approvals. This also should be applied to anything that is a significant logic change. | +| `size/XXL` | Anything greater than `size/XL`. This should be thoroughly tested before merging and always requires 2 approvals. This also should be applied to anything that is a significant logic change. | diff --git a/KEYS b/KEYS new file mode 100644 index 000000000..e01cfdb9d --- /dev/null +++ b/KEYS @@ -0,0 +1,271 @@ +This file contains the PGP keys of developers who have signed releases of Helm. + +For your convenience, commands are provided for those who use pgp and gpg. + +For users to import keys: + pgp < KEYS + or + gpg --import KEYS + +Developers to add their keys: + pgp -kxa and append it to this file. 
+ or + (pgpk -ll && pgpk -xa ) >> KEYS + or + (gpg --list-sigs + && gpg --armor --export ) >> KEYS + +pub rsa4096/0x461449C25E36B98E 2017-11-10 [SC] + 672C657BE06B4B30969C4A57461449C25E36B98E +uid [ultimate] Matthew Farina +sig 3 0x461449C25E36B98E 2017-11-10 Matthew Farina +sig 0x2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein +sig 0x1EF612347F8A9958 2018-12-12 Adam Reese +sig 0x62F49E747D911B60 2018-12-12 Matt Butcher +sub rsa4096/0xCCCE67689DF05738 2017-11-10 [E] +sig 0x461449C25E36B98E 2017-11-10 Matthew Farina +sub rsa4096/0x9436E80BFBA46909 2017-11-10 [S] [expires: 2022-11-09] +sig 0x461449C25E36B98E 2017-11-10 Matthew Farina + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFoFERgBEADdhgM8EPo9fxnu2iW75r4uha2TrhWaO3EJIo53sa6U9nePIeWc +oWqjDZqYvIMJcylfocrVi4m6HdNcPrWo5pSWeKd8J9X8d4BUhoKFmJdHqWzgokwW +Rk06Doro2FHFyHoPPrI3a1HGVWA0xFhBYqSbim4j/Q0FouS566MofeRGnnacJ88z +Z7yErN5Gy4jk7pOgwvMewoGpEd8FMcyYSJfSjeoqdIZYp89EKTLbgQZuOJ9yVZnY +c0mtpH57UbkrkGv8hRuViWSO99q/mpMQyWQGYVoTV4QM/0q4jUbkRazaeY3N4hGC +I6Xf4ilWyNmmVODI6JcvWY+vXPtxIKjEjYiomVCF6jCYWWCA7cf3+kqJ+T4sc0NF +fseR/TAOkDV/XsZ1ufbSHBEiZTIjLvoAGJ+u+3go+UysVVCw4L1NSGFeDrZ97KSe +w0MeuV2SYfdZ4so7k4YDNbBLTVx0V/wl+laFtdjo167D18AYw54HIv3snHkjABfY +7Q06Ye7FuuKzdrj9KpmzUYnN3hRGqe84GIcM3D5+vElj0vyg8th32Dig5Xi38s0M +sz7hPg+oFk7csslMVAnLtWYvsv2FMSKB9FUHYv9AJ6yjYfyLlQgjjda0z6Sq5zpu +qVZqTNSxEIZFDKfTgQV6rocIK5VKP063KS6qwpHzPxKADaLTUPOWeum9/wARAQAB +tCRNYXR0aGV3IEZhcmluYSA8bWF0dEBtYXR0ZmFyaW5hLmNvbT6JAk4EEwEIADgW +IQRnLGV74GtLMJacSldGFEnCXja5jgUCWgURGAIbAwULCQgHAwUVCgkICwUWAwIB +AAIeAQIXgAAKCRBGFEnCXja5jjtQEADJvSx67Qz8gTxvUH3HaMsXaeb6BG3zLJXj +34pqAGNkKB4/ZgpFVYE1R0QuvYn9CbFpD1UcSank3L3xBroeOEUN3kvOg3D6Bv8f +mtwtW1TDjaWDTa0mZ8icanjXVNfK3K8pAwni2FPrW/tesEt/8GI48ZxPMzHk1qrL +8mETLRn1EBL3vq5qPDIK87XhhW9WAgwsadn6BQKSTSVVUACBAlV7EbqE4DHqhwYz +D1HrEIAtXkkb9JJejUnAbiOqPmm9s6iWC13K1P27FB8EEYiKxL8kb7xv5xW7+Pmg +kb03OqZtZYu9Fl1MF1zVQe4mXVflcbj7mYU1kb8vepD6bOUA89z8FggU2Q38cxkD +TYQsxpGwWz3nvEu29KbHmjQja1+G5D8kQ8bv1mNdiXQbOz51v2+7vowKKUoPQfp9 +n8Ez4dxWVrFtf218Mtt8wbYmmVYijLIBDArYKDeVqNNua8YC9641DcvRdCCvaYEx +Q9vWKjpAWmXKy2bb7TQ2TjGRh+Ly47z+PTluqUeYuBREAN4Hd4xwiClRbhb3I9To +YTJkPOkaOR967zBho5orA8xww4hcsufhjqsoU0/MGbG6jvJihHFR9Jq+0gVzakca +K8tGRSA8l5xdjow5dVOPzeXuKDPuvHEwa63TWsH5H8s6iembNT1H9bate8wQT1TN +9PH/6sthz4kCMwQQAQgAHRYhBFER2nPfEtjoEspGLyzbv7s3roIqBQJcET6LAAoJ +ECzbv7s3roIqozgQAIG5IqJ7hYjndCLW2MBLEa9oA04QSgF9qcqfiG00tjhBVwEK +YE6r7BUgC7r7dP1xVa/+5lVRATfiJ+Raq7udm/RQsamyp9Q8xBOuavPcJDZMX5m7 +OqPZMs+TDFPYM914GIWPAQf9ehaHHnmCNZXExxYlnZBPFsOcLYSNGH/xQeiA+q3F +tCOdRhjcpbt4rcx+Jq/l6X3cxstFwcYeljhvebblpwcVNJVArVrWZmosFl3rz3bs +PKfZKAvjV65knRkra73ZjN+YEYMMr6MzvVh/cnigk9XHgu5Y7imLv9qf1leyFCaa +oJoQDAcHIfs/eQmaEbYUyw/jX53/PyGqXlmkW7D3wqAGH5yx+ske7otCiaHHoTK0 +vHsEvO9b4dLtr0uMMNRO7St+3EtMa070s537XymG1HSeW8QbVEg/+w2YW5DyTe5p +WaNJS6WUc7UuIgEWvgitVxhUheZRumh5/EW673yI8iUchGslAuL1W5R1rXQfMPVA +BsI8D8pWs9EKjP4Lpu1Wgoxm0O4kaAxRbbHjrIYLtoRRrakr+kfqjZ/rJM89JQpl +NWNBZ61IDKROj7U2kLAxCJSB3RfAuqinyFGjxod7ENW7u6z0SCdupybbmylAfD+T +t3Z2DBB9tjxNnsgb2pbcm8cDGrJOZhIDdcVChvMXnHNxEmXbHvTKocci0t4viQIz +BBABCgAdFiEESdCchsPcjaPwoHYiHvYSNH+KmVgFAlwRP38ACgkQHvYSNH+KmVgP +rxAAkhggTXggRwpWzgU7PRsj347DqtH3f/2EfTOhAi6PGOiw2EFocTrx47WHAjs6 +XFT+c0yHCv58fGHKrrfeOT1VCjk2xf0NSdf00CTHO+DqepNiXzFYCJ0fUTL3w2JC +ugrfhwEdVH3TYJffFlmi0VZVCrGT3ZU1H+N/mVcd4FniOPWaGYoSG15iift4cAO/ +CynMFUbl5NYCuE/z9lR8o/3KSu7vuffLsvXdkxCX6fjxkSWcBKgH7ts7OWyPv9H1 +r/I295CoG9ZmeKVtScY7lamb+vOw9ryHbTACo0aprPQ1kCjr+3JIJdodNkRQvzZX +Ayxmc/zWSmPlJ7zjVkmoLaU7YmN7dPaVpQiELQGKhm/TyH++ZxoA4Rw4dwtqqk86 
++F5ncsqJ107IW7ce6lnZVEvUBD4DHkMRQQZOA9hWBxVeDznjXzfpNNTB07mtzArG +nrbbnNu3epUPthZlhQ8C+dZeBOfGzyr3Aj6CQqKMziiL2Tf4Coa7PhHRBs6rf1PD +xNhnnybCvaMJEMSyX6b/lqb967yVI6g3TXQvi0cGGvYmwEBOiKkXSRHtQBjC1Ocq +qUjzg1dvyfJu84S0kSt2oEHL5n1TAvIrwqNNOwS6CL0x2pSLOVhZmpummSqybvsF +YJjctDJvBA7URB9asMOK3CS6UsJaVzUFkybxaYIdUPylh1mJAjMEEAEKAB0WIQSr +olKVmPZibEINM1ti9J50fZEbYAUCXBE1mgAKCRBi9J50fZEbYEcVEACOTG1qO0m/ ++8T2S8rskKDrgoXMi22x3n4SqdKIA5TwWdWp18nVyXIxUWvI1cS73WupHNtEKTLc ++yObvNo1N3syj/5c14RcRLUcWTFKs596TcUP5/xNH33j0nFplKplBP4MegnduXsB +HibxiEycpkTFVxc3xbW9KeWSzqEHxxOXE1okL0SDWTj/oNRToaDc4zdm26veZd25 +ycxqRkksZZCPuczqb2SB/mDqHx1jl4z2B6CzN3OUzMk40a77xwZXKNGTO4+fMEOJ +Flch8YQXh+gPbS1F/Q7qCrQOkhoV3nI/0CxNgWNcPrUd52xtGHzgxbdrgT7L0XMO +/KmIu1O8E+znjOxcSAklwh1xLsT01193vbVyW2pcmmtqo1ku0taLlw4T7VHQNb88 +uOKucXlA10L2lFFnqBWLOuZDcVpgywMjIrKTPoEpDcVPaBUDQCFBZE9ogA/Edhlo +mxGxhtzG/O6wwFcLoleMH1Lf6zMxhwOAIvkWVjsuQ312uVy1RNY7b3UFrxOw8/qq +UBy6AFE/dp9PF8BIQ37NHKeAlvCexEedwJi4RwH0hUQkBhxBeNrTOEE7cCaZ9Shz +IWhPKxSRKKblYY4fpDzl2uMBwdetk9jfZF2ofoSOKXTVh+YJ8PzncD6xJVesbMIW +0aPkERdmz8JeGBclBR0miED+zidofWCgD7kCDQRaBREYARAAqiqhYIA3ci/sJ7y3 +mJaQ/lsL2nsy+RgW52ETpLp3tIO2r3rxNn7CB/kJhPimDIo4OJSV2bl3Sr2llgwX +PrBQ+Z5bCUV70uc1U0vvJEW/r9tkyOu3YV7VXWXtaQWkCgxIqWgNJvU5A/9/6vz9 +u1RdMZwxpjy/4HuWvHYRXlJmeeca/BEoaYWMRlECuJjIBcAzuVJTlKBT7x7U4Ptc +qqZGbzr0+zU39y1kMXu/ayldlsF3k6DKYZYNaa8cKNqorV0FqBVm1JZSjiAAWqGp +tmYxUmv/riY6cP28tP3G6noH1XqzEvZ3fdYIsGM29YQ1Y1vrVrrBVju/aMzss498 +czxMtp8e0sudHt+ommUDkA2WBEPuqJPIcOj+7bvFiv6smyxcU8VmsyEapknq+Dq8 +wG0w3fGsRdy8puc5COz/3xuiFlHQ97wtnnmyWbmdQmx7EfZcGWFfnK6HwEXAbcjO +aaFwSISK8ROgqoKfTss6/8Go+vbmtKJQH2w1fQArnPHGu9qFM/sBNhZ+ieiZ6x1H +CdU3qvuycFZMSsMhk4ER2vJdeJ8tu2jUhMOIuA/VUgUblCJkAaBE9wXaiibCZ/XT +XBXVb81v+EpLsoc5G/wrg35D5U/Gqqc+KAABK2zHa4L7rIs6jb2daeRrUBytsWm2 +Exq5sE1Uf5mioHtZpbr6rKIGzT0AEQEAAYkCNgQYAQgAIBYhBGcsZXvga0swlpxK +V0YUScJeNrmOBQJaBREYAhsMAAoJEEYUScJeNrmOb2oQALYcLV3wFFR5v9zpEPdS +haOIpYyuFBkN0FoID+w7Hb7R3pyl7c6nLI9tyFEkJBM1faGke8vKj6HZSfcyX1Lo +2rBL+yW7Gu8z3uEbkTnPFew9LnutGFuFTnbpVdLcpsbm2lG5yhdmjvJBKI4CfX4Z +UFlhyGtwqsl+1lpUgvOuMI2HjyHcFbzkhiSRDQvtXCgJu6orjzEvqiKNM4MM7PMJ +AwU0Lf3NV/p1H2mFllfotmXVZ/TjXuGcOYH56gcf4XpkuD5Vb2Qhu7IbR6TneC5j +yPdC0yQYcXqrpYhNBmlbXIoEL1m0xXhrFVPxS3QeMfkhQOqjvhaxBGCt29YJaTfQ +ugN7I1YfEJIxTap8xzEdJ+80YL3iNCIzaWSsd/xUKpobHSsu4RU1cv//S+5qD3WZ +NfcUoBgmfPC7NXCoKrEVXk5QKh3efKnAkMQrxdWRiwSuenf4Yk4fWXcTyCXsMPVB +qjcZRuOpow7tU9AuBoMyJ1XrznHoubdnc29iGN51Hrhvp/uNxjsCgPgQtpL/8znk +dgfzXU5CYJDYHa6fubUTHVZfLKbzBEI2XY1nqVu+QEO86tkY9Ef4PFMknThTAJDC +ph3xIx/sBb5s3c/XH9JgWEiyO3rMEzZecgF34OJgwnc5gl63a4k1cF0cxzkCZYi3 +k6XI/RkkRzdN1CSdCapbDJDvuQINBFoFEeUBEAChZUqlI7FLQIY6GEo0bhJ4oMp2 +jQi22zb9ZmqqcmRbWfNKfCfm/cXNDabccqzPRTWezq6hVYYPz6cSnzXpxPBIQufZ +IoMVLKDbTS0RTFVwQsYu9qGdZ52J2bq6qMWK0I2n6lECNkbOB0bZ3aPxe3yw4McP +6u+SU+b0ArMvIGqq1cmKSpkAQB0kBK/gGzEj26d30jMSN393BZ/ESEs7PZyaie3O +CdT71Cmh6xNxv0IwmgbUo54diXL9hEYTrI3hPyCKFeAoiTjlpz9ah7DPoOHgd9lD +Rd4a6VdMrdz7m5aFWo/NVuoty9spGYLG0p9N7zSaUAdO/96mn+W18hbL7EkU7/Db +Ubt5ZP34YOI46aI8YRZKiTq6NI4WglZDxu9PFGoCx4lyvhgKOwcQHySverAyb0Y1 +qeNCL9uk6oBHB2bXlAhBBOORtL5rGD+ICCuCV4g1ZEoN7sJBMxNMXORzRZ1crdlr +10lld/Mg0udl2Hgatfx+i+Y0ae/W0Ibr417H5q7iHr85ivTQ6mRU3hMuzQSoWZK8 +vixjvOK401Gre22q5jq1IPinACcu6VUto9Wbo8C1msSsWgHrqLRFeqp18BoIVY5s +QCvcsGlyD7MdJQohpmJ7al/kNVOidhGf7TtcSolWF7gLZacMRYbGWhbDhpOIhIpl +jiWTg8oWRl9KPbwzBQARAQABiQRyBBgBCAAmFiEEZyxle+BrSzCWnEpXRhRJwl42 +uY4FAloFEeUCGwIFCQlmAYACQAkQRhRJwl42uY7BdCAEGQEIAB0WIQRxHyjVEOHg +vL1fa/6UNugL+6RpCQUCWgUR5QAKCRCUNugL+6RpCSgsD/40XzObgPRpbIRQaJL1 
+FgynrXUh3dJHdqB5Yi/pYshFuI+nnjpAGTyYyk75WlfvUmzY4HgNmh9yCjWketc0 +SdulPkWQ093Y38bQ9WGVQ7NLnZ47AUTuImqEdKcR4wu9F3nGD+cyNWE5fao62tYd +hlzrP1rLz8kALtswc9PVYLEKnqNCBtlGoWdeW7K1lYVG4666/uYvHzOzsUQ0MqVT +HDjpvxEcVRA0EW47m2TVj6IYAsM+0J93aFRr4OKXf4bu1ejxRz4Pdx73QsjeZwlN +5F4FpnmegdUbNR3azeGcF0qiOjPCNu3xi5lDFPKCRZLnCAqMsvv92Z/GWryNAuDj +H9tsmbDUwYXc1QUbdsu+p2jVm79yPgJUIvcy/kwOd0/GYUDOme2NvhF252aOO6Mt +OnTCrQoX0mIY/IisIjwi+2LEpQVyNDu7AGu581LYFGhBDUqiy5CyQ2neHS+k9iq2 +06dVdqETpiybizUZm2aQ8FlRV0j6PVKrqAzi0cMYJC+Gh/fNvx61goJ1tEDdh+LK +Mw0Js7OCtH7Wu1D0U/qDl3137PIBSv10BZ3SkbZDqivV5YhyGhvEewiXsbamE6VZ +AHGZ5pfd/0tkqAW9UQqw1AdqYBsAtE4yeU63xPcz7B4VyyIdRNxnjQiEg+SEpDyy +Gl2kGtt+cIbEYZovTrrW2cM0FzGhD/4rRIDfd+IvhZ86BbYoIv4oreiZVjIhFAYI +7e0DfVliBXNOHFErghu3FisUrfTM5g7RHA0Snk8OGO/Yu2mSXYKVvygIlfi3i+7B +0eZxhZEOsHXgO3v4WtY5/67Q1XXF9J7MY9Ke9gqp0E8HRFsECfEoSCRdaaic5PIT +veUEkHs6q6W+J5ULNTqdWsmSdgNWQh3Zbhh0Ih9m9nioAlZHaKnEZXGt8GsUimr7 +ffRuYgxF+kuWT8UwQu0Tc47QrYgZIpxH4WI6Rc6qKAo/4DLK2Q3Y15kJFqi8He0t +U7fWXMtrdQxxkz94WTFokISVVRZxSfZ8VkGjVHAgk6NVBgp+2zjiwfwS16qbOUOY +ikR3WTCbyStdePLaXgAFxA7g/pl5/f0IF3/IoGdTGjWoRqnBZG7NfP7bYF1CKe4f +a87Z47LriyL70BFosJqBNMJUEorS9w8sBbnmMUdpGMyk7PH386W95ib7AEOtRttL +uzYetY4LljxgMsloRgYX+Kg5i6fkntG6rod8LNYg7jWObWaIqlPoTo1RNoujYAnE +qdCDQHoUOgtZ4v6+QaxI3WV1KPBsPb7SAjuphubIQVK/6qHse9OoWVwWAABXHFqX +2qV4dyq6mq87ohTcRrZqt64ekD8H3Qe4xkYSzsWZTc0qovhs+G+dSTJ709xuV2EP ++YMbPW0/IQ== +=g11H +-----END PGP PUBLIC KEY BLOCK----- + +pub rsa4096 2019-05-15 [SC] + F1261BDE929012C8FF2E501D6EA5D7598529A53E +uid [ultimate] Martin Hickey +sig 3 6EA5D7598529A53E 2019-05-15 Martin Hickey +sub rsa4096 2019-05-15 [E] +sig 6EA5D7598529A53E 2019-05-15 Martin Hickey + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFzcLlgBEACsmjtsbfMuKiKBl3yV5FsQBxvmNyhIwUJMtjgm5CMFcOLD+jDw +mExfsE8sM5fqfS5P7NFHn3V6NY/GyKNH3DZHGhYwDw/vG6JfHo1s9IzhjySuWEtL +7GUCJBKXk2cDfk4p0lHRgEtoYjG/sRMgk3y7WTR/W0McxllcrQQBB3RREbz8y7r7 +atJCeec36SSZgXqsyXAESx5dx7qRTdIwObPTCGxBdj2ZkgzT3D35EExdi9I8oM6L +bYOyUPy0aEj/FX6HVBOIWNGB0z8TYXjwY6/3gJG1JhaFZK1zvYogJ3p8jO07bTwo +/AzYAG4NoV4TqTyFPmb0d0+wE+lZOWA3FfF0YtYnNe3KPmPJZ/TXdTO6kle24UTy +Q9GK2s8QB3V9NA09/YoSF1qdjRfL5jo7XnRJztfFgIqW118I4EKSF+kz3hCMxH1Y +iCvHIHFQs+WX6g1bXHDI8JWe7VDiCVYwMxap8o/vtEKoETH9fjOEO/f/YF68hqpX +7eYTacDEV72qikHz/O0hNyeS1m/AnavPrd5RQi53vOT/KhwM+wC4a1bAywQUDZDW +KkSEkTqjzcSryj3DJR6EZ9y4F11Kt4TZoxHvh59UCcVyaTZPl/YdcRWom6eGo/5U +K1MFeF7fTK9ZVuJnvG6av2/W7Sbz9KaJxLHhUNAQ+ytdVkN9xfXrx1HP7QARAQAB +tChNYXJ0aW4gSGlja2V5IDxtYXJ0aW4uaGlja2V5QGllLmlibS5jb20+iQJOBBMB +CgA4FiEE8SYb3pKQEsj/LlAdbqXXWYUppT4FAlzcLlgCGwMFCwkIBwIGFQoJCAsC +BBYCAwECHgECF4AACgkQbqXXWYUppT5IFA//b64QqKN/ookqqeKEUMUOMoZUTi2t +4HPtzX/nqOXDb0zyIyaJaJlgxz+LuoN8CrSrwnmTY/ibKsFS7xkFRIeKYSb9b2no +NPb8F0SVtxYFQJ8d4WU1snAWFJd8aMe3+z8w15Mqz1Sd1lS/sN5s101rbh8jtFZD +NnAZqyfUgIhVq243XfhP4/mHPinpXjjF+APlMbdsOqnWgxzp8E9hpCd/YLb6KY0j +JbwryzH52ha9ZDMdMipH557+Xutcl4Wyn8RsJy38J0qBvy2p8AMZIYotw6pSCedi +7Iva+EitGSXXgRWbR6O68JvUgrFDOjcPKSQy7AlwhTase+b4OA9c3DgSxR5SMBR6 +OLYaIuDeVY2Zjr0ydFdxrfQzlHget7axRH0aaMimyCNfRa3HJea8ffF/Ssv2meUF +IPIhYLn7SBrVoTISu38S6WkhBBkDiHAW7nqV+mWR3cnVjIzIjW56bI06NZ4kqtvk +D9TX7b+KV20cSjjbSGI70023oHFoJSpLsj9+otvPwNrYC2oD0qTLBfNMkpcktnnw +I2uynQrPNbQVeA+cKrECJeyl2yAC4WXvP4ZefvFZX6RnL9HiiZ+pDyBt6Yq3A9AA +NhRd8zEAKNwH88tFmWMinTzCZz04bKvql+E7A3MAaR8WS3BG3JfLXMqOKiMfCHr5 +4Gn3rD4UGtFfxoy5Ag0EXNwuWAEQAKuxVJDOjG+xuaaO2Z/6BQfTaz6/zgzql/pR +UHInKSt5ts2LGdRhfvsNBzGBhoneLWZ8PivHRGSZFsFj5Nzy9/DIkopdHSZhP/zB +aqihHgFJTKxKBfrhP60bYQGBkHNMVwqbFuck24DUCzrMyJXG15f252aY7ByCIIem 
+SHbmPww5q6HPEPS+hHE4ka4N4s+vqL+oK8ktq7lnZCX+AZ4jIuMAoh/C851hLcr5 +EK+a6tXa2yRJtJfj44GX6+nBVm2w+3eHqOpD7JM7NqWmo41+qg3t2J3zHQf/0ejP +ej+OcVdEBD5zlJL+CNZ9PCMBUOrb+IbqY3ybmJieipOJtOCY8nwUyCueyTmq1tso +OwUsGB9hIsVY11wNgoNgrA6PhExGxcM5S/0Rt4+y/pwFjnqYLXBXyBSjXzzmpjhn +zERjmANlI8QLKHDdShgboDUt3Ynw+D/peTS9iJMIPuUTrcGcKgw4+6FNKACnJ5l7 +Wvz7apgD8QmxnSZMquul23bGihhbQMITWvdF5KEHE06Ah1bOzB3KXBEVx00Y0tO/ +hsY8XH4T/pEKv9FsIF6R4o2k/xm6jR9eZutABVIrizMHkZzjjo1ZC8b15olrZvLa +/DtNHzV5nPPSvGZPcey9BYk6b5GGCfT/EiWtJz8Nxm7/cCYRvuuZnGCxriH6XPww +v8kPNihfABEBAAGJAjYEGAEKACAWIQTxJhvekpASyP8uUB1upddZhSmlPgUCXNwu +WAIbDAAKCRBupddZhSmlPikmD/9UrspSeSjwaXSj2vCpO1pWm6ryVQc2ZzyMnXvq +j5HLwzaVsN8HM/YADK5FL6qqhxrROOZdSHjS92sxk2Rab23gGRKbwDUJmerheZ4B +ZXG40fDOPv45PZ8V0Kn9bzliNpPBFPjoaI8X1AKoIXyUqEy98Y/zhnLDhW/+yPrO +gznPfO5ds75+u4xOx9pTfGpdwt6qhfCdNHUoZWsAw/6pafqrCIvbHjGvmMJyYENS +dl6sPYBeiDkJkH67sGvJghjedhNznnXJ8+sm701eTqZkmpxzc0jvzwgnnYb0rAzS +uU3QNj9w5HcGQd/pk29Ui8A4VWLJOUcDCVa/CIQMQqQDPYJKxaj7XgE+dQ9MxQ3a +O0wgpEo2+4BaZ4I/qP8CgaE9q4IopMhNKPR1IeEFUmTsIzLVAktS/InshFWWUp5e +mEss8kiqxU9bAGZvWopllCaPJQTDZElQpW84Z0afyVLPp47CoKcXBSMsITFt3mRf +ZXAA6h8UlSgC7FV1YT4p6qsHqQ3cLERdTSrQFLmaCb2yRCR2V9d0RiMaIwUmnbld +g1jeR4weO3LLghuWpfZHruDrDU2ZvOAObQIQdHBFmCHejA/gilf0MUdJ1h2gApuJ +m3MUub704EDCTSqz9LJc+4/NbA2esZj7mExCtsMEqaoHW7BU4ws6BRHTyeHgi+Le +1qneNQ== +=oCPv +-----END PGP PUBLIC KEY BLOCK----- + +pub rsa4096 2018-03-14 [SC] + 967F8AC5E2216F9F4FD270AD92AA783CBAAE8E3B +uid [ultimate] Matthew Fisher +sig 3 92AA783CBAAE8E3B 2018-03-14 Matthew Fisher +sub rsa4096 2018-03-14 [E] +sig 92AA783CBAAE8E3B 2018-03-14 Matthew Fisher + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFqpgxYBEAC1+yf/KFw2AQlineurz7Oz8NyYMlx1JnxZvMOFrL6jbZGyyzyy +jBX5Ii++79Wq1T3BL+F/UFhgruQbbzL8SiAc8Q55Ec7z/BVxM7iQPLCnFRqztllx +Ia1D1dZ9aFIw4P92kQgOQGPOgIxFRwEPA0ZX5nbZfL/teNhphW7vHaauk9xEJddm +Pyy3l9xCRIKQVMwuCaLeH0ZZpBllddwuRV4ptlQ30MpOnaalQda9/j3VhNFEX8Nj +nu8GHn+f4Lzy6XmhHb++JB3AIo5ZfwaUS2xMrnObtvmGHR3+uP/kblh9MzZlmL4T +ldclyGaV7z9Z/xGwnX/+r7xna/fr3mey3GXm29BOP2sUBBQCba05X5nYUd2TjWsZ +OZtE6sLuzUzeOTLEDu28IJoiaYnLKDNzDmuVM26xAYVWXUdCGgn+1rAp0t5OGgHm +qTexvPmckgp3yw+tcPUkR6nh0ft7pmeoK53AQHMt6fk7plZCTuu5UvxZE/oDzt4X +w9+vSTD5GzsNGrTYLTYUSL0muK+iM/uuJtFNJUREOucXfmWxulUsxwOB0st7hnLs +4JmFSr3av1en1WqqdiXswOrdK2msTm4J2+fsOU1jnyF//RJmj+1KPpRDCBTzpAFS +SzE/rRaLZBVE8k2vT0L6yBXvGJ2ONK9TkGT5fnyXu8zDu1d2Koj0c+6m9wARAQAB +tCpNYXR0aGV3IEZpc2hlciA8bWF0dC5maXNoZXJAbWljcm9zb2Z0LmNvbT6JAk4E +EwEIADgWIQSWf4rF4iFvn0/ScK2Sqng8uq6OOwUCWqmDFgIbAwULCQgHAgYVCgkI +CwIEFgIDAQIeAQIXgAAKCRCSqng8uq6OOyTsD/979LDS7ONHIHNoRf7Uud40To0S +/domtZM0rXUCBdbe5R4/xah0HvM1u8aN4OC6U7i0LCXSmEOZxQLKxKBWfX4/d6k7 +lBwuQBSlcM6cM6nDfPInT0C3o8caP8lOGeNAdOkMxrqiEO4gHNP5BvWCV+jQSU5X +uvGhKNTMcpaf+DqZAFbR6zpdL7t5JCK0B0RRhFfaGWb19t3REukI5OF5M5SN7EtQ +XWK/1fyzsltrjTSXgMWuxtJjBchltjme/S3XpHeeoSCm1WWh3a140tCC662ydU1u +EZIlUrn8dfMpH0BY6bb0/4dhHvCJ3bw+zZoCzFJM/LksjP5i+Q4mUOD8PvFWh5aS +46F827YiMdqD/eDMr1QRe66fPw5EtWTHgnf3PX+NmN8lgn2o280AkRXqkrCgl580 +B+lFwZ6hfan2F8RIHXNbF+9Zvc7Nh8bG8s4I8s6uiufmsmOuFdp47J4//q1W0HcU +0fqajDnEhExtGkgwIsum1Ndwq2sWZT/ko7PYyC3J6mbr/MXTvd2TxtnMgG6kpyPv +p3HlDaBw1aO5vO5mji4RTsoZi12MITIyvPsFWh0WtXkJLNaJ30bFSEx5fiJILxu0 +bBoBK0LUhB1Q+8G3Kea3+q3MuOQFnFfjPlMH6q84jpU5Lv5BaW17IeZ2kIfVYrcG +vBvtZ5VHDzY4EhGmlbkCDQRaqYMWARAA3wYv6jbE1PjXwIUWSSO9zxQLBKg7Cn7d +g+wwKx+N5DHjSdQBous6DGwN/wEZfXJOn14S9Yg4p4owmiyJDn0oqJ0BLdsMELoO +imCIZ+zn3AjCWdk2b0oCOhyTwhaVhVgi8yMQruMSUG9/3lkVoFae/GMC32nmE2A0 +BOnj9fVIhIrDKt9OSeTXXRNVaRvNFo9ry8S1hDxgfQ2unD6J0mMPhLH2O7CRZDFW 
+FyH09E/rhrIDvI3Z7mZw2ufGKR0YEu7fJ0BBBSbIqUOMsUnQNWomb2j/QZyYmhTS +Hg9YRB807H3b+5GuZim+DSUk5DQV2IENEg9LDYvhDftE5COYB3tZUnvEpOvNybBl +URxD8Kgqlb3j93l2FcD1QrIGW5VCmkkuD612ZG+NjMq0ZXlQjv6gxAYir8GTKkWt +tS1OatDm6qe6xEFypT6nlvxOYFxLeFkVVGt4H4QW6+MXvnwMofL0G6fOhRvdlq3R +US9n3WqzTpCwfvJs2lhYi+c3/2nwCx5G42OT9Ix0UFkYwxhGk6PRleKOMsw28PFr +a8DVjyKGOVn+9auVhPXYQcN0sZqFl8LBDkUtaniiRD4WKH91aKYgmX1qo8sJZMhx +t/ZoHOfoHDEEa+kLqfsWu3htyTP1gleCAA8kDcRiy1v/G8v3+p2ioI6q1qegigbr +AqTHcWNOltcAEQEAAYkCNgQYAQgAIBYhBJZ/isXiIW+fT9JwrZKqeDy6ro47BQJa +qYMWAhsMAAoJEJKqeDy6ro47T7gP/j/3R9hPg+kJCErlEKPqxsEOxxlaHx+f4UGg +Zm+P6QK2SrqbrqcPhoKUXeHlbCMm2euxKTonIawgCIr44kCZvp3B8pCGUCR+M0mf +aXGO1O6EJ3MmtlbXJ+OyBAhxpklUWdM6favuzi62fAmvwEKQf1reG/9r+toJb5N4 +KwrrdZNUaLJWhb6D0fwB+1fWJbdRnDO1rozcA+YJGhhunpxF2b2nZ5OtqNuGmbqV +ofxL6/0lM4HqLNcUBlUyQihjk1+hzfWji95SlzIxP2EhH6gJh/e+/EDCaVVV00CM +0n/0dEB25nAuSMGgUx2utNmfCUP84IErGzSUlXdzN20aW5xiBFU3/uSWyz80IGuy +WeyRzksmphGdLwef+sWLKGrOJh+DkOxxpFMRaIqGEG2YViQCg3gyzjiJuI/XAdlK +AhqwVKfRke24vgifd1tN+zeFs+m28Hpw7989vky1hDvqdpK5/fiJfqIBsF0jir/H +AgtqmbiqemX9rUa3uDkBsvyu+Ou41l+wL6ahj9Pnu0+9hQnpeZERIyhq4LWn7gGb +xk5y63wrvGbeS5lev//012oSzWQfSdFWqQVzMTVtOojGFWgvwRCwZiWEPQkRIV5r +VNXtXPUdKiOEkWin01ZrwDPEyBjr3pcnu2mbgLeJETODnCRi79KA5kCtd65JbNF7 +Qknjx8fW +=jz9T +-----END PGP PUBLIC KEY BLOCK----- diff --git a/Makefile b/Makefile index 0677cafe4..fc175a781 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,10 @@ DOCKER_REGISTRY ?= gcr.io IMAGE_PREFIX ?= kubernetes-helm -DEV_IMAGE ?= golang:1.11 +DEV_IMAGE ?= golang:1.12.5 SHORT_NAME ?= tiller SHORT_NAME_RUDDER ?= rudder TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64le linux/s390x windows/amd64 +TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 linux-ppc64le.tar.gz linux-ppc64le.tar.gz.sha256 linux-s390x.tar.gz linux-s390x.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256 DIST_DIRS = find * -type d -exec # go option @@ -44,10 +45,25 @@ dist: $(DIST_DIRS) zip -r helm-${VERSION}-{}.zip {} \; \ ) +.PHONY: fetch-dist +fetch-dist: + mkdir -p _dist + cd _dist && \ + for obj in ${TARGET_OBJS} ; do \ + curl -sSL -o helm-${VERSION}-$${obj} https://get.helm.sh/helm-${VERSION}-$${obj} ; \ + done + +.PHONY: sign +sign: + for f in _dist/*.{gz,zip,sha256} ; do \ + gpg --armor --detach-sign $${f} ; \ + done + .PHONY: checksum checksum: for f in _dist/*.{gz,zip} ; do \ shasum -a 256 "$${f}" | awk '{print $$1}' > "$${f}.sha256" ; \ + echo -n "Checksum: " && cat $${f}.sha256 ; \ done .PHONY: check-docker @@ -125,6 +141,10 @@ docker-test-style: check-docker protoc: $(MAKE) -C _proto/ all +.PHONY: generate +generate: + $(GO) generate ./... 
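As a usage note for the new `fetch-dist`, `sign` and `checksum` targets: a release manager listed in KEYS would roughly pull the published archives and detach-sign them, and a user would then check a download against the resulting `.sha256` and `.asc` files. This is only a sketch under a few assumptions — the release tag is illustrative, the signer's public key from KEYS is already in the local GPG keyring, and the `.sha256` files produced above contain only the bare digest (so it is compared by eye rather than with `shasum -c`).

```sh
# Release manager: pull the published archives and produce detached signatures
$ make fetch-dist sign VERSION=v2.14.0   # VERSION is an illustrative release tag

# User: verify a downloaded archive against its checksum and signature
$ gpg --import KEYS
$ shasum -a 256 helm-v2.14.0-linux-amd64.tar.gz    # compare with the .sha256 file,
$ cat helm-v2.14.0-linux-amd64.tar.gz.sha256       # which holds only the bare digest
$ gpg --verify helm-v2.14.0-linux-amd64.tar.gz.asc helm-v2.14.0-linux-amd64.tar.gz
```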
+ .PHONY: docs docs: build @scripts/update-docs.sh diff --git a/OWNERS b/OWNERS index 39f8c8448..715c8993d 100644 --- a/OWNERS +++ b/OWNERS @@ -1,31 +1,20 @@ maintainers: - - adamreese - - bacongobbler - - jascott1 - - mattfarina - - michelleN - - nebril - - prydonius - - SlickNik - - technosophos - - thomastaylor312 - - viglesiasce -reviewers: - adamreese - bacongobbler - fibonacci1729 - - jascott1 + - hickeyma + - jdolitsky - mattfarina - michelleN - - migmartri - - nebril - prydonius - SlickNik - technosophos - thomastaylor312 - viglesiasce emeritus: + - jascott1 - migmartri + - nebril - seh - vaikas-google - rimusz diff --git a/README.md b/README.md index 1cf345e57..3262ba54f 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,7 @@ Think of it like apt/yum/homebrew for Kubernetes. ## Install + Binary downloads of the Helm client can be found on [the Releases page](https://github.com/helm/helm/releases/latest). Unpack the `helm` binary and add it to your PATH and you are good to go! @@ -40,7 +41,10 @@ If you want to use a package manager: - [Homebrew](https://brew.sh/) users can use `brew install kubernetes-helm`. - [Chocolatey](https://chocolatey.org/) users can use `choco install kubernetes-helm`. +- [Scoop](https://scoop.sh/) users can use `scoop install helm`. - [GoFish](https://gofi.sh/) users can use `gofish install helm`. +- [Snap](https://snapcraft.io/) users can use `sudo snap install helm + --classic`. To rapidly get Helm up and running, start with the [Quick Start Guide](https://docs.helm.sh/using_helm/#quickstart-guide). diff --git a/_proto/README.md b/_proto/README.md index 0ebd6b6a4..f43c1e11e 100644 --- a/_proto/README.md +++ b/_proto/README.md @@ -3,8 +3,8 @@ Protobuf3 type declarations for the Helm API Packages - - `hapi.chart` Complete serialization of Heml charts + - `hapi.chart` Complete serialization of Helm charts - `hapi.release` Information about installed charts (Releases) such as metadata about when they were installed, their status, and how they were configured. - `hapi.services.rudder` Definition for the ReleaseModuleService used by Tiller to manipulate releases on a given node - `hapi.services.tiller` Definition of the ReleaseService provided by Tiller and used by Helm clients to manipulate releases cluster wide. - - `hapi.version` Version meta-data used by tiller to express it's version + - `hapi.version` Version metadata used by Tiller to express its version diff --git a/_proto/hapi/release/hook.proto b/_proto/hapi/release/hook.proto index cf7e25bf6..77fbbe5d5 100644 --- a/_proto/hapi/release/hook.proto +++ b/_proto/hapi/release/hook.proto @@ -56,4 +56,6 @@ message Hook { int32 weight = 7; // DeletePolicies are the policies that indicate when to delete the hook repeated DeletePolicy delete_policies = 8; + // DeleteTimeout indicates how long to wait for a resource to be deleted before timing out + int64 delete_timeout = 9; } diff --git a/_proto/hapi/release/status.proto b/_proto/hapi/release/status.proto index aa90760b3..6cc16b351 100644 --- a/_proto/hapi/release/status.proto +++ b/_proto/hapi/release/status.proto @@ -29,7 +29,7 @@ message Status { UNKNOWN = 0; // Status_DEPLOYED indicates that the release has been pushed to Kubernetes. DEPLOYED = 1; - // Status_DELETED indicates that a release has been deleted from Kubermetes. + // Status_DELETED indicates that a release has been deleted from Kubernetes. DELETED = 2; // Status_SUPERSEDED indicates that this release object is outdated and a newer one exists. 
SUPERSEDED = 3; @@ -41,7 +41,7 @@ message Status { PENDING_INSTALL = 6; // Status_PENDING_UPGRADE indicates that an upgrade operation is underway. PENDING_UPGRADE = 7; - // Status_PENDING_ROLLBACK indicates that an rollback operation is underway. + // Status_PENDING_ROLLBACK indicates that a rollback operation is underway. PENDING_ROLLBACK = 8; } diff --git a/_proto/hapi/rudder/rudder.proto b/_proto/hapi/rudder/rudder.proto index 188491512..3f3d8030d 100644 --- a/_proto/hapi/rudder/rudder.proto +++ b/_proto/hapi/rudder/rudder.proto @@ -92,6 +92,7 @@ message UpgradeReleaseRequest{ bool Wait = 4; bool Recreate = 5; bool Force = 6; + bool CleanupOnFail = 7; } message UpgradeReleaseResponse{ hapi.release.Release release = 1; @@ -105,6 +106,7 @@ message RollbackReleaseRequest{ bool Wait = 4; bool Recreate = 5; bool Force = 6; + bool CleanupOnFail = 7; } message RollbackReleaseResponse{ hapi.release.Release release = 1; diff --git a/_proto/hapi/services/tiller.proto b/_proto/hapi/services/tiller.proto index 8eba963e4..1d0cc7ec6 100644 --- a/_proto/hapi/services/tiller.proto +++ b/_proto/hapi/services/tiller.proto @@ -76,7 +76,7 @@ service ReleaseService { rpc RollbackRelease(RollbackReleaseRequest) returns (RollbackReleaseResponse) { } - // ReleaseHistory retrieves a releasse's history. + // ReleaseHistory retrieves a release's history. rpc GetHistory(GetHistoryRequest) returns (GetHistoryResponse) { } @@ -212,6 +212,10 @@ message UpdateReleaseRequest { bool force = 11; // Description, if set, will set the description for the updated release string description = 12; + // Render subchart notes if enabled + bool subNotes = 13; + // Allow deletion of new resources created in this update when update failed + bool cleanup_on_fail = 14; } // UpdateReleaseResponse is the response to an update request. @@ -239,6 +243,8 @@ message RollbackReleaseRequest { bool force = 8; // Description, if set, will set the description for the rollback string description = 9; + // Allow deletion of new resources created in this rollback when rollback failed + bool cleanup_on_fail = 10; } // RollbackReleaseResponse is the response to an update request. @@ -281,6 +287,9 @@ message InstallReleaseRequest { // Description, if set, will set the description for the installed release string description = 11; + + bool subNotes = 12; + } // InstallReleaseResponse is the response from a release installation. @@ -298,7 +307,7 @@ message UninstallReleaseRequest { bool purge = 3; // timeout specifies the max amount of time any kubernetes client command can run. int64 timeout = 4; - // Description, if set, will set the description for the uninnstalled release + // Description, if set, will set the description for the uninstalled release string description = 5; } diff --git a/cmd/helm/completion.go b/cmd/helm/completion.go index 2181e723c..2b44dca5e 100644 --- a/cmd/helm/completion.go +++ b/cmd/helm/completion.go @@ -126,13 +126,6 @@ __helm_compgen() { __helm_compopt() { true # don't do anything. 
Not supported by bashcompinit in zsh } -__helm_declare() { - if [ "$1" == "-F" ]; then - whence -w "$@" - else - builtin declare "$@" - fi -} __helm_ltrim_colon_completions() { if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then @@ -194,7 +187,7 @@ autoload -U +X bashcompinit && bashcompinit # use word boundary patterns for BSD or GNU sed LWORD='[[:<:]]' RWORD='[[:>:]]' -if sed --help 2>&1 | grep -q GNU; then +if sed --help 2>&1 | grep -q 'GNU\|BusyBox'; then LWORD='\<' RWORD='\>' fi @@ -210,8 +203,10 @@ __helm_convert_bash_to_zsh() { -e "s/${LWORD}__ltrim_colon_completions${RWORD}/__helm_ltrim_colon_completions/g" \ -e "s/${LWORD}compgen${RWORD}/__helm_compgen/g" \ -e "s/${LWORD}compopt${RWORD}/__helm_compopt/g" \ - -e "s/${LWORD}declare${RWORD}/__helm_declare/g" \ + -e "s/${LWORD}declare${RWORD}/builtin declare/g" \ -e "s/\\\$(type${RWORD}/\$(__helm_type/g" \ + -e 's/aliashash\["\(.\{1,\}\)"\]/aliashash[\1]/g' \ + -e 's/FUNCNAME/funcstack/g' \ <<'BASH_COMPLETION_EOF' ` out.Write([]byte(zshInitialization)) diff --git a/cmd/helm/create.go b/cmd/helm/create.go index 0d278c8b5..8ecffdba6 100644 --- a/cmd/helm/create.go +++ b/cmd/helm/create.go @@ -31,7 +31,8 @@ import ( const createDesc = ` This command creates a chart directory along with the common files and -directories used in a chart. +directories used in a chart. It provides a basic example and is not +meant to cover all Kubernetes resources. For example, 'helm create foo' will create a directory structure that looks something like this: @@ -54,6 +55,10 @@ something like this: do not exist, Helm will attempt to create them as it goes. If the given destination exists and there are files in that directory, conflicting files will be overwritten, but other files will be left alone. + +The chart that is created by invoking this command contains a Deployment, Ingress +and a Service. To use other Kubernetes resources with your chart, refer to +[The Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide). 
` type createCmd struct { @@ -68,7 +73,7 @@ func newCreateCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "create NAME", - Short: "create a new chart with the given name", + Short: "Create a new chart with the given name", Long: createDesc, RunE: func(cmd *cobra.Command, args []string) error { cc.home = settings.Home @@ -83,7 +88,7 @@ func newCreateCmd(out io.Writer) *cobra.Command { }, } - cmd.Flags().StringVarP(&cc.starter, "starter", "p", "", "the named Helm starter scaffold") + cmd.Flags().StringVarP(&cc.starter, "starter", "p", "", "The name or absolute path to Helm starter scaffold") return cmd } @@ -101,6 +106,10 @@ func (c *createCmd) run() error { if c.starter != "" { // Create from the starter lstarter := filepath.Join(c.home.Starters(), c.starter) + // If path is absolute, we don't want to prefix it with helm starters folder + if filepath.IsAbs(c.starter) { + lstarter = c.starter + } return chartutil.CreateFrom(cfile, filepath.Dir(c.name), lstarter) } diff --git a/cmd/helm/create_test.go b/cmd/helm/create_test.go index c9459b477..fb118ba58 100644 --- a/cmd/helm/create_test.go +++ b/cmd/helm/create_test.go @@ -143,7 +143,99 @@ func TestCreateStarterCmd(t *testing.T) { t.Errorf("Wrong API version: %q", c.Metadata.ApiVersion) } - expectedTemplateCount := 7 + expectedTemplateCount := 8 + if l := len(c.Templates); l != expectedTemplateCount { + t.Errorf("Expected %d templates, got %d", expectedTemplateCount, l) + } + + found := false + for _, tpl := range c.Templates { + if tpl.Name == "templates/foo.tpl" { + found = true + data := tpl.Data + if string(data) != "test" { + t.Errorf("Expected template 'test', got %q", string(data)) + } + } + } + if !found { + t.Error("Did not find foo.tpl") + } + +} + +func TestCreateStarterAbsoluteCmd(t *testing.T) { + cname := "testchart" + // Make a temp dir + tdir, err := ioutil.TempDir("", "helm-create-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tdir) + + thome, err := tempHelmHome(t) + if err != nil { + t.Fatal(err) + } + cleanup := resetEnv() + defer func() { + os.RemoveAll(thome.String()) + cleanup() + }() + + settings.Home = thome + + // Create a starter. 
+ starterchart := filepath.Join(tdir, "starters") + os.Mkdir(starterchart, 0755) + if dest, err := chartutil.Create(&chart.Metadata{Name: "starterchart"}, starterchart); err != nil { + t.Fatalf("Could not create chart: %s", err) + } else { + t.Logf("Created %s", dest) + } + tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl") + if err := ioutil.WriteFile(tplpath, []byte("test"), 0755); err != nil { + t.Fatalf("Could not write template: %s", err) + } + + // CD into it + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(tdir); err != nil { + t.Fatal(err) + } + defer os.Chdir(pwd) + + // Run a create + cmd := newCreateCmd(ioutil.Discard) + cmd.ParseFlags([]string{"--starter", filepath.Join(starterchart, "starterchart")}) + if err := cmd.RunE(cmd, []string{cname}); err != nil { + t.Errorf("Failed to run create: %s", err) + return + } + + // Test that the chart is there + if fi, err := os.Stat(cname); err != nil { + t.Fatalf("no chart directory: %s", err) + } else if !fi.IsDir() { + t.Fatalf("chart is not directory") + } + + c, err := chartutil.LoadDir(cname) + if err != nil { + t.Fatal(err) + } + + if c.Metadata.Name != cname { + t.Errorf("Expected %q name, got %q", cname, c.Metadata.Name) + } + if c.Metadata.ApiVersion != chartutil.ApiVersionV1 { + t.Errorf("Wrong API version: %q", c.Metadata.ApiVersion) + } + + expectedTemplateCount := 8 if l := len(c.Templates); l != expectedTemplateCount { t.Errorf("Expected %d templates, got %d", expectedTemplateCount, l) } diff --git a/cmd/helm/delete.go b/cmd/helm/delete.go old mode 100755 new mode 100644 index 4f52ffdd9..6aa1c2a4e --- a/cmd/helm/delete.go +++ b/cmd/helm/delete.go @@ -56,7 +56,7 @@ func newDeleteCmd(c helm.Interface, out io.Writer) *cobra.Command { Use: "delete [flags] RELEASE_NAME [...]", Aliases: []string{"del"}, SuggestFor: []string{"remove", "rm"}, - Short: "given a release name, delete the release from Kubernetes", + Short: "Given a release name, delete the release from Kubernetes", Long: deleteDesc, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -79,11 +79,11 @@ func newDeleteCmd(c helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.BoolVar(&del.dryRun, "dry-run", false, "simulate a delete") - f.BoolVar(&del.disableHooks, "no-hooks", false, "prevent hooks from running during deletion") - f.BoolVar(&del.purge, "purge", false, "remove the release from the store and make its name free for later use") - f.Int64Var(&del.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.StringVar(&del.description, "description", "", "specify a description for the release") + f.BoolVar(&del.dryRun, "dry-run", false, "Simulate a delete") + f.BoolVar(&del.disableHooks, "no-hooks", false, "Prevent hooks from running during deletion") + f.BoolVar(&del.purge, "purge", false, "Remove the release from the store and make its name free for later use") + f.Int64Var(&del.timeout, "timeout", 300, "Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.StringVar(&del.description, "description", "", "Specify a description for the release") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/dependency.go b/cmd/helm/dependency.go index 1e3079ded..58686950e 100644 --- a/cmd/helm/dependency.go +++ b/cmd/helm/dependency.go @@ -73,7 +73,7 @@ the dependency charts 
stored locally. The path should start with a prefix of repository: "file://../dependency_chart/nginx" If the dependency chart is retrieved locally, it is not required to have the -repository added to helm by "helm add repo". Version matching is also supported +repository added to helm by "helm repo add". Version matching is also supported for this case. ` @@ -91,7 +91,7 @@ func newDependencyCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "dependency update|build|list", Aliases: []string{"dep", "dependencies"}, - Short: "manage a chart's dependencies", + Short: "Manage a chart's dependencies", Long: dependencyDesc, } @@ -113,7 +113,7 @@ func newDependencyListCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "list [flags] CHART", Aliases: []string{"ls"}, - Short: "list the dependencies for the given chart", + Short: "List the dependencies for the given chart", Long: dependencyListDesc, RunE: func(cmd *cobra.Command, args []string) error { cp := "." diff --git a/cmd/helm/dependency_build.go b/cmd/helm/dependency_build.go index 3af3c1243..6b4fd58e6 100644 --- a/cmd/helm/dependency_build.go +++ b/cmd/helm/dependency_build.go @@ -29,11 +29,11 @@ const dependencyBuildDesc = ` Build out the charts/ directory from the requirements.lock file. Build is used to reconstruct a chart's dependencies to the state specified in -the lock file. This will not re-negotiate dependencies, as 'helm dependency update' -does. +the lock file. -If no lock file is found, 'helm dependency build' will mirror the behavior -of 'helm dependency update'. +If no lock file is found, 'helm dependency build' will mirror the behavior of +the 'helm dependency update' command. This means it will update the on-disk +dependencies to mirror the requirements.yaml file and generate a lock file. ` type dependencyBuildCmd struct { @@ -49,7 +49,7 @@ func newDependencyBuildCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "build [flags] CHART", - Short: "rebuild the charts/ directory based on the requirements.lock file", + Short: "Rebuild the charts/ directory based on the requirements.lock file", Long: dependencyBuildDesc, RunE: func(cmd *cobra.Command, args []string) error { dbc.helmhome = settings.Home @@ -63,8 +63,8 @@ func newDependencyBuildCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.BoolVar(&dbc.verify, "verify", false, "verify the packages against signatures") - f.StringVar(&dbc.keyring, "keyring", defaultKeyring(), "keyring containing public keys") + f.BoolVar(&dbc.verify, "verify", false, "Verify the packages against signatures") + f.StringVar(&dbc.keyring, "keyring", defaultKeyring(), "Keyring containing public keys") return cmd } diff --git a/cmd/helm/dependency_update.go b/cmd/helm/dependency_update.go index a8e54137b..1be29ea93 100644 --- a/cmd/helm/dependency_update.go +++ b/cmd/helm/dependency_update.go @@ -57,7 +57,7 @@ func newDependencyUpdateCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "update [flags] CHART", Aliases: []string{"up"}, - Short: "update charts/ based on the contents of requirements.yaml", + Short: "Update charts/ based on the contents of requirements.yaml", Long: dependencyUpDesc, RunE: func(cmd *cobra.Command, args []string) error { cp := "." 
@@ -78,9 +78,9 @@ func newDependencyUpdateCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.BoolVar(&duc.verify, "verify", false, "verify the packages against signatures") - f.StringVar(&duc.keyring, "keyring", defaultKeyring(), "keyring containing public keys") - f.BoolVar(&duc.skipRefresh, "skip-refresh", false, "do not refresh the local repository cache") + f.BoolVar(&duc.verify, "verify", false, "Verify the packages against signatures") + f.StringVar(&duc.keyring, "keyring", defaultKeyring(), "Keyring containing public keys") + f.BoolVar(&duc.skipRefresh, "skip-refresh", false, "Do not refresh the local repository cache") return cmd } diff --git a/cmd/helm/docs.go b/cmd/helm/docs.go index 56e3beaf5..80c10b95a 100644 --- a/cmd/helm/docs.go +++ b/cmd/helm/docs.go @@ -59,8 +59,8 @@ func newDocsCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVar(&dc.dest, "dir", "./", "directory to which documentation is written") - f.StringVar(&dc.docTypeString, "type", "markdown", "the type of documentation to generate (markdown, man, bash)") + f.StringVar(&dc.dest, "dir", "./", "Directory to which documentation is written") + f.StringVar(&dc.docTypeString, "type", "markdown", "The type of documentation to generate (markdown, man, bash)") return cmd } diff --git a/cmd/helm/fetch.go b/cmd/helm/fetch.go index d6f622bb6..bc1c07cb7 100644 --- a/cmd/helm/fetch.go +++ b/cmd/helm/fetch.go @@ -73,7 +73,7 @@ func newFetchCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "fetch [flags] [chart URL | repo/chartname] [...]", - Short: "download a chart from a repository and (optionally) unpack it in local directory", + Short: "Download a chart from a repository and (optionally) unpack it in local directory", Long: fetchDesc, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { @@ -96,20 +96,20 @@ func newFetchCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.BoolVar(&fch.untar, "untar", false, "if set to true, will untar the chart after downloading it") - f.StringVar(&fch.untardir, "untardir", ".", "if untar is specified, this flag specifies the name of the directory into which the chart is expanded") - f.BoolVar(&fch.verify, "verify", false, "verify the package against its signature") - f.BoolVar(&fch.verifyLater, "prov", false, "fetch the provenance file, but don't perform verification") - f.StringVar(&fch.version, "version", "", "specific version of a chart. Without this, the latest version is fetched") - f.StringVar(&fch.keyring, "keyring", defaultKeyring(), "keyring containing public keys") - f.StringVarP(&fch.destdir, "destination", "d", ".", "location to write the chart. If this and tardir are specified, tardir is appended to this") - f.StringVar(&fch.repoURL, "repo", "", "chart repository url where to locate the requested chart") - f.StringVar(&fch.certFile, "cert-file", "", "identify HTTPS client using this SSL certificate file") - f.StringVar(&fch.keyFile, "key-file", "", "identify HTTPS client using this SSL key file") - f.StringVar(&fch.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") - f.BoolVar(&fch.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. 
If --version is set, this is ignored.") - f.StringVar(&fch.username, "username", "", "chart repository username") - f.StringVar(&fch.password, "password", "", "chart repository password") + f.BoolVar(&fch.untar, "untar", false, "If set to true, will untar the chart after downloading it") + f.StringVar(&fch.untardir, "untardir", ".", "If untar is specified, this flag specifies the name of the directory into which the chart is expanded") + f.BoolVar(&fch.verify, "verify", false, "Verify the package against its signature") + f.BoolVar(&fch.verifyLater, "prov", false, "Fetch the provenance file, but don't perform verification") + f.StringVar(&fch.version, "version", "", "Specific version of a chart. Without this, the latest version is fetched") + f.StringVar(&fch.keyring, "keyring", defaultKeyring(), "Keyring containing public keys") + f.StringVarP(&fch.destdir, "destination", "d", ".", "Location to write the chart. If this and tardir are specified, tardir is appended to this") + f.StringVar(&fch.repoURL, "repo", "", "Chart repository url where to locate the requested chart") + f.StringVar(&fch.certFile, "cert-file", "", "Identify HTTPS client using this SSL certificate file") + f.StringVar(&fch.keyFile, "key-file", "", "Identify HTTPS client using this SSL key file") + f.StringVar(&fch.caFile, "ca-file", "", "Verify certificates of HTTPS-enabled servers using this CA bundle") + f.BoolVar(&fch.devel, "devel", false, "Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.") + f.StringVar(&fch.username, "username", "", "Chart repository username") + f.StringVar(&fch.password, "password", "", "Chart repository password") return cmd } diff --git a/cmd/helm/get.go b/cmd/helm/get.go index 20a4c042f..5cd0acdd0 100644 --- a/cmd/helm/get.go +++ b/cmd/helm/get.go @@ -41,10 +41,11 @@ chart, the supplied values, and the generated manifest file. 
var errReleaseRequired = errors.New("release name is required") type getCmd struct { - release string - out io.Writer - client helm.Interface - version int32 + release string + out io.Writer + client helm.Interface + version int32 + template string } func newGetCmd(client helm.Interface, out io.Writer) *cobra.Command { @@ -55,7 +56,7 @@ func newGetCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "get [flags] RELEASE_NAME", - Short: "download a named release", + Short: "Download a named release", Long: getHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -72,7 +73,8 @@ func newGetCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&get.version, "revision", 0, "get the named release with revision") + f.Int32Var(&get.version, "revision", 0, "Get the named release with revision") + f.StringVar(&get.template, "template", "", "Go template for formatting the output, eg: {{.Release.Name}}") cmd.AddCommand(newGetValuesCmd(nil, out)) cmd.AddCommand(newGetManifestCmd(nil, out)) @@ -91,5 +93,9 @@ func (g *getCmd) run() error { if err != nil { return prettyError(err) } + + if g.template != "" { + return tpl(g.template, res, g.out) + } return printRelease(g.out, res.Release) } diff --git a/cmd/helm/get_hooks.go b/cmd/helm/get_hooks.go index 2706f381c..76592e0c2 100644 --- a/cmd/helm/get_hooks.go +++ b/cmd/helm/get_hooks.go @@ -45,7 +45,7 @@ func newGetHooksCmd(client helm.Interface, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ Use: "hooks [flags] RELEASE_NAME", - Short: "download all hooks for a named release", + Short: "Download all hooks for a named release", Long: getHooksHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -59,7 +59,7 @@ func newGetHooksCmd(client helm.Interface, out io.Writer) *cobra.Command { } f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&ghc.version, "revision", 0, "get the named release with revision") + f.Int32Var(&ghc.version, "revision", 0, "Get the named release with revision") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/get_manifest.go b/cmd/helm/get_manifest.go index 1cc7e3543..24580c015 100644 --- a/cmd/helm/get_manifest.go +++ b/cmd/helm/get_manifest.go @@ -47,7 +47,7 @@ func newGetManifestCmd(client helm.Interface, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ Use: "manifest [flags] RELEASE_NAME", - Short: "download the manifest for a named release", + Short: "Download the manifest for a named release", Long: getManifestHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -62,7 +62,7 @@ func newGetManifestCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&get.version, "revision", 0, "get the named release with revision") + f.Int32Var(&get.version, "revision", 0, "Get the named release with revision") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/get_notes.go b/cmd/helm/get_notes.go index c7c3d7797..04142f297 100644 --- a/cmd/helm/get_notes.go +++ b/cmd/helm/get_notes.go @@ -44,7 +44,7 @@ func newGetNotesCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "notes [flags] RELEASE_NAME", - Short: "displays the notes of 
the named release", + Short: "Displays the notes of the named release", Long: getNotesHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -61,7 +61,7 @@ func newGetNotesCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&get.version, "revision", 0, "get the notes of the named release with revision") + f.Int32Var(&get.version, "revision", 0, "Get the notes of the named release with revision") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/get_test.go b/cmd/helm/get_test.go index cb230a8a5..d83c85e1e 100644 --- a/cmd/helm/get_test.go +++ b/cmd/helm/get_test.go @@ -35,6 +35,13 @@ func TestGetCmd(t *testing.T) { expected: "REVISION: 1\nRELEASED: (.*)\nCHART: foo-0.1.0-beta.1\nUSER-SUPPLIED VALUES:\nname: \"value\"\nCOMPUTED VALUES:\nname: value\n\nHOOKS:\n---\n# pre-install-hook\n" + helm.MockHookTemplate + "\nMANIFEST:", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "thomas-guide"})}, }, + { + name: "get with a formatted release", + resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "elevated-turkey"}), + args: []string{"elevated-turkey", "--template", "{{.Release.Chart.Metadata.Version}}"}, + expected: "0.1.0-beta.1", + rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "elevated-turkey"})}, + }, { name: "get requires release name arg", err: true, diff --git a/cmd/helm/get_values.go b/cmd/helm/get_values.go index 7cdfa636f..30acc8081 100644 --- a/cmd/helm/get_values.go +++ b/cmd/helm/get_values.go @@ -47,7 +47,7 @@ func newGetValuesCmd(client helm.Interface, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ Use: "values [flags] RELEASE_NAME", - Short: "download the values file for a named release", + Short: "Download the values file for a named release", Long: getValuesHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -62,9 +62,9 @@ func newGetValuesCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&get.version, "revision", 0, "get the named release with revision") - f.BoolVarP(&get.allValues, "all", "a", false, "dump all (computed) values") - f.StringVar(&get.output, "output", "yaml", "output the specified format (json or yaml)") + f.Int32Var(&get.version, "revision", 0, "Get the named release with revision") + f.BoolVarP(&get.allValues, "all", "a", false, "Dump all (computed) values") + f.StringVar(&get.output, "output", "yaml", "Output the specified format (json or yaml)") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/get_values_test.go b/cmd/helm/get_values_test.go index aec5ce0c2..40b46bfda 100644 --- a/cmd/helm/get_values_test.go +++ b/cmd/helm/get_values_test.go @@ -29,8 +29,11 @@ import ( func TestGetValuesCmd(t *testing.T) { releaseWithValues := helm.ReleaseMock(&helm.MockReleaseOptions{ - Name: "thomas-guide", - Chart: &chart.Chart{Values: &chart.Config{Raw: `foo2: "bar2"`}}, + Name: "thomas-guide", + Chart: &chart.Chart{ + Metadata: &chart.Metadata{Name: "thomas-guide-chart-name"}, + Values: &chart.Config{Raw: `foo2: "bar2"`}, + }, Config: &chart.Config{Raw: `foo: "bar"`}, }) diff --git a/cmd/helm/helm.go b/cmd/helm/helm.go index f7628e44c..f3b8fc215 100644 --- a/cmd/helm/helm.go +++ b/cmd/helm/helm.go @@ -39,6 +39,104 @@ import ( "k8s.io/helm/pkg/tlsutil" ) 
+const ( + bashCompletionFunc = ` +__helm_override_flag_list=(--kubeconfig --kube-context --host --tiller-namespace --home) +__helm_override_flags() +{ + local ${__helm_override_flag_list[*]##*-} two_word_of of var + for w in "${words[@]}"; do + if [ -n "${two_word_of}" ]; then + eval "${two_word_of##*-}=\"${two_word_of}=\${w}\"" + two_word_of= + continue + fi + for of in "${__helm_override_flag_list[@]}"; do + case "${w}" in + ${of}=*) + eval "${of##*-}=\"${w}\"" + ;; + ${of}) + two_word_of="${of}" + ;; + esac + done + done + for var in "${__helm_override_flag_list[@]##*-}"; do + if eval "test -n \"\$${var}\""; then + eval "echo \${${var}}" + fi + done +} + +__helm_binary_name() +{ + local helm_binary + helm_binary="${words[0]}" + __helm_debug "${FUNCNAME[0]}: helm_binary is ${helm_binary}" + echo ${helm_binary} +} + +__helm_list_releases() +{ + __helm_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + local out filter + # Use ^ to map from the start of the release name + filter="^${words[c]}" + # Use eval in case helm_binary_name or __helm_override_flags contains a variable (e.g., $HOME/bin/h2) + if out=$(eval $(__helm_binary_name) list $(__helm_override_flags) -a -q -m 1000 ${filter} 2>/dev/null); then + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__helm_list_repos() +{ + __helm_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + local out oflags + oflags=$(__helm_override_flags) + __helm_debug "${FUNCNAME[0]}: __helm_override_flags are ${oflags}" + # Use eval in case helm_binary_name contains a variable (e.g., $HOME/bin/h2) + if out=$(eval $(__helm_binary_name) repo list ${oflags} 2>/dev/null | tail +2 | cut -f1); then + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__helm_list_plugins() +{ + __helm_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + local out oflags + oflags=$(__helm_override_flags) + __helm_debug "${FUNCNAME[0]}: __helm_override_flags are ${oflags}" + # Use eval in case helm_binary_name contains a variable (e.g., $HOME/bin/h2) + if out=$(eval $(__helm_binary_name) plugin list ${oflags} 2>/dev/null | tail +2 | cut -f1); then + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__helm_custom_func() +{ + __helm_debug "${FUNCNAME[0]}: c is $c words[@] is ${words[@]}" + case ${last_command} in + helm_delete | helm_history | helm_status | helm_test |\ + helm_upgrade | helm_rollback | helm_get_*) + __helm_list_releases + return + ;; + helm_repo_remove | helm_repo_update) + __helm_list_repos + return + ;; + helm_plugin_remove | helm_plugin_update) + __helm_list_plugins + return + ;; + *) + ;; + esac +} +` +) + var ( tillerTunnel *kube.Tunnel settings helm_env.EnvSettings @@ -55,24 +153,25 @@ It will also set up any necessary local configuration. Common actions from this point include: -- helm search: search for charts -- helm fetch: download a chart to your local directory to view -- helm install: upload the chart to Kubernetes -- helm list: list releases of charts +- helm search: Search for charts +- helm fetch: Download a chart to your local directory to view +- helm install: Upload the chart to Kubernetes +- helm list: List releases of charts Environment: - $HELM_HOME set an alternative location for Helm files. By default, these are stored in ~/.helm - $HELM_HOST set an alternative Tiller host. The format is host:port - $HELM_NO_PLUGINS disable plugins. Set HELM_NO_PLUGINS=1 to disable plugins. 
- $TILLER_NAMESPACE set an alternative Tiller namespace (default "kube-system") - $KUBECONFIG set an alternative Kubernetes configuration file (default "~/.kube/config") - $HELM_TLS_CA_CERT path to TLS CA certificate used to verify the Helm client and Tiller server certificates (default "$HELM_HOME/ca.pem") - $HELM_TLS_CERT path to TLS client certificate file for authenticating to Tiller (default "$HELM_HOME/cert.pem") - $HELM_TLS_KEY path to TLS client key file for authenticating to Tiller (default "$HELM_HOME/key.pem") - $HELM_TLS_VERIFY enable TLS connection between Helm and Tiller and verify Tiller server certificate (default "false") - $HELM_TLS_ENABLE enable TLS connection between Helm and Tiller (default "false") - $HELM_KEY_PASSPHRASE set HELM_KEY_PASSPHRASE to the passphrase of your PGP private key. If set, you will not be prompted for - the passphrase while signing helm charts + +- $HELM_HOME: Set an alternative location for Helm files. By default, these are stored in ~/.helm +- $HELM_HOST: Set an alternative Tiller host. The format is host:port +- $HELM_NO_PLUGINS: Disable plugins. Set HELM_NO_PLUGINS=1 to disable plugins. +- $TILLER_NAMESPACE: Set an alternative Tiller namespace (default "kube-system") +- $KUBECONFIG: Set an alternative Kubernetes configuration file (default "~/.kube/config") +- $HELM_TLS_CA_CERT: Path to TLS CA certificate used to verify the Helm client and Tiller server certificates (default "$HELM_HOME/ca.pem") +- $HELM_TLS_CERT: Path to TLS client certificate file for authenticating to Tiller (default "$HELM_HOME/cert.pem") +- $HELM_TLS_KEY: Path to TLS client key file for authenticating to Tiller (default "$HELM_HOME/key.pem") +- $HELM_TLS_ENABLE: Enable TLS connection between Helm and Tiller (default "false") +- $HELM_TLS_VERIFY: Enable TLS connection between Helm and Tiller and verify Tiller server certificate (default "false") +- $HELM_TLS_HOSTNAME: The hostname or IP address used to verify the Tiller server certificate (default "127.0.0.1") +- $HELM_KEY_PASSPHRASE: Set HELM_KEY_PASSPHRASE to the passphrase of your PGP private key. 
If set, you will not be prompted for the passphrase while signing helm charts ` @@ -102,6 +201,7 @@ func newRootCmd(args []string) *cobra.Command { PersistentPostRun: func(*cobra.Command, []string) { teardown() }, + BashCompletionFunction: bashCompletionFunc, } flags := cmd.PersistentFlags() @@ -146,7 +246,7 @@ func newRootCmd(args []string) *cobra.Command { newDocsCmd(out), // Deprecated - markDeprecated(newRepoUpdateCmd(out), "use 'helm repo update'\n"), + markDeprecated(newRepoUpdateCmd(out), "Use 'helm repo update'\n"), ) flags.Parse(args) diff --git a/cmd/helm/helm_test.go b/cmd/helm/helm_test.go index 6e915fa7b..83f1173f2 100644 --- a/cmd/helm/helm_test.go +++ b/cmd/helm/helm_test.go @@ -30,6 +30,7 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/util/homedir" + "k8s.io/helm/cmd/helm/installer" "k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm/environment" "k8s.io/helm/pkg/helm/helmpath" @@ -137,7 +138,7 @@ func ensureTestHome(home helmpath.Home, t *testing.T) error { } } - localRepoIndexFile := home.LocalRepository(localRepositoryIndexFile) + localRepoIndexFile := home.LocalRepository(installer.LocalRepositoryIndexFile) if fi, err := os.Stat(localRepoIndexFile); err != nil { i := repo.NewIndexFile() if err := i.WriteFile(localRepoIndexFile, 0644); err != nil { diff --git a/cmd/helm/history.go b/cmd/helm/history.go index 71210f677..f992e5aea 100644 --- a/cmd/helm/history.go +++ b/cmd/helm/history.go @@ -73,7 +73,7 @@ func newHistoryCmd(c helm.Interface, w io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "history [flags] RELEASE_NAME", Long: historyHelp, - Short: "fetch release history", + Short: "Fetch release history", Aliases: []string{"hist"}, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -90,9 +90,9 @@ func newHistoryCmd(c helm.Interface, w io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&his.max, "max", 256, "maximum number of revision to include in history") - f.UintVar(&his.colWidth, "col-width", 60, "specifies the max column width of output") - f.StringVarP(&his.outputFormat, "output", "o", "table", "prints the output in the specified format (json|table|yaml)") + f.Int32Var(&his.max, "max", 256, "Maximum number of revisions to include in history") + f.UintVar(&his.colWidth, "col-width", 60, "Specifies the max column width of output") + f.StringVarP(&his.outputFormat, "output", "o", "table", "Prints the output in the specified format (json|table|yaml)") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/home.go b/cmd/helm/home.go index ca21088a7..c2aeef995 100644 --- a/cmd/helm/home.go +++ b/cmd/helm/home.go @@ -31,7 +31,7 @@ any helm configuration files live. 
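The root command in helm.go now attaches the `bashCompletionFunc` script shown earlier in this hunk via cobra's `BashCompletionFunction` field, and `__helm_custom_func` dispatches to the release, repo, and plugin listers depending on the subcommand being completed. The following is a self-contained sketch of the same wiring with a toy custom function; the command names are placeholders, and the script is emitted with cobra's `GenBashCompletion`, which is the usual way such a script is produced for users to source.

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

// A toy custom completion hook in the same style as __helm_custom_func.
// Cobra's generated script calls the custom hook (named __custom_func or
// __<root>_custom_func, depending on the cobra release) when its static
// completions produce no match for the last command.
const demoBashCompletionFunc = `
__demo_custom_func()
{
	case ${last_command} in
	demo_status)
		COMPREPLY=( $(compgen -W "alpha beta" -- "$cur") )
		;;
	esac
}
`

func main() {
	root := &cobra.Command{
		Use:                    "demo",
		BashCompletionFunction: demoBashCompletionFunc,
	}
	root.AddCommand(&cobra.Command{Use: "status", Run: func(*cobra.Command, []string) {}})

	// Emit the completion script to stdout; a user would source this in bash.
	if err := root.GenBashCompletion(os.Stdout); err != nil {
		panic(err)
	}
}
```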
func newHomeCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "home", - Short: "displays the location of HELM_HOME", + Short: "Displays the location of HELM_HOME", Long: longHomeHelp, Run: func(cmd *cobra.Command, args []string) { h := settings.Home diff --git a/cmd/helm/init.go b/cmd/helm/init.go index b628dc008..4dcb434f3 100644 --- a/cmd/helm/init.go +++ b/cmd/helm/init.go @@ -31,11 +31,10 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/helm/cmd/helm/installer" - "k8s.io/helm/pkg/getter" "k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/helm/portforwarder" - "k8s.io/helm/pkg/repo" + "k8s.io/helm/pkg/version" ) const initDesc = ` @@ -59,12 +58,6 @@ To dump a manifest containing the Tiller deployment YAML, combine the '--dry-run' and '--debug' flags. ` -const ( - stableRepository = "stable" - localRepository = "local" - localRepositoryIndexFile = "index.yaml" -) - var ( stableRepositoryURL = "https://kubernetes-charts.storage.googleapis.com" // This is the IPv4 loopback, not localhost, because we have to force IPv4 @@ -103,7 +96,7 @@ func newInitCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "init", - Short: "initialize Helm on both client and server", + Short: "Initialize Helm on both client and server", Long: initDesc, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 0 { @@ -118,38 +111,38 @@ func newInitCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVarP(&i.image, "tiller-image", "i", "", "override Tiller image") - f.BoolVar(&i.canary, "canary-image", false, "use the canary Tiller image") - f.BoolVar(&i.upgrade, "upgrade", false, "upgrade if Tiller is already installed") - f.BoolVar(&i.forceUpgrade, "force-upgrade", false, "force upgrade of Tiller to the current helm version") - f.BoolVarP(&i.clientOnly, "client-only", "c", false, "if set does not install Tiller") - f.BoolVar(&i.dryRun, "dry-run", false, "do not install local or remote") - f.BoolVar(&i.skipRefresh, "skip-refresh", false, "do not refresh (download) the local repository cache") - f.BoolVar(&i.wait, "wait", false, "block until Tiller is running and ready to receive requests") + f.StringVarP(&i.image, "tiller-image", "i", "", "Override Tiller image") + f.BoolVar(&i.canary, "canary-image", false, "Use the canary Tiller image") + f.BoolVar(&i.upgrade, "upgrade", false, "Upgrade if Tiller is already installed") + f.BoolVar(&i.forceUpgrade, "force-upgrade", false, "Force upgrade of Tiller to the current helm version") + f.BoolVarP(&i.clientOnly, "client-only", "c", false, "If set does not install Tiller") + f.BoolVar(&i.dryRun, "dry-run", false, "Do not install local or remote") + f.BoolVar(&i.skipRefresh, "skip-refresh", false, "Do not refresh (download) the local repository cache") + f.BoolVar(&i.wait, "wait", false, "Block until Tiller is running and ready to receive requests") // TODO: replace TLS flags with pkg/helm/environment.AddFlagsTLS() in Helm 3 // // NOTE (bacongobbler): we can't do this in Helm 2 because the flag names differ, and `helm init --tls-ca-cert` // doesn't conform with the rest of the TLS flag names (should be --tiller-tls-ca-cert in Helm 3) - f.BoolVar(&tlsEnable, "tiller-tls", false, "install Tiller with TLS enabled") - f.BoolVar(&tlsVerify, "tiller-tls-verify", false, "install Tiller with TLS enabled and to verify remote certificates") - f.StringVar(&tlsKeyFile, "tiller-tls-key", "", "path to TLS key file to install with Tiller") - f.StringVar(&tlsCertFile, "tiller-tls-cert", "", "path to TLS 
certificate file to install with Tiller") - f.StringVar(&tlsCaCertFile, "tls-ca-cert", "", "path to CA root certificate") - f.StringVar(&tlsServerName, "tiller-tls-hostname", settings.TillerHost, "the server name used to verify the hostname on the returned certificates from Tiller") + f.BoolVar(&tlsEnable, "tiller-tls", false, "Install Tiller with TLS enabled") + f.BoolVar(&tlsVerify, "tiller-tls-verify", false, "Install Tiller with TLS enabled and to verify remote certificates") + f.StringVar(&tlsKeyFile, "tiller-tls-key", "", "Path to TLS key file to install with Tiller") + f.StringVar(&tlsCertFile, "tiller-tls-cert", "", "Path to TLS certificate file to install with Tiller") + f.StringVar(&tlsCaCertFile, "tls-ca-cert", "", "Path to CA root certificate") + f.StringVar(&tlsServerName, "tiller-tls-hostname", settings.TillerHost, "The server name used to verify the hostname on the returned certificates from Tiller") f.StringVar(&stableRepositoryURL, "stable-repo-url", stableRepositoryURL, "URL for stable repository") f.StringVar(&localRepositoryURL, "local-repo-url", localRepositoryURL, "URL for local repository") - f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host") - f.StringVar(&i.serviceAccount, "service-account", "", "name of service account") - f.IntVar(&i.maxHistory, "history-max", 0, "limit the maximum number of revisions saved per release. Use 0 for no limit.") - f.IntVar(&i.replicas, "replicas", 1, "amount of tiller instances to run on the cluster") + f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "Install Tiller with net=host") + f.StringVar(&i.serviceAccount, "service-account", "", "Name of service account") + f.IntVar(&i.maxHistory, "history-max", 0, "Limit the maximum number of revisions saved per release. 
Use 0 for no limit.") + f.IntVar(&i.replicas, "replicas", 1, "Amount of tiller instances to run on the cluster") - f.StringVar(&i.opts.NodeSelectors, "node-selectors", "", "labels to specify the node on which Tiller is installed (app=tiller,helm=rocks)") - f.VarP(&i.opts.Output, "output", "o", "skip installation and output Tiller's manifest in specified format (json or yaml)") - f.StringArrayVar(&i.opts.Values, "override", []string{}, "override values for the Tiller Deployment manifest (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.BoolVar(&i.opts.AutoMountServiceAccountToken, "automount-service-account-token", true, "auto-mount the given service account to tiller") + f.StringVar(&i.opts.NodeSelectors, "node-selectors", "", "Labels to specify the node on which Tiller is installed (app=tiller,helm=rocks)") + f.VarP(&i.opts.Output, "output", "o", "Skip installation and output Tiller's manifest in specified format (json or yaml)") + f.StringArrayVar(&i.opts.Values, "override", []string{}, "Override values for the Tiller Deployment manifest (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.BoolVar(&i.opts.AutoMountServiceAccountToken, "automount-service-account-token", true, "Auto-mount the given service account to tiller") return cmd } @@ -265,14 +258,8 @@ func (i *initCmd) run() error { return nil } - if err := ensureDirectories(i.home, i.out); err != nil { - return err - } - if err := ensureDefaultRepos(i.home, i.out, i.skipRefresh); err != nil { - return err - } - if err := ensureRepoFileFormat(i.home.RepositoryFile(), i.out); err != nil { - return err + if err := installer.Initialize(i.home, i.out, i.skipRefresh, settings, stableRepositoryURL, localRepositoryURL); err != nil { + return fmt.Errorf("error initializing: %s", err) } fmt.Fprintf(i.out, "$HELM_HOME has been configured at %s.\n", settings.Home) @@ -295,8 +282,9 @@ func (i *initCmd) run() error { if err := i.ping(i.opts.SelectImage()); err != nil { return err } - fmt.Fprintln(i.out, "\nTiller (the Helm server-side component) has been upgraded to the current version.") + fmt.Fprintln(i.out, "\nTiller (the Helm server-side component) has been updated to", i.opts.SelectImage(), ".") } else { + debug("The error received while trying to init: %s", err) fmt.Fprintln(i.out, "Warning: Tiller is already installed in the cluster.\n"+ "(Use --client-only to suppress this message, or --upgrade to upgrade Tiller to the current version.)") } @@ -315,7 +303,14 @@ func (i *initCmd) run() error { fmt.Fprintln(i.out, "Not installing Tiller due to 'client-only' flag having been set") } - fmt.Fprintln(i.out, "Happy Helming!") + needsDefaultImage := !i.clientOnly && !i.opts.UseCanary && len(i.opts.ImageSpec) == 0 && version.BuildMetadata == "unreleased" + if needsDefaultImage { + fmt.Fprintf(i.out, "\nWarning: You appear to be using an unreleased version of Helm. Please either use the\n"+ + "--canary-image flag, or specify your desired tiller version with --tiller-image.\n\n"+ + "Ex:\n"+ + "$ helm init --tiller-image gcr.io/kubernetes-helm/tiller:v2.8.2\n\n") + } + return nil } @@ -342,117 +337,6 @@ func (i *initCmd) ping(image string) error { return nil } -// ensureDirectories checks to see if $HELM_HOME exists. -// -// If $HELM_HOME does not exist, this function will create it. 
-func ensureDirectories(home helmpath.Home, out io.Writer) error { - configDirectories := []string{ - home.String(), - home.Repository(), - home.Cache(), - home.LocalRepository(), - home.Plugins(), - home.Starters(), - home.Archive(), - } - for _, p := range configDirectories { - if fi, err := os.Stat(p); err != nil { - fmt.Fprintf(out, "Creating %s \n", p) - if err := os.MkdirAll(p, 0755); err != nil { - return fmt.Errorf("Could not create %s: %s", p, err) - } - } else if !fi.IsDir() { - return fmt.Errorf("%s must be a directory", p) - } - } - - return nil -} - -func ensureDefaultRepos(home helmpath.Home, out io.Writer, skipRefresh bool) error { - repoFile := home.RepositoryFile() - if fi, err := os.Stat(repoFile); err != nil { - fmt.Fprintf(out, "Creating %s \n", repoFile) - f := repo.NewRepoFile() - sr, err := initStableRepo(home.CacheIndex(stableRepository), out, skipRefresh, home) - if err != nil { - return err - } - lr, err := initLocalRepo(home.LocalRepository(localRepositoryIndexFile), home.CacheIndex("local"), out, home) - if err != nil { - return err - } - f.Add(sr) - f.Add(lr) - if err := f.WriteFile(repoFile, 0644); err != nil { - return err - } - } else if fi.IsDir() { - return fmt.Errorf("%s must be a file, not a directory", repoFile) - } - return nil -} - -func initStableRepo(cacheFile string, out io.Writer, skipRefresh bool, home helmpath.Home) (*repo.Entry, error) { - fmt.Fprintf(out, "Adding %s repo with URL: %s \n", stableRepository, stableRepositoryURL) - c := repo.Entry{ - Name: stableRepository, - URL: stableRepositoryURL, - Cache: cacheFile, - } - r, err := repo.NewChartRepository(&c, getter.All(settings)) - if err != nil { - return nil, err - } - - if skipRefresh { - return &c, nil - } - - // In this case, the cacheFile is always absolute. So passing empty string - // is safe. - if err := r.DownloadIndexFile(""); err != nil { - return nil, fmt.Errorf("Looks like %q is not a valid chart repository or cannot be reached: %s", stableRepositoryURL, err.Error()) - } - - return &c, nil -} - -func initLocalRepo(indexFile, cacheFile string, out io.Writer, home helmpath.Home) (*repo.Entry, error) { - if fi, err := os.Stat(indexFile); err != nil { - fmt.Fprintf(out, "Adding %s repo with URL: %s \n", localRepository, localRepositoryURL) - i := repo.NewIndexFile() - if err := i.WriteFile(indexFile, 0644); err != nil { - return nil, err - } - - //TODO: take this out and replace with helm update functionality - if err := createLink(indexFile, cacheFile, home); err != nil { - return nil, err - } - } else if fi.IsDir() { - return nil, fmt.Errorf("%s must be a file, not a directory", indexFile) - } - - return &repo.Entry{ - Name: localRepository, - URL: localRepositoryURL, - Cache: cacheFile, - }, nil -} - -func ensureRepoFileFormat(file string, out io.Writer) error { - r, err := repo.LoadRepositoriesFile(file) - if err == repo.ErrRepoOutOfDate { - fmt.Fprintln(out, "Updating repository file format...") - if err := r.WriteFile(file, 0644); err != nil { - return err - } - } - - return nil -} - // watchTillerUntilReady waits for the tiller pod to become available. This is useful in situations where we // want to wait before we call New(). 
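The `ensureDirectories` / `ensureDefaultRepos` / `ensureRepoFileFormat` sequence removed above is now delegated to `installer.Initialize`, whose definition appears further down in this diff (cmd/helm/installer/init.go). A sketch of calling it directly from other tooling, assuming the signature shown there; the repository URLs are the defaults already used by `helm init`.

```go
package main

import (
	"log"
	"os"

	"k8s.io/helm/cmd/helm/installer"
	helm_env "k8s.io/helm/pkg/helm/environment"
	"k8s.io/helm/pkg/helm/helmpath"
)

func main() {
	home := helmpath.Home(os.ExpandEnv("$HOME/.helm"))
	settings := helm_env.EnvSettings{Home: home}

	// Creates the $HELM_HOME directories, writes repositories.yaml with the
	// stable and local entries, and refreshes the stable index (skipRefresh=false).
	err := installer.Initialize(
		home,
		os.Stdout,
		false, // skipRefresh
		settings,
		"https://kubernetes-charts.storage.googleapis.com", // stableRepositoryURL
		"http://127.0.0.1:8879/charts",                     // localRepositoryURL
	)
	if err != nil {
		log.Fatalf("error initializing: %s", err)
	}
}
```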
// diff --git a/cmd/helm/init_test.go b/cmd/helm/init_test.go index fd6ef97c4..f9b3fcd48 100644 --- a/cmd/helm/init_test.go +++ b/cmd/helm/init_test.go @@ -28,8 +28,8 @@ import ( "github.com/ghodss/yaml" - "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -83,7 +83,7 @@ func TestInitCmd_exists(t *testing.T) { defer os.RemoveAll(home) var buf bytes.Buffer - fc := fake.NewSimpleClientset(&v1beta1.Deployment{ + fc := fake.NewSimpleClientset(&appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.NamespaceDefault, Name: "tiller-deploy", @@ -179,51 +179,6 @@ func TestInitCmd_dryRun(t *testing.T) { } } -func TestEnsureHome(t *testing.T) { - home, err := ioutil.TempDir("", "helm_home") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(home) - - b := bytes.NewBuffer(nil) - hh := helmpath.Home(home) - settings.Home = hh - if err := ensureDirectories(hh, b); err != nil { - t.Error(err) - } - if err := ensureDefaultRepos(hh, b, false); err != nil { - t.Error(err) - } - if err := ensureDefaultRepos(hh, b, true); err != nil { - t.Error(err) - } - if err := ensureRepoFileFormat(hh.RepositoryFile(), b); err != nil { - t.Error(err) - } - - expectedDirs := []string{hh.String(), hh.Repository(), hh.Cache(), hh.LocalRepository()} - for _, dir := range expectedDirs { - if fi, err := os.Stat(dir); err != nil { - t.Errorf("%s", err) - } else if !fi.IsDir() { - t.Errorf("%s is not a directory", fi) - } - } - - if fi, err := os.Stat(hh.RepositoryFile()); err != nil { - t.Error(err) - } else if fi.IsDir() { - t.Errorf("%s should not be a directory", fi) - } - - if fi, err := os.Stat(hh.LocalRepository(localRepositoryIndexFile)); err != nil { - t.Errorf("%s", err) - } else if fi.IsDir() { - t.Errorf("%s should not be a directory", fi) - } -} - func TestInitCmd_tlsOptions(t *testing.T) { const testDir = "../../testdata" diff --git a/cmd/helm/inspect.go b/cmd/helm/inspect.go index 844116bc5..d8ac6b2ef 100644 --- a/cmd/helm/inspect.go +++ b/cmd/helm/inspect.go @@ -27,27 +27,26 @@ import ( "k8s.io/helm/pkg/chartutil" ) -const inspectDesc = ` +const ( + inspectDesc = ` This command inspects a chart and displays information. It takes a chart reference ('stable/drupal'), a full path to a directory or packaged chart, or a URL. Inspect prints the contents of the Chart.yaml file and the values.yaml file. 
` - -const inspectValuesDesc = ` + inspectValuesDesc = ` This command inspects a chart (directory, file, or URL) and displays the contents of the values.yaml file ` - -const inspectChartDesc = ` + inspectChartDesc = ` This command inspects a chart (directory, file, or URL) and displays the contents of the Charts.yaml file ` - -const readmeChartDesc = ` + readmeChartDesc = ` This command inspects a chart (directory, file, or URL) and displays the contents of the README file ` +) type inspectCmd struct { chartpath string @@ -59,6 +58,7 @@ type inspectCmd struct { repoURL string username string password string + devel bool certFile string keyFile string @@ -82,18 +82,15 @@ func newInspectCmd(out io.Writer) *cobra.Command { inspectCommand := &cobra.Command{ Use: "inspect [CHART]", - Short: "inspect a chart", + Short: "Inspect a chart", Long: inspectDesc, RunE: func(cmd *cobra.Command, args []string) error { if err := checkArgsLength(len(args), "chart name"); err != nil { return err } - cp, err := locateChartPath(insp.repoURL, insp.username, insp.password, args[0], insp.version, insp.verify, insp.keyring, - insp.certFile, insp.keyFile, insp.caFile) - if err != nil { + if err := insp.prepare(args[0]); err != nil { return err } - insp.chartpath = cp return insp.run() }, } @@ -107,12 +104,9 @@ func newInspectCmd(out io.Writer) *cobra.Command { if err := checkArgsLength(len(args), "chart name"); err != nil { return err } - cp, err := locateChartPath(insp.repoURL, insp.username, insp.password, args[0], insp.version, insp.verify, insp.keyring, - insp.certFile, insp.keyFile, insp.caFile) - if err != nil { + if err := insp.prepare(args[0]); err != nil { return err } - insp.chartpath = cp return insp.run() }, } @@ -126,12 +120,9 @@ func newInspectCmd(out io.Writer) *cobra.Command { if err := checkArgsLength(len(args), "chart name"); err != nil { return err } - cp, err := locateChartPath(insp.repoURL, insp.username, insp.password, args[0], insp.version, insp.verify, insp.keyring, - insp.certFile, insp.keyFile, insp.caFile) - if err != nil { + if err := insp.prepare(args[0]); err != nil { return err } - insp.chartpath = cp return insp.run() }, } @@ -145,68 +136,71 @@ func newInspectCmd(out io.Writer) *cobra.Command { if err := checkArgsLength(len(args), "chart name"); err != nil { return err } - cp, err := locateChartPath(insp.repoURL, insp.username, insp.password, args[0], insp.version, insp.verify, insp.keyring, - insp.certFile, insp.keyFile, insp.caFile) - if err != nil { + if err := insp.prepare(args[0]); err != nil { return err } - insp.chartpath = cp return insp.run() }, } cmds := []*cobra.Command{inspectCommand, readmeSubCmd, valuesSubCmd, chartSubCmd} vflag := "verify" - vdesc := "verify the provenance data for this chart" + vdesc := "Verify the provenance data for this chart" for _, subCmd := range cmds { subCmd.Flags().BoolVar(&insp.verify, vflag, false, vdesc) } kflag := "keyring" - kdesc := "path to the keyring containing public verification keys" + kdesc := "Path to the keyring containing public verification keys" kdefault := defaultKeyring() for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.keyring, kflag, kdefault, kdesc) } verflag := "version" - verdesc := "version of the chart. By default, the newest chart is shown" + verdesc := "Version of the chart. 
By default, the newest chart is shown" for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.version, verflag, "", verdesc) } repoURL := "repo" - repoURLdesc := "chart repository url where to locate the requested chart" + repoURLdesc := "Chart repository url where to locate the requested chart" for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.repoURL, repoURL, "", repoURLdesc) } username := "username" - usernamedesc := "chart repository username where to locate the requested chart" + usernamedesc := "Chart repository username where to locate the requested chart" inspectCommand.Flags().StringVar(&insp.username, username, "", usernamedesc) valuesSubCmd.Flags().StringVar(&insp.username, username, "", usernamedesc) chartSubCmd.Flags().StringVar(&insp.username, username, "", usernamedesc) password := "password" - passworddesc := "chart repository password where to locate the requested chart" + passworddesc := "Chart repository password where to locate the requested chart" inspectCommand.Flags().StringVar(&insp.password, password, "", passworddesc) valuesSubCmd.Flags().StringVar(&insp.password, password, "", passworddesc) chartSubCmd.Flags().StringVar(&insp.password, password, "", passworddesc) + develFlag := "devel" + develDesc := "Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored." + for _, subCmd := range cmds { + subCmd.Flags().BoolVar(&insp.devel, develFlag, false, develDesc) + } + certFile := "cert-file" - certFiledesc := "verify certificates of HTTPS-enabled servers using this CA bundle" + certFiledesc := "Verify certificates of HTTPS-enabled servers using this CA bundle" for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.certFile, certFile, "", certFiledesc) } keyFile := "key-file" - keyFiledesc := "identify HTTPS client using this SSL key file" + keyFiledesc := "Identify HTTPS client using this SSL key file" for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.keyFile, keyFile, "", keyFiledesc) } caFile := "ca-file" - caFiledesc := "chart repository url where to locate the requested chart" + caFiledesc := "Chart repository url where to locate the requested chart" for _, subCmd := range cmds { subCmd.Flags().StringVar(&insp.caFile, caFile, "", caFiledesc) } @@ -218,6 +212,22 @@ func newInspectCmd(out io.Writer) *cobra.Command { return inspectCommand } +func (i *inspectCmd) prepare(chart string) error { + debug("Original chart version: %q", i.version) + if i.version == "" && i.devel { + debug("setting version to >0.0.0-0") + i.version = ">0.0.0-0" + } + + cp, err := locateChartPath(i.repoURL, i.username, i.password, chart, i.version, i.verify, i.keyring, + i.certFile, i.keyFile, i.caFile) + if err != nil { + return err + } + i.chartpath = cp + return nil +} + func (i *inspectCmd) run() error { chrt, err := chartutil.Load(i.chartpath) if err != nil { diff --git a/cmd/helm/inspect_test.go b/cmd/helm/inspect_test.go index b9dbf2ab6..c4ce005b0 100644 --- a/cmd/helm/inspect_test.go +++ b/cmd/helm/inspect_test.go @@ -19,8 +19,11 @@ package main import ( "bytes" "io/ioutil" + "os" "strings" "testing" + + "k8s.io/helm/pkg/repo/repotest" ) func TestInspect(t *testing.T) { @@ -78,3 +81,66 @@ func TestInspect(t *testing.T) { t.Errorf("expected empty values buffer, got %q", b.String()) } } + +func TestInspectPreReleaseChart(t *testing.T) { + hh, err := tempHelmHome(t) + if err != nil { + t.Fatal(err) + } + cleanup := resetEnv() + defer func() { + os.RemoveAll(hh.String()) + cleanup() + }() + + settings.Home = hh 
+ + srv := repotest.NewServer(hh.String()) + defer srv.Stop() + + if _, err := srv.CopyCharts("testdata/testcharts/*.tgz*"); err != nil { + t.Fatal(err) + } + if err := srv.LinkIndices(); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + args []string + flags []string + fail bool + expectedErr string + }{ + { + name: "inspect pre-release chart", + args: []string{"prerelease"}, + fail: true, + expectedErr: "chart \"prerelease\" not found", + }, + { + name: "inspect pre-release chart with 'devel' flag", + args: []string{"prerelease"}, + flags: []string{"--devel"}, + fail: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.flags = append(tt.flags, "--repo", srv.URL()) + cmd := newInspectCmd(ioutil.Discard) + cmd.SetArgs(tt.args) + cmd.ParseFlags(tt.flags) + if err := cmd.RunE(cmd, tt.args); err != nil { + if tt.fail { + if !strings.Contains(err.Error(), tt.expectedErr) { + t.Errorf("%q expected error: %s, got: %s", tt.name, tt.expectedErr, err.Error()) + } + return + } + t.Errorf("%q reported error: %s", tt.name, err) + } + }) + } +} diff --git a/cmd/helm/install.go b/cmd/helm/install.go index 05321f1f7..117c7ba5b 100644 --- a/cmd/helm/install.go +++ b/cmd/helm/install.go @@ -131,16 +131,19 @@ type installCmd struct { version string timeout int64 wait bool + atomic bool repoURL string username string password string devel bool depUp bool + subNotes bool description string certFile string keyFile string caFile string + output string } type valueFiles []string @@ -168,7 +171,7 @@ func newInstallCmd(c helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "install [CHART]", - Short: "install a chart archive", + Short: "Install a chart archive", Long: installDesc, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -189,37 +192,42 @@ func newInstallCmd(c helm.Interface, out io.Writer) *cobra.Command { } inst.chartPath = cp inst.client = ensureHelmClient(inst.client) + inst.wait = inst.wait || inst.atomic + return inst.run() }, } f := cmd.Flags() settings.AddFlagsTLS(f) - f.VarP(&inst.valueFiles, "values", "f", "specify values in a YAML file or a URL(can specify multiple)") - f.StringVarP(&inst.name, "name", "n", "", "release name. If unspecified, it will autogenerate one for you") - f.StringVar(&inst.namespace, "namespace", "", "namespace to install the release into. Defaults to the current kube config namespace.") - f.BoolVar(&inst.dryRun, "dry-run", false, "simulate an install") - f.BoolVar(&inst.disableHooks, "no-hooks", false, "prevent hooks from running during install") - f.BoolVar(&inst.disableCRDHook, "no-crd-hook", false, "prevent CRD hooks from running, but run other hooks") - f.BoolVar(&inst.replace, "replace", false, "re-use the given name, even if that name is already used. 
This is unsafe in production") - f.StringArrayVar(&inst.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&inst.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&inst.fileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") - f.StringVar(&inst.nameTemplate, "name-template", "", "specify template used to name the release") - f.BoolVar(&inst.verify, "verify", false, "verify the package before installing it") - f.StringVar(&inst.keyring, "keyring", defaultKeyring(), "location of public keys used for verification") - f.StringVar(&inst.version, "version", "", "specify the exact chart version to install. If this is not specified, the latest version is installed") - f.Int64Var(&inst.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&inst.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") - f.StringVar(&inst.repoURL, "repo", "", "chart repository url where to locate the requested chart") - f.StringVar(&inst.username, "username", "", "chart repository username where to locate the requested chart") - f.StringVar(&inst.password, "password", "", "chart repository password where to locate the requested chart") - f.StringVar(&inst.certFile, "cert-file", "", "identify HTTPS client using this SSL certificate file") - f.StringVar(&inst.keyFile, "key-file", "", "identify HTTPS client using this SSL key file") - f.StringVar(&inst.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") - f.BoolVar(&inst.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.") - f.BoolVar(&inst.depUp, "dep-up", false, "run helm dependency update before installing the chart") - f.StringVar(&inst.description, "description", "", "specify a description for the release") + f.VarP(&inst.valueFiles, "values", "f", "Specify values in a YAML file or a URL(can specify multiple)") + f.StringVarP(&inst.name, "name", "n", "", "The release name. If unspecified, it will autogenerate one for you") + f.StringVar(&inst.namespace, "namespace", "", "Namespace to install the release into. Defaults to the current kube config namespace.") + f.BoolVar(&inst.dryRun, "dry-run", false, "Simulate an install") + f.BoolVar(&inst.disableHooks, "no-hooks", false, "Prevent hooks from running during install") + f.BoolVar(&inst.disableCRDHook, "no-crd-hook", false, "Prevent CRD hooks from running, but run other hooks") + f.BoolVar(&inst.replace, "replace", false, "Re-use the given name, even if that name is already used. 
This is unsafe in production") + f.StringArrayVar(&inst.values, "set", []string{}, "Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&inst.stringValues, "set-string", []string{}, "Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&inst.fileValues, "set-file", []string{}, "Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") + f.StringVar(&inst.nameTemplate, "name-template", "", "Specify template used to name the release") + f.BoolVar(&inst.verify, "verify", false, "Verify the package before installing it") + f.StringVar(&inst.keyring, "keyring", defaultKeyring(), "Location of public keys used for verification") + f.StringVar(&inst.version, "version", "", "Specify the exact chart version to install. If this is not specified, the latest version is installed") + f.Int64Var(&inst.timeout, "timeout", 300, "Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.BoolVar(&inst.wait, "wait", false, "If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") + f.BoolVar(&inst.atomic, "atomic", false, "If set, installation process purges chart on fail, also sets --wait flag") + f.StringVar(&inst.repoURL, "repo", "", "Chart repository url where to locate the requested chart") + f.StringVar(&inst.username, "username", "", "Chart repository username where to locate the requested chart") + f.StringVar(&inst.password, "password", "", "Chart repository password where to locate the requested chart") + f.StringVar(&inst.certFile, "cert-file", "", "Identify HTTPS client using this SSL certificate file") + f.StringVar(&inst.keyFile, "key-file", "", "Identify HTTPS client using this SSL key file") + f.StringVar(&inst.caFile, "ca-file", "", "Verify certificates of HTTPS-enabled servers using this CA bundle") + f.BoolVar(&inst.devel, "devel", false, "Use development versions, too. Equivalent to version '>0.0.0-0'. 
If --version is set, this is ignored.") + f.BoolVar(&inst.depUp, "dep-up", false, "Run helm dependency update before installing the chart") + f.BoolVar(&inst.subNotes, "render-subchart-notes", false, "Render subchart notes along with the parent") + f.StringVar(&inst.description, "description", "", "Specify a description for the release") + bindOutputFlag(cmd, &inst.output) // set defaults from environment settings.InitTLS(f) @@ -249,8 +257,8 @@ func (i *installCmd) run() error { fmt.Printf("FINAL NAME: %s\n", i.name) } - if msgs := validation.IsDNS1123Label(i.name); i.name != "" && len(msgs) > 0 { - return fmt.Errorf("release name %s is not a valid DNS label: %s", i.name, strings.Join(msgs, ";")) + if msgs := validation.IsDNS1123Subdomain(i.name); i.name != "" && len(msgs) > 0 { + return fmt.Errorf("release name %s is invalid: %s", i.name, strings.Join(msgs, ";")) } // Check chart requirements to make sure all dependencies are present in /charts @@ -300,10 +308,28 @@ func (i *installCmd) run() error { helm.InstallReuseName(i.replace), helm.InstallDisableHooks(i.disableHooks), helm.InstallDisableCRDHook(i.disableCRDHook), + helm.InstallSubNotes(i.subNotes), helm.InstallTimeout(i.timeout), helm.InstallWait(i.wait), helm.InstallDescription(i.description)) if err != nil { + if i.atomic { + fmt.Fprintf(os.Stdout, "INSTALL FAILED\nPURGING CHART\nError: %v\n", prettyError(err)) + deleteSideEffects := &deleteCmd{ + name: i.name, + disableHooks: i.disableHooks, + purge: true, + timeout: i.timeout, + description: "", + dryRun: i.dryRun, + out: i.out, + client: i.client, + } + if err := deleteSideEffects.run(); err != nil { + return err + } + fmt.Fprintf(os.Stdout, "Successfully purged a chart!\n") + } return prettyError(err) } @@ -311,7 +337,10 @@ func (i *installCmd) run() error { if rel == nil { return nil } - i.printRelease(rel) + + if outputFormat(i.output) == outputTable { + i.printRelease(rel) + } // If this is a dry run, we can't display status. 
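install.go now validates release names with `IsDNS1123Subdomain` instead of `IsDNS1123Label`, which is why the "release name using periods" test above no longer expects an error while the underscore case still does. A small sketch of the difference, using the same k8s.io/apimachinery validation helpers; the example names are illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, name := range []string{"foo.bar", "foo_bar", "foo-bar"} {
		labelErrs := validation.IsDNS1123Label(name)          // rejects dots
		subdomainErrs := validation.IsDNS1123Subdomain(name)  // allows dot-separated labels
		fmt.Printf("%-8s label-ok:%-5v subdomain-ok:%v\n",
			name, len(labelErrs) == 0, len(subdomainErrs) == 0)
	}
}
```

Underscores fail both checks, so a name like `foo_bar` is still rejected either way; only the dot-separated form gains support from the switch to subdomain validation.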
if i.dryRun { @@ -327,8 +356,8 @@ func (i *installCmd) run() error { if err != nil { return prettyError(err) } - PrintStatus(i.out, status) - return nil + + return write(i.out, &statusWriter{status}, outputFormat(i.output)) } // Merges source and destination map, preferring values from the source map diff --git a/cmd/helm/install_test.go b/cmd/helm/install_test.go index 4a2055640..e00c33a81 100644 --- a/cmd/helm/install_test.go +++ b/cmd/helm/install_test.go @@ -113,6 +113,14 @@ func TestInstall(t *testing.T) { expected: "apollo", resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "apollo"}), }, + // Install, with atomic + { + name: "install with a atomic", + args: []string{"testdata/testcharts/alpine"}, + flags: strings.Split("--name apollo", " "), + expected: "apollo", + resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "apollo"}), + }, // Install, using the name-template { name: "install with name-template", @@ -169,7 +177,6 @@ func TestInstall(t *testing.T) { name: "install chart with release name using periods", args: []string{"testdata/testcharts/alpine"}, flags: []string{"--name", "foo.bar"}, - err: true, }, { name: "install chart with release name using underscores", @@ -184,6 +191,22 @@ func TestInstall(t *testing.T) { flags: []string{"--name-template", "{{UPPER \"foobar\"}}"}, err: true, }, + // Install, using --output json + { + name: "install using output json", + args: []string{"testdata/testcharts/alpine"}, + flags: strings.Split("--name virgil --output json", " "), + resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "virgil"}), + expected: regexp.QuoteMeta(`{"name":"virgil","info":{"status":{"code":1},"first_deployed":{"seconds":242085845},"last_deployed":{"seconds":242085845},"Description":"Release mock"},"namespace":"default"}`), + }, + // Install, using --output yaml + { + name: "install using output yaml", + args: []string{"testdata/testcharts/alpine"}, + flags: strings.Split("--name virgil --output yaml", " "), + resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "virgil"}), + expected: "info:\n Description: Release mock\n first_deployed:\n seconds: 242085845\n last_deployed:\n seconds: 242085845\n status:\n code: 1\nname: virgil\nnamespace: default\n", + }, } runReleaseCases(t, tests, func(c *helm.FakeClient, out io.Writer) *cobra.Command { diff --git a/cmd/helm/installer/init.go b/cmd/helm/installer/init.go new file mode 100644 index 000000000..7731a4a98 --- /dev/null +++ b/cmd/helm/installer/init.go @@ -0,0 +1,163 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package installer // import "k8s.io/helm/cmd/helm/installer" + +import ( + "fmt" + "io" + "os" + + "k8s.io/helm/pkg/getter" + helm_env "k8s.io/helm/pkg/helm/environment" + "k8s.io/helm/pkg/helm/helmpath" + "k8s.io/helm/pkg/repo" +) + +const ( + stableRepository = "stable" + + // LocalRepository is the standard name of the local repository + LocalRepository = "local" + + // LocalRepositoryIndexFile is the standard name of the local repository index file + LocalRepositoryIndexFile = "index.yaml" +) + +// Initialize initializes local config +// +// Returns an error if the command failed. +func Initialize(home helmpath.Home, out io.Writer, skipRefresh bool, settings helm_env.EnvSettings, stableRepositoryURL, localRepositoryURL string) error { + if err := ensureDirectories(home, out); err != nil { + return err + } + if err := ensureDefaultRepos(home, out, skipRefresh, settings, stableRepositoryURL, localRepositoryURL); err != nil { + return err + } + + return ensureRepoFileFormat(home.RepositoryFile(), out) +} + +// ensureDirectories checks to see if $HELM_HOME exists. +// +// If $HELM_HOME does not exist, this function will create it. +func ensureDirectories(home helmpath.Home, out io.Writer) error { + configDirectories := []string{ + home.String(), + home.Repository(), + home.Cache(), + home.LocalRepository(), + home.Plugins(), + home.Starters(), + home.Archive(), + } + for _, p := range configDirectories { + if fi, err := os.Stat(p); err != nil { + fmt.Fprintf(out, "Creating %s \n", p) + if err := os.MkdirAll(p, 0755); err != nil { + return fmt.Errorf("Could not create %s: %s", p, err) + } + } else if !fi.IsDir() { + return fmt.Errorf("%s must be a directory", p) + } + } + + return nil +} + +func ensureDefaultRepos(home helmpath.Home, out io.Writer, skipRefresh bool, settings helm_env.EnvSettings, stableRepositoryURL, localRepositoryURL string) error { + repoFile := home.RepositoryFile() + if fi, err := os.Stat(repoFile); err != nil { + fmt.Fprintf(out, "Creating %s \n", repoFile) + f := repo.NewRepoFile() + sr, err := initStableRepo(home.CacheIndex(stableRepository), home, out, skipRefresh, settings, stableRepositoryURL) + if err != nil { + return err + } + lr, err := initLocalRepo(home.LocalRepository(LocalRepositoryIndexFile), home.CacheIndex("local"), home, out, settings, localRepositoryURL) + if err != nil { + return err + } + f.Add(sr) + f.Add(lr) + if err := f.WriteFile(repoFile, 0644); err != nil { + return err + } + } else if fi.IsDir() { + return fmt.Errorf("%s must be a file, not a directory", repoFile) + } + return nil +} + +func initStableRepo(cacheFile string, home helmpath.Home, out io.Writer, skipRefresh bool, settings helm_env.EnvSettings, stableRepositoryURL string) (*repo.Entry, error) { + fmt.Fprintf(out, "Adding %s repo with URL: %s \n", stableRepository, stableRepositoryURL) + c := repo.Entry{ + Name: stableRepository, + URL: stableRepositoryURL, + Cache: cacheFile, + } + r, err := repo.NewChartRepository(&c, getter.All(settings)) + if err != nil { + return nil, err + } + + if skipRefresh { + return &c, nil + } + + // In this case, the cacheFile is always absolute. So passing empty string + // is safe. 
+ if err := r.DownloadIndexFile(""); err != nil { + return nil, fmt.Errorf("Looks like %q is not a valid chart repository or cannot be reached: %s", stableRepositoryURL, err.Error()) + } + + return &c, nil +} + +func initLocalRepo(indexFile, cacheFile string, home helmpath.Home, out io.Writer, settings helm_env.EnvSettings, localRepositoryURL string) (*repo.Entry, error) { + if fi, err := os.Stat(indexFile); err != nil { + fmt.Fprintf(out, "Adding %s repo with URL: %s \n", LocalRepository, localRepositoryURL) + i := repo.NewIndexFile() + if err := i.WriteFile(indexFile, 0644); err != nil { + return nil, err + } + + //TODO: take this out and replace with helm update functionality + if err := createLink(indexFile, cacheFile, home); err != nil { + return nil, err + } + } else if fi.IsDir() { + return nil, fmt.Errorf("%s must be a file, not a directory", indexFile) + } + + return &repo.Entry{ + Name: LocalRepository, + URL: localRepositoryURL, + Cache: cacheFile, + }, nil +} + +func ensureRepoFileFormat(file string, out io.Writer) error { + r, err := repo.LoadRepositoriesFile(file) + if err == repo.ErrRepoOutOfDate { + fmt.Fprintln(out, "Updating repository file format...") + if err := r.WriteFile(file, 0644); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/helm/installer/init_test.go b/cmd/helm/installer/init_test.go new file mode 100644 index 000000000..1d53687e6 --- /dev/null +++ b/cmd/helm/installer/init_test.go @@ -0,0 +1,120 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package installer // import "k8s.io/helm/cmd/helm/installer" + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + + helm_env "k8s.io/helm/pkg/helm/environment" + "k8s.io/helm/pkg/helm/helmpath" +) + +func TestInitialize(t *testing.T) { + home, err := ioutil.TempDir("", "helm_home") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(home) + + b := bytes.NewBuffer(nil) + hh := helmpath.Home(home) + + settings := helm_env.EnvSettings{ + Home: hh, + } + stableRepositoryURL := "https://kubernetes-charts.storage.googleapis.com" + localRepositoryURL := "http://127.0.0.1:8879/charts" + + if err := Initialize(hh, b, false, settings, stableRepositoryURL, localRepositoryURL); err != nil { + t.Error(err) + } + + expectedDirs := []string{hh.String(), hh.Repository(), hh.Cache(), hh.LocalRepository()} + for _, dir := range expectedDirs { + if fi, err := os.Stat(dir); err != nil { + t.Errorf("%s", err) + } else if !fi.IsDir() { + t.Errorf("%s is not a directory", fi) + } + } + + if fi, err := os.Stat(hh.RepositoryFile()); err != nil { + t.Error(err) + } else if fi.IsDir() { + t.Errorf("%s should not be a directory", fi) + } + + if fi, err := os.Stat(hh.LocalRepository(LocalRepositoryIndexFile)); err != nil { + t.Errorf("%s", err) + } else if fi.IsDir() { + t.Errorf("%s should not be a directory", fi) + } +} + +func TestEnsureHome(t *testing.T) { + home, err := ioutil.TempDir("", "helm_home") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(home) + + b := bytes.NewBuffer(nil) + hh := helmpath.Home(home) + + settings := helm_env.EnvSettings{ + Home: hh, + } + stableRepositoryURL := "https://kubernetes-charts.storage.googleapis.com" + localRepositoryURL := "http://127.0.0.1:8879/charts" + + if err := ensureDirectories(hh, b); err != nil { + t.Error(err) + } + if err := ensureDefaultRepos(hh, b, false, settings, stableRepositoryURL, localRepositoryURL); err != nil { + t.Error(err) + } + if err := ensureDefaultRepos(hh, b, true, settings, stableRepositoryURL, localRepositoryURL); err != nil { + t.Error(err) + } + if err := ensureRepoFileFormat(hh.RepositoryFile(), b); err != nil { + t.Error(err) + } + + expectedDirs := []string{hh.String(), hh.Repository(), hh.Cache(), hh.LocalRepository()} + for _, dir := range expectedDirs { + if fi, err := os.Stat(dir); err != nil { + t.Errorf("%s", err) + } else if !fi.IsDir() { + t.Errorf("%s is not a directory", fi) + } + } + + if fi, err := os.Stat(hh.RepositoryFile()); err != nil { + t.Error(err) + } else if fi.IsDir() { + t.Errorf("%s should not be a directory", fi) + } + + if fi, err := os.Stat(hh.LocalRepository(LocalRepositoryIndexFile)); err != nil { + t.Errorf("%s", err) + } else if fi.IsDir() { + t.Errorf("%s should not be a directory", fi) + } +} diff --git a/cmd/helm/init_unix.go b/cmd/helm/installer/init_unix.go similarity index 92% rename from cmd/helm/init_unix.go rename to cmd/helm/installer/init_unix.go index bf61f1925..d7f15a1c2 100644 --- a/cmd/helm/init_unix.go +++ b/cmd/helm/installer/init_unix.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package installer // import "k8s.io/helm/cmd/helm/installer" import ( "os" diff --git a/cmd/helm/init_windows.go b/cmd/helm/installer/init_windows.go similarity index 92% rename from cmd/helm/init_windows.go rename to cmd/helm/installer/init_windows.go index 447044bba..48c56e288 100644 --- a/cmd/helm/init_windows.go +++ b/cmd/helm/installer/init_windows.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package installer // import "k8s.io/helm/cmd/helm/installer" import ( "os" diff --git a/cmd/helm/installer/install.go b/cmd/helm/installer/install.go index 6027fdba8..504b0183d 100644 --- a/cmd/helm/installer/install.go +++ b/cmd/helm/installer/install.go @@ -17,32 +17,32 @@ limitations under the License. package installer // import "k8s.io/helm/cmd/helm/installer" import ( - "errors" "fmt" "io/ioutil" "strings" "github.com/Masterminds/semver" "github.com/ghodss/yaml" + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - "k8s.io/helm/pkg/version" "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/tiller/environment" ) // Install uses Kubernetes client to install Tiller. // // Returns an error if the command failed. func Install(client kubernetes.Interface, opts *Options) error { - if err := createDeployment(client.ExtensionsV1beta1(), opts); err != nil { + if err := createDeployment(client.AppsV1(), opts); err != nil { return err } if err := createService(client.CoreV1(), opts.Namespace); err != nil { @@ -60,51 +60,108 @@ func Install(client kubernetes.Interface, opts *Options) error { // // Returns an error if the command failed. func Upgrade(client kubernetes.Interface, opts *Options) error { - obj, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{}) - if err != nil { + appsobj, err := client.AppsV1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{}) + if err == nil { + // Can happen in two cases: + // 1. helm init inserted an apps/v1 Deployment up front in Kubernetes + // 2. helm init inserted an extensions/v1beta1 Deployment against a K8s cluster already + // supporting apps/v1 Deployment. 
In such a case K8s is returning the apps/v1 object anyway.
+		//    (for the same reason "kubectl convert" is being deprecated)
+		return upgradeAppsTillerDeployment(client, opts, appsobj)
+	}
+
+	extensionsobj, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{})
+	if err == nil {
+		// The user performed helm init against an older version of Kubernetes (prior to 1.9)
+		return upgradeExtensionsTillerDeployment(client, opts, extensionsobj)
+	}
+
+	return err
+}
+
+func upgradeAppsTillerDeployment(client kubernetes.Interface, opts *Options, obj *appsv1.Deployment) error {
+	// Update the PodTemplateSpec section of the deployment
+	if err := updatePodTemplate(&obj.Spec.Template.Spec, opts); err != nil {
 		return err
 	}
-	tillerImage := obj.Spec.Template.Spec.Containers[0].Image
-	if semverCompare(tillerImage) == -1 && !opts.ForceUpgrade {
-		return errors.New("current Tiller version is newer, use --force-upgrade to downgrade")
+
+	if _, err := client.AppsV1().Deployments(opts.Namespace).Update(obj); err != nil {
+		return err
 	}
-	obj.Spec.Template.Spec.Containers[0].Image = opts.SelectImage()
-	obj.Spec.Template.Spec.Containers[0].ImagePullPolicy = opts.pullPolicy()
-	obj.Spec.Template.Spec.ServiceAccountName = opts.ServiceAccount
+
+	// If the service does not exist that would mean we are upgrading from a Tiller version
+	// that didn't deploy the service, so install it.
+	_, err := client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		return createService(client.CoreV1(), opts.Namespace)
+	}
+
+	return err
+}
+
+func upgradeExtensionsTillerDeployment(client kubernetes.Interface, opts *Options, obj *extensionsv1beta1.Deployment) error {
+	// Update the PodTemplateSpec section of the deployment
+	if err := updatePodTemplate(&obj.Spec.Template.Spec, opts); err != nil {
+		return err
+	}
+
 	if _, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Update(obj); err != nil {
 		return err
 	}
+
 	// If the service does not exist that would mean we are upgrading from a Tiller version
 	// that didn't deploy the service, so install it.
-	_, err = client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
+	_, err := client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		return createService(client.CoreV1(), opts.Namespace)
 	}
+
 	return err
 }
 
-// semverCompare returns whether the client's version is older, equal or newer than the given image's version.
-func semverCompare(image string) int {
-	split := strings.Split(image, ":")
-	if len(split) < 2 {
-		// If we don't know the version, we consider the client version newer.
-		return 1
+func updatePodTemplate(podSpec *v1.PodSpec, opts *Options) error {
+	tillerImage := podSpec.Containers[0].Image
+	clientImage := opts.SelectImage()
+
+	if semverCompare(tillerImage, clientImage) == -1 && !opts.ForceUpgrade {
+		return fmt.Errorf("current Tiller version %s is newer than client version %s, use --force-upgrade to downgrade", tillerImage, clientImage)
 	}
-	tillerVersion, err := semver.NewVersion(split[1])
+	podSpec.Containers[0].Image = clientImage
+	podSpec.Containers[0].ImagePullPolicy = opts.pullPolicy()
+	podSpec.ServiceAccountName = opts.ServiceAccount
+
+	return nil
+}
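updatePodTemplate above gates downgrades by comparing the semver tag of the deployed Tiller image against the tag of the image the client selects; the semverCompare and string2semver helpers that follow in this hunk do the parsing and comparison. As a rough, standalone sketch of that comparison (illustrative only, using the github.com/Masterminds/semver package that install.go already imports; the image references and the tagOf helper are made-up examples, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Masterminds/semver"
)

// tagOf pulls the tag out of an image reference, e.g. "v2.14.0" from
// "gcr.io/kubernetes-helm/tiller:v2.14.0". Purely illustrative.
func tagOf(image string) (string, error) {
	parts := strings.Split(image, ":")
	if len(parts) < 2 {
		return "", fmt.Errorf("no tag in image %q", image)
	}
	return parts[len(parts)-1], nil
}

func main() {
	// Made-up image references for illustration.
	tillerTag, _ := tagOf("gcr.io/kubernetes-helm/tiller:v2.13.1")
	clientTag, _ := tagOf("gcr.io/kubernetes-helm/tiller:v2.14.0")

	tiller, err := semver.NewVersion(tillerTag)
	if err != nil {
		// Unparsable tags (e.g. ":canary") are treated as "client is newer" above.
		panic(err)
	}
	client, err := semver.NewVersion(clientTag)
	if err != nil {
		panic(err)
	}

	// Compare returns -1, 0 or 1. Here the client (2.14.0) is newer than the
	// deployed Tiller (2.13.1), so this prints 1 and an upgrade may proceed.
	fmt.Println(client.Compare(tiller))
}
```

A result of -1 (client older than the deployed Tiller) is the case that makes the upgrade path above refuse to continue without --force-upgrade.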
+
+// semverCompare returns whether the client's version is older, equal or newer than the given image's version.
+func semverCompare(tillerImage, clientImage string) int {
+	tillerVersion, err := string2semver(tillerImage)
 	if err != nil {
 		// same thing with unparsable tiller versions (e.g. canary releases).
 		return 1
 	}
-	clientVersion, err := semver.NewVersion(version.Version)
+
+	clientVersion, err := string2semver(clientImage)
 	if err != nil {
 		// aaaaaand same thing with unparsable helm versions (e.g. canary releases).
 		return 1
 	}
+
 	return clientVersion.Compare(tillerVersion)
 }
 
+func string2semver(image string) (*semver.Version, error) {
+	split := strings.Split(image, ":")
+	if len(split) < 2 {
+		// If the image has no version tag, the caller treats the client version as newer.
+		return nil, fmt.Errorf("no repository in image %s", image)
+	}
+	return semver.NewVersion(split[1])
+}
+
 // createDeployment creates the Tiller Deployment resource.
-func createDeployment(client extensionsclient.DeploymentsGetter, opts *Options) error {
+func createDeployment(client appsv1client.DeploymentsGetter, opts *Options) error {
 	obj, err := generateDeployment(opts)
 	if err != nil {
 		return err
 	}
@@ -117,14 +174,14 @@ func createDeployment(client extensionsclient.DeploymentsGetter, opts *Options)
 // Deployment gets a deployment object that can be used to generate a manifest
 // as a string. This object should not be submitted directly to the Kubernetes
 // api
-func Deployment(opts *Options) (*v1beta1.Deployment, error) {
+func Deployment(opts *Options) (*appsv1.Deployment, error) {
 	dep, err := generateDeployment(opts)
 	if err != nil {
 		return nil, err
 	}
 	dep.TypeMeta = metav1.TypeMeta{
 		Kind:       "Deployment",
-		APIVersion: "extensions/v1beta1",
+		APIVersion: "apps/v1",
 	}
 	return dep, nil
 }
@@ -183,7 +240,7 @@ func generateLabels(labels map[string]string) map[string]string {
 	return labels
 }
 
-// parseNodeSelectors parses a comma delimited list of key=values pairs into a map.
+// parseNodeSelectorsInto parses a comma delimited list of key=values pairs into a map.
func parseNodeSelectorsInto(labels string, m map[string]string) error { kv := strings.Split(labels, ",") for _, v := range kv { @@ -196,7 +253,7 @@ func parseNodeSelectorsInto(labels string, m map[string]string) error { } return nil } -func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { +func generateDeployment(opts *Options) (*appsv1.Deployment, error) { labels := generateLabels(map[string]string{"name": "tiller"}) nodeSelectors := map[string]string{} if len(opts.NodeSelectors) > 0 { @@ -205,14 +262,17 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { return nil, err } } - d := &v1beta1.Deployment{ + d := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: opts.Namespace, Name: deploymentName, Labels: labels, }, - Spec: v1beta1.DeploymentSpec{ + Spec: appsv1.DeploymentSpec{ Replicas: opts.getReplicas(), + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, @@ -226,8 +286,8 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { Image: opts.SelectImage(), ImagePullPolicy: opts.pullPolicy(), Ports: []v1.ContainerPort{ - {ContainerPort: 44134, Name: "tiller"}, - {ContainerPort: 44135, Name: "http"}, + {ContainerPort: environment.DefaultTillerPort, Name: "tiller"}, + {ContainerPort: environment.DefaultTillerProbePort, Name: "http"}, }, Env: []v1.EnvVar{ {Name: "TILLER_NAMESPACE", Value: opts.Namespace}, @@ -237,7 +297,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: "/liveness", - Port: intstr.FromInt(44135), + Port: intstr.FromInt(environment.DefaultTillerProbePort), }, }, InitialDelaySeconds: 1, @@ -247,7 +307,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: "/readiness", - Port: intstr.FromInt(44135), + Port: intstr.FromInt(environment.DefaultTillerProbePort), }, }, InitialDelaySeconds: 1, @@ -296,7 +356,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) { // merge them and convert back to Deployment if len(opts.Values) > 0 { // base deployment struct - var dd v1beta1.Deployment + var dd appsv1.Deployment // get YAML from original deployment dy, err := yaml.Marshal(d) if err != nil { @@ -341,7 +401,7 @@ func generateService(namespace string) *v1.Service { Ports: []v1.ServicePort{ { Name: "tiller", - Port: 44134, + Port: environment.DefaultTillerPort, TargetPort: intstr.FromString("tiller"), }, }, diff --git a/cmd/helm/installer/install_test.go b/cmd/helm/installer/install_test.go index 561b3ed6d..3673712b2 100644 --- a/cmd/helm/installer/install_test.go +++ b/cmd/helm/installer/install_test.go @@ -17,14 +17,15 @@ limitations under the License. package installer // import "k8s.io/helm/cmd/helm/installer" import ( + "encoding/json" "os" "path/filepath" "reflect" "testing" "github.com/ghodss/yaml" + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" @@ -53,6 +54,10 @@ func TestDeployment(t *testing.T) { t.Fatalf("%s: error %q", tt.name, err) } + // Unreleased versions of helm don't have a release image. 
See issue 3370 + if tt.name == "default" && version.BuildMetadata == "unreleased" { + tt.expect = "gcr.io/kubernetes-helm/tiller:canary" + } if got := dep.Spec.Template.Spec.Containers[0].Image; got != tt.expect { t.Errorf("%s: expected image %q, got %q", tt.name, tt.expect, got) } @@ -187,7 +192,7 @@ func TestInstall(t *testing.T) { fc := &fake.Clientset{} fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment) l := obj.GetLabels() if reflect.DeepEqual(l, map[string]string{"app": "helm"}) { t.Errorf("expected labels = '', got '%s'", l) @@ -234,7 +239,7 @@ func TestInstallHA(t *testing.T) { fc := &fake.Clientset{} fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment) replicas := obj.Spec.Replicas if int(*replicas) != 2 { t.Errorf("expected replicas = 2, got '%d'", replicas) @@ -258,7 +263,7 @@ func TestInstall_WithTLS(t *testing.T) { fc := &fake.Clientset{} fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment) l := obj.GetLabels() if reflect.DeepEqual(l, map[string]string{"app": "helm"}) { t.Errorf("expected labels = '', got '%s'", l) @@ -326,7 +331,7 @@ func TestInstall_WithTLS(t *testing.T) { func TestInstall_canary(t *testing.T) { fc := &fake.Clientset{} fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != "gcr.io/kubernetes-helm/tiller:canary" { t.Errorf("expected canary image, got '%s'", i) @@ -364,7 +369,7 @@ func TestUpgrade(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ -403,7 +408,7 @@ func TestUpgrade_serviceNotFound(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ -448,7 +453,7 @@ func TestUgrade_newerVersion(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ 
-508,7 +513,7 @@ func TestUpgrade_identical(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ -549,7 +554,7 @@ func TestUpgrade_canaryClient(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ -590,7 +595,7 @@ func TestUpgrade_canaryServer(t *testing.T) { return true, existingDeployment, nil }) fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) { - obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment) + obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment) i := obj.Spec.Template.Spec.Containers[0].Image if i != image { t.Errorf("expected image = '%s', got '%s'", image, i) @@ -712,9 +717,32 @@ func TestDeployment_WithSetValues(t *testing.T) { // convert our expected value to match the result type for comparison ev := tt.expect + intType := reflect.TypeOf(int64(0)) + floatType := reflect.TypeOf(float64(0)) + switch pvt := pv.(type) { + case json.Number: + evv := reflect.ValueOf(ev) + evv = reflect.Indirect(evv) + switch ev.(type) { + case float32, float64: + evv = evv.Convert(floatType) + if fpv, err := pv.(json.Number).Float64(); err != nil { + t.Errorf("Failed to convert json number to float: %s", err) + } else if fpv != evv.Float() { + t.Errorf("%s: expected float value %q, got %f", tt.name, tt.expect, fpv) + } + case byte, int, int32, int64: + evv = evv.Convert(intType) + if ipv, err := pv.(json.Number).Int64(); err != nil { + t.Errorf("Failed to convert json number to int: %s", err) + } else if ipv != evv.Int() { + t.Errorf("%s: expected int value %q, got %d", tt.name, tt.expect, ipv) + } + default: + t.Errorf("Unknown primitive type: %s", reflect.TypeOf(ev)) + } case float64: - floatType := reflect.TypeOf(float64(0)) v := reflect.ValueOf(ev) v = reflect.Indirect(v) if !v.Type().ConvertibleTo(floatType) { diff --git a/cmd/helm/installer/options.go b/cmd/helm/installer/options.go index 196ad8de4..00cadac07 100644 --- a/cmd/helm/installer/options.go +++ b/cmd/helm/installer/options.go @@ -24,7 +24,12 @@ import ( "k8s.io/helm/pkg/version" ) -const defaultImage = "gcr.io/kubernetes-helm/tiller" +const ( + defaultImage = "gcr.io/kubernetes-helm/tiller" + + fmtJSON OutputFormat = "json" + fmtYAML OutputFormat = "yaml" +) // Options control how to install Tiller into a cluster, upgrade, and uninstall Tiller from a cluster. type Options struct { @@ -50,7 +55,7 @@ type Options struct { // AutoMountServiceAccountToken determines whether or not the service account should be added to Tiller. 
AutoMountServiceAccountToken bool - // Force allows to force upgrading tiller if deployed version is greater than current version + // ForceUpgrade allows to force upgrading tiller if deployed version is greater than current version ForceUpgrade bool // ImageSpec identifies the image Tiller will use when deployed. @@ -105,6 +110,9 @@ func (opts *Options) SelectImage() string { case opts.UseCanary: return defaultImage + ":canary" case opts.ImageSpec == "": + if version.BuildMetadata == "unreleased" { + return defaultImage + ":canary" + } return fmt.Sprintf("%s:%s", defaultImage, version.Version) default: return opts.ImageSpec @@ -151,11 +159,6 @@ func (f *OutputFormat) Type() string { return "OutputFormat" } -const ( - fmtJSON OutputFormat = "json" - fmtYAML OutputFormat = "yaml" -) - // Set validates and sets the value of the OutputFormat func (f *OutputFormat) Set(s string) error { for _, of := range []OutputFormat{fmtJSON, fmtYAML} { diff --git a/cmd/helm/installer/uninstall.go b/cmd/helm/installer/uninstall.go index db824ca0b..b1d78004e 100644 --- a/cmd/helm/installer/uninstall.go +++ b/cmd/helm/installer/uninstall.go @@ -31,13 +31,13 @@ const ( // Uninstall uses Kubernetes client to uninstall Tiller. func Uninstall(client kubernetes.Interface, opts *Options) error { - if err := deleteService(client.Core(), opts.Namespace); err != nil { + if err := deleteService(client.CoreV1(), opts.Namespace); err != nil { return err } if err := deleteDeployment(client, opts.Namespace); err != nil { return err } - return deleteSecret(client.Core(), opts.Namespace) + return deleteSecret(client.CoreV1(), opts.Namespace) } // deleteService deletes the Tiller Service resource @@ -47,10 +47,11 @@ func deleteService(client corev1.ServicesGetter, namespace string) error { } // deleteDeployment deletes the Tiller Deployment resource -// We need to use the reaper instead of the kube API because GC for deployment dependents -// is not yet supported at the k8s server level (<= 1.5) func deleteDeployment(client kubernetes.Interface, namespace string) error { - err := client.Extensions().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{}) + policy := metav1.DeletePropagationBackground + err := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{ + PropagationPolicy: &policy, + }) return ingoreNotFound(err) } diff --git a/cmd/helm/lint.go b/cmd/helm/lint.go index d0159d34b..710221bf5 100644 --- a/cmd/helm/lint.go +++ b/cmd/helm/lint.go @@ -61,7 +61,7 @@ func newLintCmd(out io.Writer) *cobra.Command { } cmd := &cobra.Command{ Use: "lint [flags] PATH", - Short: "examines a chart for possible issues", + Short: "Examines a chart for possible issues", Long: longLintHelp, RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { @@ -71,12 +71,12 @@ func newLintCmd(out io.Writer) *cobra.Command { }, } - cmd.Flags().VarP(&l.valueFiles, "values", "f", "specify values in a YAML file (can specify multiple)") - cmd.Flags().StringArrayVar(&l.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - cmd.Flags().StringArrayVar(&l.sValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - cmd.Flags().StringArrayVar(&l.fValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") - 
cmd.Flags().StringVar(&l.namespace, "namespace", "default", "namespace to put the release into") - cmd.Flags().BoolVar(&l.strict, "strict", false, "fail on lint warnings") + cmd.Flags().VarP(&l.valueFiles, "values", "f", "Specify values in a YAML file (can specify multiple)") + cmd.Flags().StringArrayVar(&l.values, "set", []string{}, "Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + cmd.Flags().StringArrayVar(&l.sValues, "set-string", []string{}, "Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + cmd.Flags().StringArrayVar(&l.fValues, "set-file", []string{}, "Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") + cmd.Flags().StringVar(&l.namespace, "namespace", "default", "Namespace to put the release into") + cmd.Flags().BoolVar(&l.strict, "strict", false, "Fail on lint warnings") return cmd } @@ -166,7 +166,7 @@ func lintChart(path string, vals []byte, namespace string, strict bool) (support chartPath = path } - // Guard: Error out of this is not a chart. + // Guard: Error out if this is not a chart. if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil { return linter, errLintNoChart } @@ -177,7 +177,7 @@ func lintChart(path string, vals []byte, namespace string, strict bool) (support // vals merges values from files specified via -f/--values and // directly via --set or --set-string or --set-file, marshaling them to YAML // -// This func is implemented intentionally and separately from the `vals` func for the `install` and `upgrade` comammdsn. +// This func is implemented intentionally and separately from the `vals` func for the `install` and `upgrade` commands. // Compared to the alternative func, this func lacks the parameters for tls opts - ca key, cert, and ca cert. // That's because this command, `lint`, is explicitly forbidden from making server connections. func (l *lintCmd) vals() ([]byte, error) { diff --git a/cmd/helm/list.go b/cmd/helm/list.go index 3ca3fbbfa..87b3ce54d 100644 --- a/cmd/helm/list.go +++ b/cmd/helm/list.go @@ -104,7 +104,7 @@ func newListCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "list [flags] [FILTER]", - Short: "list releases", + Short: "List releases", Long: listHelp, Aliases: []string{"ls"}, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, @@ -121,21 +121,21 @@ func newListCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.BoolVarP(&list.short, "short", "q", false, "output short (quiet) listing format") - f.BoolVarP(&list.byDate, "date", "d", false, "sort by release date") - f.BoolVarP(&list.sortDesc, "reverse", "r", false, "reverse the sort order") - f.IntVarP(&list.limit, "max", "m", 256, "maximum number of releases to fetch") - f.StringVarP(&list.offset, "offset", "o", "", "next release name in the list, used to offset from start value") - f.BoolVarP(&list.all, "all", "a", false, "show all releases, not just the ones marked DEPLOYED") - f.BoolVar(&list.deleted, "deleted", false, "show deleted releases") - f.BoolVar(&list.deleting, "deleting", false, "show releases that are currently being deleted") - f.BoolVar(&list.deployed, "deployed", false, "show deployed releases. 
If no other is specified, this will be automatically enabled") - f.BoolVar(&list.failed, "failed", false, "show failed releases") - f.BoolVar(&list.pending, "pending", false, "show pending releases") - f.StringVar(&list.namespace, "namespace", "", "show releases within a specific namespace") - f.UintVar(&list.colWidth, "col-width", 60, "specifies the max column width of output") - f.StringVar(&list.output, "output", "", "output the specified format (json or yaml)") - f.BoolVarP(&list.byChartName, "chart-name", "c", false, "sort by chart name") + f.BoolVarP(&list.short, "short", "q", false, "Output short (quiet) listing format") + f.BoolVarP(&list.byDate, "date", "d", false, "Sort by release date") + f.BoolVarP(&list.sortDesc, "reverse", "r", false, "Reverse the sort order") + f.IntVarP(&list.limit, "max", "m", 256, "Maximum number of releases to fetch") + f.StringVarP(&list.offset, "offset", "o", "", "Next release name in the list, used to offset from start value") + f.BoolVarP(&list.all, "all", "a", false, "Show all releases, not just the ones marked DEPLOYED") + f.BoolVar(&list.deleted, "deleted", false, "Show deleted releases") + f.BoolVar(&list.deleting, "deleting", false, "Show releases that are currently being deleted") + f.BoolVar(&list.deployed, "deployed", false, "Show deployed releases. If no other is specified, this will be automatically enabled") + f.BoolVar(&list.failed, "failed", false, "Show failed releases") + f.BoolVar(&list.pending, "pending", false, "Show pending releases") + f.StringVar(&list.namespace, "namespace", "", "Show releases within a specific namespace") + f.UintVar(&list.colWidth, "col-width", 60, "Specifies the max column width of output") + f.StringVar(&list.output, "output", "", "Output the specified format (json or yaml)") + f.BoolVarP(&list.byChartName, "chart-name", "c", false, "Sort by chart name") // TODO: Do we want this as a feature of 'helm list'? //f.BoolVar(&list.superseded, "history", true, "show historical releases") diff --git a/cmd/helm/package.go b/cmd/helm/package.go index 05fdf02f8..c3643e9b5 100644 --- a/cmd/helm/package.go +++ b/cmd/helm/package.go @@ -70,7 +70,7 @@ func newPackageCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "package [flags] [CHART_PATH] [...]", - Short: "package a chart directory into a chart archive", + Short: "Package a chart directory into a chart archive", Long: packageDesc, RunE: func(cmd *cobra.Command, args []string) error { pkg.home = settings.Home @@ -96,14 +96,14 @@ func newPackageCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.BoolVar(&pkg.save, "save", true, "save packaged chart to local chart repository") - f.BoolVar(&pkg.sign, "sign", false, "use a PGP private key to sign this package") - f.StringVar(&pkg.key, "key", "", "name of the key to use when signing. 
Used if --sign is true") - f.StringVar(&pkg.keyring, "keyring", defaultKeyring(), "location of a public keyring") - f.StringVar(&pkg.version, "version", "", "set the version on the chart to this semver version") - f.StringVar(&pkg.appVersion, "app-version", "", "set the appVersion on the chart to this version") - f.StringVarP(&pkg.destination, "destination", "d", ".", "location to write the chart.") - f.BoolVarP(&pkg.dependencyUpdate, "dependency-update", "u", false, `update dependencies from "requirements.yaml" to dir "charts/" before packaging`) + f.BoolVar(&pkg.save, "save", true, "Save packaged chart to local chart repository") + f.BoolVar(&pkg.sign, "sign", false, "Use a PGP private key to sign this package") + f.StringVar(&pkg.key, "key", "", "Name of the key to use when signing. Used if --sign is true") + f.StringVar(&pkg.keyring, "keyring", defaultKeyring(), "Location of a public keyring") + f.StringVar(&pkg.version, "version", "", "Set the version on the chart to this semver version") + f.StringVar(&pkg.appVersion, "app-version", "", "Set the appVersion on the chart to this version") + f.StringVarP(&pkg.destination, "destination", "d", ".", "Location to write the chart.") + f.BoolVarP(&pkg.dependencyUpdate, "dependency-update", "u", false, `Update dependencies from "requirements.yaml" to dir "charts/" before packaging`) return cmd } diff --git a/cmd/helm/plugin.go b/cmd/helm/plugin.go index fbdd1245b..99117dbb2 100644 --- a/cmd/helm/plugin.go +++ b/cmd/helm/plugin.go @@ -33,7 +33,7 @@ Manage client-side Helm plugins. func newPluginCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "plugin", - Short: "add, list, or remove Helm plugins", + Short: "Add, list, or remove Helm plugins", Long: pluginHelp, } cmd.AddCommand( diff --git a/cmd/helm/plugin_install.go b/cmd/helm/plugin_install.go index 7d77be3fc..abf60537b 100644 --- a/cmd/helm/plugin_install.go +++ b/cmd/helm/plugin_install.go @@ -44,7 +44,7 @@ func newPluginInstallCmd(out io.Writer) *cobra.Command { pcmd := &pluginInstallCmd{out: out} cmd := &cobra.Command{ Use: "install [options] ...", - Short: "install one or more Helm plugins", + Short: "Install one or more Helm plugins", Long: pluginInstallDesc, PreRunE: func(cmd *cobra.Command, args []string) error { return pcmd.complete(args) @@ -53,7 +53,7 @@ func newPluginInstallCmd(out io.Writer) *cobra.Command { return pcmd.run() }, } - cmd.Flags().StringVar(&pcmd.version, "version", "", "specify a version constraint. If this is not specified, the latest version is installed") + cmd.Flags().StringVar(&pcmd.version, "version", "", "Specify a version constraint. 
If this is not specified, the latest version is installed") return cmd } diff --git a/cmd/helm/plugin_list.go b/cmd/helm/plugin_list.go index 9693baaa2..efaac164e 100644 --- a/cmd/helm/plugin_list.go +++ b/cmd/helm/plugin_list.go @@ -34,7 +34,7 @@ func newPluginListCmd(out io.Writer) *cobra.Command { pcmd := &pluginListCmd{out: out} cmd := &cobra.Command{ Use: "list", - Short: "list installed Helm plugins", + Short: "List installed Helm plugins", RunE: func(cmd *cobra.Command, args []string) error { pcmd.home = settings.Home return pcmd.run() diff --git a/cmd/helm/plugin_remove.go b/cmd/helm/plugin_remove.go index f30e5b516..d4321558a 100644 --- a/cmd/helm/plugin_remove.go +++ b/cmd/helm/plugin_remove.go @@ -38,7 +38,7 @@ func newPluginRemoveCmd(out io.Writer) *cobra.Command { pcmd := &pluginRemoveCmd{out: out} cmd := &cobra.Command{ Use: "remove ...", - Short: "remove one or more Helm plugins", + Short: "Remove one or more Helm plugins", PreRunE: func(cmd *cobra.Command, args []string) error { return pcmd.complete(args) }, diff --git a/cmd/helm/plugin_update.go b/cmd/helm/plugin_update.go index f9d5a3fac..285572824 100644 --- a/cmd/helm/plugin_update.go +++ b/cmd/helm/plugin_update.go @@ -39,7 +39,7 @@ func newPluginUpdateCmd(out io.Writer) *cobra.Command { pcmd := &pluginUpdateCmd{out: out} cmd := &cobra.Command{ Use: "update ...", - Short: "update one or more Helm plugins", + Short: "Update one or more Helm plugins", PreRunE: func(cmd *cobra.Command, args []string) error { return pcmd.complete(args) }, diff --git a/cmd/helm/printer.go b/cmd/helm/printer.go index e98b71c64..1c89c04ef 100644 --- a/cmd/helm/printer.go +++ b/cmd/helm/printer.go @@ -17,16 +17,31 @@ limitations under the License. package main import ( + "encoding/json" "fmt" "io" "text/template" "time" + "github.com/ghodss/yaml" + "github.com/gosuri/uitable" + "github.com/spf13/cobra" + "k8s.io/helm/pkg/chartutil" "k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/timeconv" ) +type outputFormat string + +const ( + outputFlag = "output" + + outputTable outputFormat = "table" + outputJSON outputFormat = "json" + outputYAML outputFormat = "yaml" +) + var printReleaseTemplate = `REVISION: {{.Release.Version}} RELEASED: {{.ReleaseDate}} CHART: {{.Release.Chart.Metadata.Name}}-{{.Release.Chart.Metadata.Version}} @@ -66,7 +81,7 @@ func printRelease(out io.Writer, rel *release.Release) error { return tpl(printReleaseTemplate, data, out) } -func tpl(t string, vals map[string]interface{}, out io.Writer) error { +func tpl(t string, vals interface{}, out io.Writer) error { tt, err := template.New("_").Parse(t) if err != nil { return err @@ -80,3 +95,66 @@ func debug(format string, args ...interface{}) { fmt.Printf(format, args...) } } + +// bindOutputFlag will add the output flag to the given command and bind the +// value to the given string pointer +func bindOutputFlag(cmd *cobra.Command, varRef *string) { + cmd.Flags().StringVarP(varRef, outputFlag, "o", string(outputTable), fmt.Sprintf("Prints the output in the specified format. 
Allowed values: %s, %s, %s", outputTable, outputJSON, outputYAML)) +} + +type outputWriter interface { + WriteTable(out io.Writer) error + WriteJSON(out io.Writer) error + WriteYAML(out io.Writer) error +} + +func write(out io.Writer, ow outputWriter, format outputFormat) error { + switch format { + case outputTable: + return ow.WriteTable(out) + case outputJSON: + return ow.WriteJSON(out) + case outputYAML: + return ow.WriteYAML(out) + } + return fmt.Errorf("unsupported format %s", format) +} + +// encodeJSON is a helper function to decorate any error message with a bit more +// context and avoid writing the same code over and over for printers +func encodeJSON(out io.Writer, obj interface{}) error { + enc := json.NewEncoder(out) + err := enc.Encode(obj) + if err != nil { + return fmt.Errorf("unable to write JSON output: %s", err) + } + return nil +} + +// encodeYAML is a helper function to decorate any error message with a bit more +// context and avoid writing the same code over and over for printers +func encodeYAML(out io.Writer, obj interface{}) error { + raw, err := yaml.Marshal(obj) + if err != nil { + return fmt.Errorf("unable to write YAML output: %s", err) + } + // Append a newline, as with a JSON encoder + raw = append(raw, []byte("\n")...) + _, err = out.Write(raw) + if err != nil { + return fmt.Errorf("unable to write YAML output: %s", err) + } + return nil +} + +// encodeTable is a helper function to decorate any error message with a bit +// more context and avoid writing the same code over and over for printers +func encodeTable(out io.Writer, table *uitable.Table) error { + raw := table.Bytes() + raw = append(raw, []byte("\n")...) + _, err := out.Write(raw) + if err != nil { + return fmt.Errorf("unable to write table output: %s", err) + } + return nil +} diff --git a/cmd/helm/release_testing.go b/cmd/helm/release_testing.go index 91c0d7189..e108ade81 100644 --- a/cmd/helm/release_testing.go +++ b/cmd/helm/release_testing.go @@ -50,7 +50,7 @@ func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "test [RELEASE]", - Short: "test a release", + Short: "Test a release", Long: releaseTestDesc, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -66,9 +66,9 @@ func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int64Var(&rlsTest.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&rlsTest.cleanup, "cleanup", false, "delete test pods upon completion") - f.BoolVar(&rlsTest.parallel, "parallel", false, "run test pods in parallel") + f.Int64Var(&rlsTest.timeout, "timeout", 300, "Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.BoolVar(&rlsTest.cleanup, "cleanup", false, "Delete test pods upon completion") + f.BoolVar(&rlsTest.parallel, "parallel", false, "Run test pods in parallel") // set defaults from environment settings.InitTLS(f) diff --git a/cmd/helm/repo.go b/cmd/helm/repo.go index 9f1dc8928..9eac9237d 100644 --- a/cmd/helm/repo.go +++ b/cmd/helm/repo.go @@ -33,7 +33,7 @@ Example usage: func newRepoCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "repo [FLAGS] add|remove|list|index|update [ARGS]", - Short: "add, list, remove, update, and index chart repositories", + Short: "Add, list, remove, update, and index chart repositories", Long: repoHelm, } diff 
--git a/cmd/helm/repo_add.go b/cmd/helm/repo_add.go index bfb3f0174..8ae78f473 100644 --- a/cmd/helm/repo_add.go +++ b/cmd/helm/repo_add.go @@ -17,16 +17,20 @@ limitations under the License. package main import ( + "context" "fmt" "io" + "syscall" + "time" + + "golang.org/x/crypto/ssh/terminal" + "github.com/gofrs/flock" "github.com/spf13/cobra" - "golang.org/x/crypto/ssh/terminal" "k8s.io/helm/pkg/getter" "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/repo" - "syscall" ) type repoAddCmd struct { @@ -49,7 +53,7 @@ func newRepoAddCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "add [flags] [NAME] [URL]", - Short: "add a chart repository", + Short: "Add a chart repository", RunE: func(cmd *cobra.Command, args []string) error { if err := checkArgsLength(len(args), "name for the chart repository", "the url of the chart repository"); err != nil { return err @@ -64,12 +68,12 @@ func newRepoAddCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVar(&add.username, "username", "", "chart repository username") - f.StringVar(&add.password, "password", "", "chart repository password") - f.BoolVar(&add.noupdate, "no-update", false, "raise error if repo is already registered") - f.StringVar(&add.certFile, "cert-file", "", "identify HTTPS client using this SSL certificate file") - f.StringVar(&add.keyFile, "key-file", "", "identify HTTPS client using this SSL key file") - f.StringVar(&add.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") + f.StringVar(&add.username, "username", "", "Chart repository username") + f.StringVar(&add.password, "password", "", "Chart repository password") + f.BoolVar(&add.noupdate, "no-update", false, "Raise error if repo is already registered") + f.StringVar(&add.certFile, "cert-file", "", "Identify HTTPS client using this SSL certificate file") + f.StringVar(&add.keyFile, "key-file", "", "Identify HTTPS client using this SSL key file") + f.StringVar(&add.caFile, "ca-file", "", "Verify certificates of HTTPS-enabled servers using this CA bundle") return cmd } @@ -131,6 +135,25 @@ func addRepository(name, url, username, password string, home helmpath.Home, cer return fmt.Errorf("Looks like %q is not a valid chart repository or cannot be reached: %s", url, err.Error()) } + // Lock the repository file for concurrent goroutines or processes synchronization + fileLock := flock.New(home.RepositoryFile()) + lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + locked, err := fileLock.TryLockContext(lockCtx, time.Second) + if err == nil && locked { + defer fileLock.Unlock() + } + if err != nil { + return err + } + + // Re-read the repositories file before updating it as its content may have been changed + // by a concurrent execution after the first read and before being locked + f, err = repo.LoadRepositoriesFile(home.RepositoryFile()) + if err != nil { + return err + } + f.Update(&c) return f.WriteFile(home.RepositoryFile(), 0644) diff --git a/cmd/helm/repo_add_test.go b/cmd/helm/repo_add_test.go index 5a458cef7..7443a476a 100644 --- a/cmd/helm/repo_add_test.go +++ b/cmd/helm/repo_add_test.go @@ -17,13 +17,18 @@ limitations under the License. 
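Before the test changes below: the addRepository change above serializes concurrent writers of the repositories file by taking a file lock with github.com/gofrs/flock and re-reading the file once the lock is held, so a concurrent writer's entry is not silently overwritten. A minimal standalone sketch of that locking pattern (illustrative only; the file path is a placeholder, and the 30-second timeout and 1-second retry interval simply mirror the values used above):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	// Placeholder path for illustration; addRepository locks
	// $HELM_HOME/repository/repositories.yaml itself.
	fileLock := flock.New("/tmp/repositories.yaml")

	// Retry the lock every second, giving up after 30 seconds, mirroring the
	// TryLockContext call in addRepository.
	lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	locked, err := fileLock.TryLockContext(lockCtx, time.Second)
	if err != nil {
		panic(err) // context timed out or the lock could not be acquired
	}
	if locked {
		defer fileLock.Unlock()
		// Critical section: re-read the repositories file, apply the new
		// entry, and write it back while no other writer can interleave.
		fmt.Println("repositories file locked; safe to rewrite")
	}
}
```

The tests that follow (TestRepoAddConcurrentGoRoutines and TestRepoAddConcurrentSubProcesses) exercise exactly this path, once from multiple goroutines in one process and once from separate child processes.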
package main import ( + "fmt" "io" "os" + "os/exec" + "strings" + "sync" "testing" "github.com/spf13/cobra" "k8s.io/helm/pkg/helm" + "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/repo" "k8s.io/helm/pkg/repo/repotest" ) @@ -101,3 +106,111 @@ func TestRepoAdd(t *testing.T) { t.Errorf("Duplicate repository name was added") } } +func TestRepoAddConcurrentGoRoutines(t *testing.T) { + ts, thome, err := repotest.NewTempServer("testdata/testserver/*.*") + if err != nil { + t.Fatal(err) + } + + cleanup := resetEnv() + defer func() { + ts.Stop() + os.RemoveAll(thome.String()) + cleanup() + }() + + settings.Home = thome + if err := ensureTestHome(settings.Home, t); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(3) + for i := 0; i < 3; i++ { + go func(name string) { + defer wg.Done() + if err := addRepository(name, ts.URL(), "", "", settings.Home, "", "", "", true); err != nil { + t.Error(err) + } + }(fmt.Sprintf("%s-%d", testName, i)) + } + wg.Wait() + + f, err := repo.LoadRepositoriesFile(settings.Home.RepositoryFile()) + if err != nil { + t.Error(err) + } + + var name string + for i := 0; i < 3; i++ { + name = fmt.Sprintf("%s-%d", testName, i) + if !f.Has(name) { + t.Errorf("%s was not successfully inserted into %s", name, settings.Home.RepositoryFile()) + } + } +} + +// Same as TestRepoAddConcurrentGoRoutines but with repository additions in sub-processes +func TestRepoAddConcurrentSubProcesses(t *testing.T) { + goWantHelperProcess := os.Getenv("GO_WANT_HELPER_PROCESS") + if goWantHelperProcess == "" { + // parent + + ts, thome, err := repotest.NewTempServer("testdata/testserver/*.*") + if err != nil { + t.Fatal(err) + } + + settings.Home = thome + + cleanup := resetEnv() + defer func() { + ts.Stop() + os.RemoveAll(thome.String()) + cleanup() + }() + if err := ensureTestHome(settings.Home, t); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func(name string) { + defer wg.Done() + + cmd := exec.Command(os.Args[0], "-test.run=^TestRepoAddConcurrentSubProcesses$") + cmd.Env = append(os.Environ(), fmt.Sprintf("GO_WANT_HELPER_PROCESS=%s,%s", name, ts.URL()), fmt.Sprintf("HELM_HOME=%s", settings.Home)) + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + t.Fatalf("child process: %q, %v", out, err) + } + }(fmt.Sprintf("%s-%d", testName, i)) + } + wg.Wait() + + f, err := repo.LoadRepositoriesFile(settings.Home.RepositoryFile()) + if err != nil { + t.Error(err) + } + + var name string + for i := 0; i < 2; i++ { + name = fmt.Sprintf("%s-%d", testName, i) + if !f.Has(name) { + t.Errorf("%s was not successfully inserted into %s", name, settings.Home.RepositoryFile()) + } + } + } else { + // child + s := strings.Split(goWantHelperProcess, ",") + settings.Home = helmpath.Home(os.Getenv("HELM_HOME")) + repoName := s[0] + tsURL := s[1] + if err := addRepository(repoName, tsURL, "", "", settings.Home, "", "", "", true); err != nil { + t.Fatal(err) + } + + os.Exit(0) + } +} diff --git a/cmd/helm/repo_index.go b/cmd/helm/repo_index.go index b3f49fb97..cd7b2aea0 100644 --- a/cmd/helm/repo_index.go +++ b/cmd/helm/repo_index.go @@ -50,7 +50,7 @@ func newRepoIndexCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "index [flags] [DIR]", - Short: "generate an index file given a directory containing packaged charts", + Short: "Generate an index file given a directory containing packaged charts", Long: repoIndexDesc, RunE: func(cmd *cobra.Command, args []string) error { if err := checkArgsLength(len(args), "path to a 
directory"); err != nil { @@ -64,8 +64,8 @@ func newRepoIndexCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVar(&index.url, "url", "", "url of chart repository") - f.StringVar(&index.merge, "merge", "", "merge the generated index into the given index") + f.StringVar(&index.url, "url", "", "URL of the chart repository") + f.StringVar(&index.merge, "merge", "", "Merge the generated index into the given index") return cmd } diff --git a/cmd/helm/repo_list.go b/cmd/helm/repo_list.go index 36887c69b..a65b81908 100644 --- a/cmd/helm/repo_list.go +++ b/cmd/helm/repo_list.go @@ -17,8 +17,6 @@ limitations under the License. package main import ( - "errors" - "fmt" "io" "github.com/gosuri/uitable" @@ -29,8 +27,14 @@ import ( ) type repoListCmd struct { - out io.Writer - home helmpath.Home + out io.Writer + home helmpath.Home + output string +} + +type repositoryElement struct { + Name string + URL string } func newRepoListCmd(out io.Writer) *cobra.Command { @@ -38,29 +42,63 @@ func newRepoListCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "list [flags]", - Short: "list chart repositories", + Short: "List chart repositories", RunE: func(cmd *cobra.Command, args []string) error { list.home = settings.Home return list.run() }, } + bindOutputFlag(cmd, &list.output) return cmd } func (a *repoListCmd) run() error { - f, err := repo.LoadRepositoriesFile(a.home.RepositoryFile()) + repoFile, err := repo.LoadRepositoriesFile(a.home.RepositoryFile()) if err != nil { return err } - if len(f.Repositories) == 0 { - return errors.New("no repositories to show") - } + + return write(a.out, &repoListWriter{repoFile.Repositories}, outputFormat(a.output)) +} + +//////////// Printer implementation below here +type repoListWriter struct { + repos []*repo.Entry +} + +func (r *repoListWriter) WriteTable(out io.Writer) error { table := uitable.New() table.AddRow("NAME", "URL") - for _, re := range f.Repositories { + for _, re := range r.repos { table.AddRow(re.Name, re.URL) } - fmt.Fprintln(a.out, table) + return encodeTable(out, table) +} + +func (r *repoListWriter) WriteJSON(out io.Writer) error { + return r.encodeByFormat(out, outputJSON) +} + +func (r *repoListWriter) WriteYAML(out io.Writer) error { + return r.encodeByFormat(out, outputYAML) +} + +func (r *repoListWriter) encodeByFormat(out io.Writer, format outputFormat) error { + var repolist []repositoryElement + + for _, re := range r.repos { + repolist = append(repolist, repositoryElement{Name: re.Name, URL: re.URL}) + } + + switch format { + case outputJSON: + return encodeJSON(out, repolist) + case outputYAML: + return encodeYAML(out, repolist) + } + + // Because this is a non-exported function and only called internally by + // WriteJSON and WriteYAML, we shouldn't get invalid types return nil } diff --git a/cmd/helm/repo_remove.go b/cmd/helm/repo_remove.go index f13b8dadb..98b801151 100644 --- a/cmd/helm/repo_remove.go +++ b/cmd/helm/repo_remove.go @@ -39,7 +39,7 @@ func newRepoRemoveCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "remove [flags] [NAME]", Aliases: []string{"rm"}, - Short: "remove a chart repository", + Short: "Remove a chart repository", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return fmt.Errorf("need at least one argument, name of chart repository") diff --git a/cmd/helm/repo_update.go b/cmd/helm/repo_update.go index 526300343..f1e9fb566 100644 --- a/cmd/helm/repo_update.go +++ b/cmd/helm/repo_update.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" + 
"k8s.io/helm/cmd/helm/installer" "k8s.io/helm/pkg/getter" "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/repo" @@ -35,15 +36,24 @@ Information is cached locally, where it is used by commands like 'helm search'. 'helm update' is the deprecated form of 'helm repo update'. It will be removed in future releases. + +You can specify the name of a repository you want to update. + + $ helm repo update + +To update all the repositories, use 'helm repo update'. + ` var errNoRepositories = errors.New("no repositories found. You must add one before updating") +var errNoRepositoriesMatchingRepoName = errors.New("no repositories found matching the provided name. Verify if the repo exists") type repoUpdateCmd struct { update func([]*repo.ChartRepository, io.Writer, helmpath.Home, bool) error home helmpath.Home out io.Writer strict bool + name string } func newRepoUpdateCmd(out io.Writer) *cobra.Command { @@ -52,18 +62,21 @@ func newRepoUpdateCmd(out io.Writer) *cobra.Command { update: updateCharts, } cmd := &cobra.Command{ - Use: "update", + Use: "update [REPO_NAME]", Aliases: []string{"up"}, - Short: "update information of available charts locally from chart repositories", + Short: "Update information of available charts locally from chart repositories", Long: updateDesc, RunE: func(cmd *cobra.Command, args []string) error { u.home = settings.Home + if len(args) != 0 { + u.name = args[0] + } return u.run() }, } f := cmd.Flags() - f.BoolVar(&u.strict, "strict", false, "fail on update warnings") + f.BoolVar(&u.strict, "strict", false, "Fail on update warnings") return cmd } @@ -83,8 +96,22 @@ func (u *repoUpdateCmd) run() error { if err != nil { return err } - repos = append(repos, r) + if len(u.name) != 0 { + if cfg.Name == u.name { + repos = append(repos, r) + break + } else { + continue + } + } else { + repos = append(repos, r) + } } + + if len(repos) == 0 { + return errNoRepositoriesMatchingRepoName + } + return u.update(repos, u.out, u.home, u.strict) } @@ -93,21 +120,28 @@ func updateCharts(repos []*repo.ChartRepository, out io.Writer, home helmpath.Ho var ( errorCounter int wg sync.WaitGroup + mu sync.Mutex ) for _, re := range repos { wg.Add(1) go func(re *repo.ChartRepository) { defer wg.Done() - if re.Config.Name == localRepository { + if re.Config.Name == installer.LocalRepository { + mu.Lock() fmt.Fprintf(out, "...Skip %s chart repository\n", re.Config.Name) + mu.Unlock() return } err := re.DownloadIndexFile(home.Cache()) if err != nil { + mu.Lock() errorCounter++ fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err) + mu.Unlock() } else { + mu.Lock() fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name) + mu.Unlock() } }(re) } @@ -117,6 +151,6 @@ func updateCharts(repos []*repo.ChartRepository, out io.Writer, home helmpath.Ho return errors.New("Update Failed. Check log for details") } - fmt.Fprintln(out, "Update Complete. 
⎈ Happy Helming!⎈ ") + fmt.Fprintln(out, "Update Complete.") return nil } diff --git a/cmd/helm/repo_update_test.go b/cmd/helm/repo_update_test.go index 86af437c5..d26df98c5 100644 --- a/cmd/helm/repo_update_test.go +++ b/cmd/helm/repo_update_test.go @@ -105,3 +105,105 @@ func TestUpdateCharts(t *testing.T) { t.Error("Update was not successful") } } + +func TestUpdateCmdStrictFlag(t *testing.T) { + thome, err := tempHelmHome(t) + if err != nil { + t.Fatal(err) + } + + cleanup := resetEnv() + defer func() { + os.RemoveAll(thome.String()) + cleanup() + }() + + settings.Home = thome + + out := bytes.NewBuffer(nil) + cmd := newRepoUpdateCmd(out) + cmd.ParseFlags([]string{"--strict"}) + + if err := cmd.RunE(cmd, []string{}); err == nil { + t.Fatal("expected error due to strict flag") + } + + if got := out.String(); !strings.Contains(got, "Unable to get an update") { + t.Errorf("Expected 'Unable to get an update', got %q", got) + } +} + +func TestUpdateCmdWithSingleRepoNameWhichDoesntExist(t *testing.T) { + thome, err := tempHelmHome(t) + if err != nil { + t.Fatal(err) + } + + cleanup := resetEnv() + defer func() { + os.RemoveAll(thome.String()) + cleanup() + }() + + settings.Home = thome + + out := bytes.NewBuffer(nil) + cmd := newRepoUpdateCmd(out) + + if err = cmd.RunE(cmd, []string{"randomRepo"}); err == nil { + t.Fatal("expected error due to wrong repo name") + } + + if got := fmt.Sprintf("%v", err); !strings.Contains(got, "no repositories found matching the provided name. Verify if the repo exists") { + t.Errorf("Expected 'no repositories found matching the provided name. Verify if the repo exists', got %q", got) + } +} + +func TestUpdateRepo(t *testing.T) { + ts, thome, err := repotest.NewTempServer("testdata/testserver/*.*") + if err != nil { + t.Fatal(err) + } + + hh := helmpath.Home(thome) + cleanup := resetEnv() + defer func() { + ts.Stop() + os.RemoveAll(thome.String()) + cleanup() + }() + if err := ensureTestHome(hh, t); err != nil { + t.Fatal(err) + } + + settings.Home = thome + + if err := addRepository("repo1", ts.URL(), "", "", hh, "", "", "", true); err != nil { + t.Error(err) + } + + if err := addRepository("repo2", ts.URL(), "", "", hh, "", "", "", true); err != nil { + t.Error(err) + } + + out := bytes.NewBuffer(nil) + cmd := newRepoUpdateCmd(out) + + if err = cmd.RunE(cmd, []string{"repo1"}); err != nil { + t.Fatal("expected to update repo1 correctly") + } + + got := out.String() + + if !strings.Contains(got, "Successfully got an update from the \"repo1\"") { + t.Errorf("Expected to successfully update \"repo1\" repository, got %q", got) + } + + if strings.Contains(got, "Successfully got an update from the \"repo2\"") { + t.Errorf("Shouldn't have updated \"repo2\" repository, got %q", got) + } + + if !strings.Contains(got, "Update Complete.") { + t.Error("Update was not successful") + } +} diff --git a/cmd/helm/reset.go b/cmd/helm/reset.go index 887ce34d0..9e7a14710 100644 --- a/cmd/helm/reset.go +++ b/cmd/helm/reset.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "os" + "strings" "github.com/spf13/cobra" @@ -56,10 +57,14 @@ func newResetCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "reset", - Short: "uninstalls Tiller from a cluster", + Short: "Uninstalls Tiller from a cluster", Long: resetDesc, PreRunE: func(cmd *cobra.Command, args []string) error { - if err := setupConnection(); !d.force && err != nil { + err := setupConnection() + if !d.force && err != nil { + return err + } + if d.force && err != nil && strings.EqualFold(err.Error(), 
"could not find tiller") { return err } return nil @@ -79,8 +84,8 @@ func newResetCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.BoolVarP(&d.force, "force", "f", false, "forces Tiller uninstall even if there are releases installed, or if Tiller is not in ready state. Releases are not deleted.)") - f.BoolVar(&d.removeHelmHome, "remove-helm-home", false, "if set deletes $HELM_HOME") + f.BoolVarP(&d.force, "force", "f", false, "Forces Tiller uninstall even if there are releases installed, or if Tiller is not in ready state. Releases are not deleted.)") + f.BoolVar(&d.removeHelmHome, "remove-helm-home", false, "If set, deletes $HELM_HOME") // set defaults from environment settings.InitTLS(f) @@ -110,7 +115,7 @@ func (d *resetCmd) run() error { } if err := installer.Uninstall(d.kubeClient, &installer.Options{Namespace: d.namespace}); err != nil { - return fmt.Errorf("error unstalling Tiller: %s", err) + return fmt.Errorf("error uninstalling Tiller: %s", err) } if d.removeHelmHome { diff --git a/cmd/helm/rollback.go b/cmd/helm/rollback.go index 0c46fa818..970afef32 100644 --- a/cmd/helm/rollback.go +++ b/cmd/helm/rollback.go @@ -31,21 +31,23 @@ This command rolls back a release to a previous revision. The first argument of the rollback command is the name of a release, and the second is a revision (version) number. To see revision numbers, run -'helm history RELEASE'. +'helm history RELEASE'. If you'd like to rollback to the previous release use +'helm rollback [RELEASE] 0'. ` type rollbackCmd struct { - name string - revision int32 - dryRun bool - recreate bool - force bool - disableHooks bool - out io.Writer - client helm.Interface - timeout int64 - wait bool - description string + name string + revision int32 + dryRun bool + recreate bool + force bool + disableHooks bool + out io.Writer + client helm.Interface + timeout int64 + wait bool + description string + cleanupOnFail bool } func newRollbackCmd(c helm.Interface, out io.Writer) *cobra.Command { @@ -56,7 +58,7 @@ func newRollbackCmd(c helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "rollback [flags] [RELEASE] [REVISION]", - Short: "roll back a release to a previous revision", + Short: "Rollback a release to a previous revision", Long: rollbackDesc, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -79,13 +81,14 @@ func newRollbackCmd(c helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.BoolVar(&rollback.dryRun, "dry-run", false, "simulate a rollback") - f.BoolVar(&rollback.recreate, "recreate-pods", false, "performs pods restart for the resource if applicable") - f.BoolVar(&rollback.force, "force", false, "force resource update through delete/recreate if needed") - f.BoolVar(&rollback.disableHooks, "no-hooks", false, "prevent hooks from running during rollback") - f.Int64Var(&rollback.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&rollback.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. 
It will wait for as long as --timeout") - f.StringVar(&rollback.description, "description", "", "specify a description for the release") + f.BoolVar(&rollback.dryRun, "dry-run", false, "Simulate a rollback") + f.BoolVar(&rollback.recreate, "recreate-pods", false, "Performs pods restart for the resource if applicable") + f.BoolVar(&rollback.force, "force", false, "Force resource update through delete/recreate if needed") + f.BoolVar(&rollback.disableHooks, "no-hooks", false, "Prevent hooks from running during rollback") + f.Int64Var(&rollback.timeout, "timeout", 300, "Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.BoolVar(&rollback.wait, "wait", false, "If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") + f.StringVar(&rollback.description, "description", "", "Specify a description for the release") + f.BoolVar(&rollback.cleanupOnFail, "cleanup-on-fail", false, "Allow deletion of new resources created in this rollback when rollback failed") // set defaults from environment settings.InitTLS(f) @@ -103,12 +106,13 @@ func (r *rollbackCmd) run() error { helm.RollbackVersion(r.revision), helm.RollbackTimeout(r.timeout), helm.RollbackWait(r.wait), - helm.RollbackDescription(r.description)) + helm.RollbackDescription(r.description), + helm.RollbackCleanupOnFail(r.cleanupOnFail)) if err != nil { return prettyError(err) } - fmt.Fprintf(r.out, "Rollback was a success! Happy Helming!\n") + fmt.Fprintf(r.out, "Rollback was a success.\n") return nil } diff --git a/cmd/helm/rollback_test.go b/cmd/helm/rollback_test.go index a98a4096a..5d97dca4b 100644 --- a/cmd/helm/rollback_test.go +++ b/cmd/helm/rollback_test.go @@ -31,25 +31,25 @@ func TestRollbackCmd(t *testing.T) { { name: "rollback a release", args: []string{"funny-honey", "1"}, - expected: "Rollback was a success! Happy Helming!", + expected: "Rollback was a success.", }, { name: "rollback a release with timeout", args: []string{"funny-honey", "1"}, flags: []string{"--timeout", "120"}, - expected: "Rollback was a success! Happy Helming!", + expected: "Rollback was a success.", }, { name: "rollback a release with wait", args: []string{"funny-honey", "1"}, flags: []string{"--wait"}, - expected: "Rollback was a success! Happy Helming!", + expected: "Rollback was a success.", }, { name: "rollback a release with description", args: []string{"funny-honey", "1"}, flags: []string{"--description", "foo"}, - expected: "Rollback was a success! 
Happy Helming!", + expected: "Rollback was a success.", }, { name: "rollback a release without revision", diff --git a/cmd/helm/search.go b/cmd/helm/search.go index 84f328d41..b55997ec8 100644 --- a/cmd/helm/search.go +++ b/cmd/helm/search.go @@ -48,6 +48,14 @@ type searchCmd struct { regexp bool version string colWidth uint + output string +} + +type chartElement struct { + Name string + Version string + AppVersion string + Description string } func newSearchCmd(out io.Writer) *cobra.Command { @@ -55,7 +63,7 @@ func newSearchCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "search [keyword]", - Short: "search for a keyword in charts", + Short: "Search for a keyword in charts", Long: searchDesc, RunE: func(cmd *cobra.Command, args []string) error { sc.helmhome = settings.Home @@ -64,10 +72,11 @@ func newSearchCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.BoolVarP(&sc.regexp, "regexp", "r", false, "use regular expressions for searching") - f.BoolVarP(&sc.versions, "versions", "l", false, "show the long listing, with each version of each chart on its own line") - f.StringVarP(&sc.version, "version", "v", "", "search using semantic versioning constraints") - f.UintVar(&sc.colWidth, "col-width", 60, "specifies the max column width of output") + f.BoolVarP(&sc.regexp, "regexp", "r", false, "Use regular expressions for searching") + f.BoolVarP(&sc.versions, "versions", "l", false, "Show the long listing, with each version of each chart on its own line") + f.StringVarP(&sc.version, "version", "v", "", "Search using semantic versioning constraints") + f.UintVar(&sc.colWidth, "col-width", 60, "Specifies the max column width of output") + bindOutputFlag(cmd, &sc.output) return cmd } @@ -95,9 +104,7 @@ func (s *searchCmd) run(args []string) error { return err } - fmt.Fprintln(s.out, s.formatSearchResults(data, s.colWidth)) - - return nil + return write(s.out, &searchWriter{data, s.colWidth}, outputFormat(s.output)) } func (s *searchCmd) applyConstraint(res []*search.Result) ([]*search.Result, error) { @@ -128,19 +135,6 @@ func (s *searchCmd) applyConstraint(res []*search.Result) ([]*search.Result, err return data, nil } -func (s *searchCmd) formatSearchResults(res []*search.Result, colWidth uint) string { - if len(res) == 0 { - return "No results found" - } - table := uitable.New() - table.MaxColWidth = colWidth - table.AddRow("NAME", "CHART VERSION", "APP VERSION", "DESCRIPTION") - for _, r := range res { - table.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description) - } - return table.String() -} - func (s *searchCmd) buildIndex() (*search.Index, error) { // Load the repositories.yaml rf, err := repo.LoadRepositoriesFile(s.helmhome.RepositoryFile()) @@ -154,7 +148,7 @@ func (s *searchCmd) buildIndex() (*search.Index, error) { f := s.helmhome.CacheIndex(n) ind, err := repo.LoadIndexFile(f) if err != nil { - fmt.Fprintf(s.out, "WARNING: Repo %q is corrupt or missing. Try 'helm repo update'.", n) + fmt.Fprintf(s.out, "WARNING: Repo %q is corrupt or missing. 
Try 'helm repo update'.\n", n) continue } @@ -162,3 +156,53 @@ func (s *searchCmd) buildIndex() (*search.Index, error) { } return i, nil } + +//////////// Printer implementation below here +type searchWriter struct { + results []*search.Result + columnWidth uint +} + +func (r *searchWriter) WriteTable(out io.Writer) error { + if len(r.results) == 0 { + _, err := out.Write([]byte("No results found\n")) + if err != nil { + return fmt.Errorf("unable to write results: %s", err) + } + return nil + } + table := uitable.New() + table.MaxColWidth = r.columnWidth + table.AddRow("NAME", "CHART VERSION", "APP VERSION", "DESCRIPTION") + for _, r := range r.results { + table.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description) + } + return encodeTable(out, table) +} + +func (r *searchWriter) WriteJSON(out io.Writer) error { + return r.encodeByFormat(out, outputJSON) +} + +func (r *searchWriter) WriteYAML(out io.Writer) error { + return r.encodeByFormat(out, outputYAML) +} + +func (r *searchWriter) encodeByFormat(out io.Writer, format outputFormat) error { + var chartList []chartElement + + for _, r := range r.results { + chartList = append(chartList, chartElement{r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description}) + } + + switch format { + case outputJSON: + return encodeJSON(out, chartList) + case outputYAML: + return encodeYAML(out, chartList) + } + + // Because this is a non-exported function and only called internally by + // WriteJSON and WriteYAML, we shouldn't get invalid types + return nil +} diff --git a/cmd/helm/search/search.go b/cmd/helm/search/search.go index 04acb8690..2fd6b4581 100644 --- a/cmd/helm/search/search.go +++ b/cmd/helm/search/search.go @@ -33,6 +33,12 @@ import ( "k8s.io/helm/pkg/repo" ) +const ( + sep = "\v" + // verSep is a separator for version fields in map keys. + verSep = "$$" +) + // Result is a search result. // // Score indicates how close it is to match. The higher the score, the longer @@ -49,16 +55,11 @@ type Index struct { charts map[string]*repo.ChartVersion } -const sep = "\v" - -// NewIndex creats a new Index. +// NewIndex creates a new Index. func NewIndex() *Index { return &Index{lines: map[string]string{}, charts: map[string]*repo.ChartVersion{}} } -// verSep is a separator for version fields in map keys. -const verSep = "$$" - // AddRepo adds a repository index to the search index. 
func (i *Index) AddRepo(rname string, ind *repo.IndexFile, all bool) { ind.SortEntries() diff --git a/cmd/helm/search_test.go b/cmd/helm/search_test.go index 233f94572..12824407c 100644 --- a/cmd/helm/search_test.go +++ b/cmd/helm/search_test.go @@ -18,6 +18,7 @@ package main import ( "io" + "strings" "testing" "github.com/spf13/cobra" @@ -84,6 +85,30 @@ func TestSearchCmd(t *testing.T) { flags: []string{"--regexp"}, err: true, }, + { + name: "search for 'maria', expect one match output json", + args: []string{"maria"}, + flags: strings.Split("--output json", " "), + expected: `[{"Name":"testing/mariadb","Version":"0.3.0","Appversion":"","Description":"Chart for MariaDB"}]`, + }, + { + name: "search for 'alpine', expect two matches output json", + args: []string{"alpine"}, + flags: strings.Split("--output json", " "), + expected: `[{"Name":"testing/alpine","Version":"0.2.0","Appversion":"2.3.4","Description":"Deploy a basic Alpine Linux pod"}]`, + }, + { + name: "search for 'maria', expect one match output yaml", + args: []string{"maria"}, + flags: strings.Split("--output yaml", " "), + expected: "- AppVersion: \"\"\n Description: Chart for MariaDB\n Name: testing/mariadb\n Version: 0.3.0\n\n", + }, + { + name: "search for 'alpine', expect two matches output yaml", + args: []string{"alpine"}, + flags: strings.Split("--output yaml", " "), + expected: "- AppVersion: 2.3.4\n Description: Deploy a basic Alpine Linux pod\n Name: testing/alpine\n Version: 0.2.0\n\n", + }, } cleanup := resetEnv() diff --git a/cmd/helm/serve.go b/cmd/helm/serve.go index 7ddae6ca2..f1ffdcb15 100644 --- a/cmd/helm/serve.go +++ b/cmd/helm/serve.go @@ -53,7 +53,7 @@ func newServeCmd(out io.Writer) *cobra.Command { srv := &serveCmd{out: out} cmd := &cobra.Command{ Use: "serve", - Short: "start a local http web server", + Short: "Start a local http web server", Long: serveDesc, PreRunE: func(cmd *cobra.Command, args []string) error { return srv.complete() @@ -64,9 +64,9 @@ func newServeCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVar(&srv.repoPath, "repo-path", "", "local directory path from which to serve charts") - f.StringVar(&srv.address, "address", "127.0.0.1:8879", "address to listen on") - f.StringVar(&srv.url, "url", "", "external URL of chart repository") + f.StringVar(&srv.repoPath, "repo-path", "", "Local directory path from which to serve charts") + f.StringVar(&srv.address, "address", "127.0.0.1:8879", "Address to listen on") + f.StringVar(&srv.url, "url", "", "External URL of chart repository") return cmd } diff --git a/cmd/helm/status.go b/cmd/helm/status.go index b03453adc..23120980a 100644 --- a/cmd/helm/status.go +++ b/cmd/helm/status.go @@ -17,13 +17,11 @@ limitations under the License. 
package main import ( - "encoding/json" "fmt" "io" "regexp" "text/tabwriter" - "github.com/ghodss/yaml" "github.com/gosuri/uitable" "github.com/gosuri/uitable/util/strutil" "github.com/spf13/cobra" @@ -61,7 +59,7 @@ func newStatusCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "status [flags] RELEASE_NAME", - Short: "displays the status of the named release", + Short: "Displays the status of the named release", Long: statusHelp, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -78,8 +76,8 @@ func newStatusCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.Int32Var(&status.version, "revision", 0, "if set, display the status of the named release with revision") - f.StringVarP(&status.outfmt, "output", "o", "", "output the status in the specified format (json or yaml)") + f.Int32Var(&status.version, "revision", 0, "If set, display the status of the named release with revision") + bindOutputFlag(cmd, &status.outfmt) // set defaults from environment settings.InitTLS(f) @@ -93,27 +91,26 @@ func (s *statusCmd) run() error { return prettyError(err) } - switch s.outfmt { - case "": - PrintStatus(s.out, res) - return nil - case "json": - data, err := json.Marshal(res) - if err != nil { - return fmt.Errorf("Failed to Marshal JSON output: %s", err) - } - s.out.Write(data) - return nil - case "yaml": - data, err := yaml.Marshal(res) - if err != nil { - return fmt.Errorf("Failed to Marshal YAML output: %s", err) - } - s.out.Write(data) - return nil - } + return write(s.out, &statusWriter{res}, outputFormat(s.outfmt)) +} + +type statusWriter struct { + status *services.GetReleaseStatusResponse +} + +func (s *statusWriter) WriteTable(out io.Writer) error { + PrintStatus(out, s.status) + // There is no error handling here due to backwards compatibility with + // PrintStatus + return nil +} + +func (s *statusWriter) WriteJSON(out io.Writer) error { + return encodeJSON(out, s.status) +} - return fmt.Errorf("Unknown output format %q", s.outfmt) +func (s *statusWriter) WriteYAML(out io.Writer) error { + return encodeYAML(out, s.status) } // PrintStatus prints out the status of a release. 
Shared because also used by diff --git a/cmd/helm/template.go b/cmd/helm/template.go index d776f2989..bc9fba9c1 100644 --- a/cmd/helm/template.go +++ b/cmd/helm/template.go @@ -86,24 +86,25 @@ func newTemplateCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "template [flags] CHART", - Short: fmt.Sprintf("locally render templates"), + Short: "Locally render templates", Long: templateDesc, RunE: t.run, } + cmd.SetOutput(out) f := cmd.Flags() - f.BoolVar(&t.showNotes, "notes", false, "show the computed NOTES.txt file as well") - f.StringVarP(&t.releaseName, "name", "n", "release-name", "release name") - f.BoolVar(&t.releaseIsUpgrade, "is-upgrade", false, "set .Release.IsUpgrade instead of .Release.IsInstall") - f.StringArrayVarP(&t.renderFiles, "execute", "x", []string{}, "only execute the given templates") - f.VarP(&t.valueFiles, "values", "f", "specify values in a YAML file (can specify multiple)") - f.StringVar(&t.namespace, "namespace", "", "namespace to install the release into") - f.StringArrayVar(&t.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&t.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&t.fileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") - f.StringVar(&t.nameTemplate, "name-template", "", "specify template used to name the release") - f.StringVar(&t.kubeVersion, "kube-version", defaultKubeVersion, "kubernetes version used as Capabilities.KubeVersion.Major/Minor") - f.StringVar(&t.outputDir, "output-dir", "", "writes the executed templates to files in output-dir instead of stdout") + f.BoolVar(&t.showNotes, "notes", false, "Show the computed NOTES.txt file as well") + f.StringVarP(&t.releaseName, "name", "n", "release-name", "Release name") + f.BoolVar(&t.releaseIsUpgrade, "is-upgrade", false, "Set .Release.IsUpgrade instead of .Release.IsInstall") + f.StringArrayVarP(&t.renderFiles, "execute", "x", []string{}, "Only execute the given templates") + f.VarP(&t.valueFiles, "values", "f", "Specify values in a YAML file (can specify multiple)") + f.StringVar(&t.namespace, "namespace", "", "Namespace to install the release into") + f.StringArrayVar(&t.values, "set", []string{}, "Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&t.stringValues, "set-string", []string{}, "Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&t.fileValues, "set-file", []string{}, "Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") + f.StringVar(&t.nameTemplate, "name-template", "", "Specify template used to name the release") + f.StringVar(&t.kubeVersion, "kube-version", defaultKubeVersion, "Kubernetes version used as Capabilities.KubeVersion.Major/Minor") + f.StringVar(&t.outputDir, "output-dir", "", "Writes the executed templates to files in output-dir instead of stdout") return cmd } @@ -147,8 +148,8 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error { } } - if msgs := validation.IsDNS1123Label(t.releaseName); t.releaseName != "" && len(msgs) > 0 { - return 
fmt.Errorf("release name %s is not a valid DNS label: %s", t.releaseName, strings.Join(msgs, ";")) + if msgs := validation.IsDNS1123Subdomain(t.releaseName); t.releaseName != "" && len(msgs) > 0 { + return fmt.Errorf("release name %s is invalid: %s", t.releaseName, strings.Join(msgs, ";")) } // Check chart requirements to make sure all dependencies are present in /charts @@ -241,20 +242,20 @@ func (t *templateCmd) run(cmd *cobra.Command, args []string) error { if whitespaceRegex.MatchString(data) { continue } - err = writeToFile(t.outputDir, m.Name, data) + err = writeToFile(t.outputDir, m.Name, data, t.out) if err != nil { return err } continue } - fmt.Printf("---\n# Source: %s\n", m.Name) - fmt.Println(data) + fmt.Fprintf(t.out, "---\n# Source: %s\n", m.Name) + fmt.Fprintln(t.out, data) } return nil } // write the to / -func writeToFile(outputDir string, name string, data string) error { +func writeToFile(outputDir string, name string, data string, out io.Writer) error { outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator)) err := ensureDirectoryForFile(outfileName) @@ -275,7 +276,7 @@ func writeToFile(outputDir string, name string, data string) error { return err } - fmt.Printf("wrote %s\n", outfileName) + fmt.Fprintf(out, "wrote %s\n", outfileName) return nil } diff --git a/cmd/helm/template_test.go b/cmd/helm/template_test.go index 98044eff0..10c836a0d 100644 --- a/cmd/helm/template_test.go +++ b/cmd/helm/template_test.go @@ -20,7 +20,6 @@ import ( "bufio" "bytes" "fmt" - "io" "os" "path/filepath" "strings" @@ -112,21 +111,21 @@ func TestTemplateCmd(t *testing.T) { desc: "verify the release name using capitals is invalid", args: []string{subchart1ChartPath, "--name", "FOO"}, expectKey: "subchart1/templates/service.yaml", - expectError: "is not a valid DNS label", + expectError: "is invalid", }, { name: "check_invalid_name_uppercase", desc: "verify the release name using periods is invalid", args: []string{subchart1ChartPath, "--name", "foo.bar"}, expectKey: "subchart1/templates/service.yaml", - expectError: "is not a valid DNS label", + expectValue: "release-name: \"foo.bar\"", }, { name: "check_invalid_name_uppercase", desc: "verify the release name using underscores is invalid", args: []string{subchart1ChartPath, "--name", "foo_bar"}, expectKey: "subchart1/templates/service.yaml", - expectError: "is not a valid DNS label", + expectError: "is invalid", }, { name: "check_release_is_install", @@ -158,9 +157,9 @@ func TestTemplateCmd(t *testing.T) { }, { name: "check_invalid_name_template", - desc: "verify the relase name generate by template is invalid", + desc: "verify the release name generate by template is invalid", args: []string{subchart1ChartPath, "--name-template", "foobar-{{ b64enc \"abc\" }}-baz"}, - expectError: "is not a valid DNS label", + expectError: "is invalid", }, { name: "check_name_template", @@ -178,14 +177,9 @@ func TestTemplateCmd(t *testing.T) { }, } - var buf bytes.Buffer for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - // capture stdout - old := os.Stdout - r, w, _ := os.Pipe() - os.Stdout = w // execute template command out := bytes.NewBuffer(nil) cmd := newTemplateCmd(out) @@ -206,14 +200,8 @@ func TestTemplateCmd(t *testing.T) { } else if err != nil { t.Errorf("expected no error, got %v", err) } - // restore stdout - w.Close() - os.Stdout = old - var b bytes.Buffer - io.Copy(&b, r) - r.Close() // scan yaml into map[]yaml - scanner := bufio.NewScanner(&b) + scanner := bufio.NewScanner(out) next := false 
lastKey := "" m := map[string]string{} @@ -239,7 +227,6 @@ func TestTemplateCmd(t *testing.T) { } else { t.Errorf("could not find key %s", tt.expectKey) } - buf.Reset() }) } } diff --git a/cmd/helm/testdata/testcharts/alpine/Chart.yaml b/cmd/helm/testdata/testcharts/alpine/Chart.yaml index fea865aa5..feaa1d78f 100644 --- a/cmd/helm/testdata/testcharts/alpine/Chart.yaml +++ b/cmd/helm/testdata/testcharts/alpine/Chart.yaml @@ -1,3 +1,4 @@ +appVersion: "3.3" description: Deploy a basic Alpine Linux pod home: https://k8s.io/helm name: alpine diff --git a/cmd/helm/testdata/testcharts/alpine/templates/alpine-pod.yaml b/cmd/helm/testdata/testcharts/alpine/templates/alpine-pod.yaml index b8ae22b6c..fc0576d5a 100644 --- a/cmd/helm/testdata/testcharts/alpine/templates/alpine-pod.yaml +++ b/cmd/helm/testdata/testcharts/alpine/templates/alpine-pod.yaml @@ -10,6 +10,7 @@ metadata: # The "release" convention makes it easy to tie a release to all of the # Kubernetes resources that were created as part of that release. app.kubernetes.io/instance: {{.Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} # This makes it easy to audit chart usage. helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" values: {{.Values.test.Name}} diff --git a/cmd/helm/testdata/testcharts/novals/Chart.yaml b/cmd/helm/testdata/testcharts/novals/Chart.yaml index 85f7a5d83..a1b1a0d59 100644 --- a/cmd/helm/testdata/testcharts/novals/Chart.yaml +++ b/cmd/helm/testdata/testcharts/novals/Chart.yaml @@ -4,3 +4,4 @@ name: novals sources: - https://github.com/helm/helm version: 0.2.0 +appVersion: 3.3 diff --git a/cmd/helm/testdata/testcharts/novals/templates/alpine-pod.yaml b/cmd/helm/testdata/testcharts/novals/templates/alpine-pod.yaml index f569d556c..564429dea 100644 --- a/cmd/helm/testdata/testcharts/novals/templates/alpine-pod.yaml +++ b/cmd/helm/testdata/testcharts/novals/templates/alpine-pod.yaml @@ -10,6 +10,7 @@ metadata: # The "release" convention makes it easy to tie a release to all of the # Kubernetes resources that were created as part of that release. app.kubernetes.io/instance: {{.Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} # This makes it easy to audit chart usage. helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" annotations: diff --git a/cmd/helm/testdata/testcharts/prerelease-0.2.0-pre-release.tgz b/cmd/helm/testdata/testcharts/prerelease-0.2.0-pre-release.tgz new file mode 100644 index 000000000..36c8f02da Binary files /dev/null and b/cmd/helm/testdata/testcharts/prerelease-0.2.0-pre-release.tgz differ diff --git a/cmd/helm/testdata/testcharts/prerelease/Chart.yaml b/cmd/helm/testdata/testcharts/prerelease/Chart.yaml new file mode 100644 index 000000000..d8d901473 --- /dev/null +++ b/cmd/helm/testdata/testcharts/prerelease/Chart.yaml @@ -0,0 +1,6 @@ +description: Deploy a basic Alpine Linux pod +home: https://k8s.io/helm +name: prerelease +sources: +- https://github.com/helm/helm +version: 0.2.0-pre-release diff --git a/cmd/helm/testdata/testcharts/prerelease/README.md b/cmd/helm/testdata/testcharts/prerelease/README.md new file mode 100644 index 000000000..3c32de5db --- /dev/null +++ b/cmd/helm/testdata/testcharts/prerelease/README.md @@ -0,0 +1,13 @@ +#Alpine: A simple Helm chart + +Run a single pod of Alpine Linux. + +This example was generated using the command `helm create alpine`. + +The `templates/` directory contains a very simple pod resource with a +couple of parameters. 
+ +The `values.yaml` file contains the default values for the +`alpine-pod.yaml` template. + +You can install this example using `helm install docs/examples/alpine`. diff --git a/cmd/helm/testdata/testcharts/prerelease/templates/alpine-pod.yaml b/cmd/helm/testdata/testcharts/prerelease/templates/alpine-pod.yaml new file mode 100644 index 000000000..f569d556c --- /dev/null +++ b/cmd/helm/testdata/testcharts/prerelease/templates/alpine-pod.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{.Release.Name}}-{{.Values.Name}}" + labels: + # The "heritage" label is used to track which tool deployed a given chart. + # It is useful for admins who want to see what releases a particular tool + # is responsible for. + app.kubernetes.io/managed-by: {{.Release.Service | quote }} + # The "release" convention makes it easy to tie a release to all of the + # Kubernetes resources that were created as part of that release. + app.kubernetes.io/instance: {{.Release.Name | quote }} + # This makes it easy to audit chart usage. + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + annotations: + "helm.sh/created": {{.Release.Time.Seconds | quote }} +spec: + # This shows how to use a simple value. This will look for a passed-in value + # called restartPolicy. If it is not found, it will use the default value. + # {{default "Never" .restartPolicy}} is a slightly optimized version of the + # more conventional syntax: {{.restartPolicy | default "Never"}} + restartPolicy: {{default "Never" .Values.restartPolicy}} + containers: + - name: waiter + image: "alpine:3.3" + command: ["/bin/sleep","9000"] diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go index d05987b8a..a105820a6 100644 --- a/cmd/helm/upgrade.go +++ b/cmd/helm/upgrade.go @@ -44,7 +44,7 @@ To customize the chart values, use any of - '--set-string' to provide key=val forcing val to be stored as a string, - '--set-file' to provide key=path to read a single large value from a file at path. -To edit or append to the existing customized values, add the +To edit or append to the existing customized values, add the '--reuse-values' flag, otherwise any existing customized values are ignored. If no chart value arguments are provided on the command line, any existing customized values are carried @@ -84,36 +84,40 @@ which results in "pwd: 3jk$o2z=f\30with'quote". 
` type upgradeCmd struct { - release string - chart string - out io.Writer - client helm.Interface - dryRun bool - recreate bool - force bool - disableHooks bool - valueFiles valueFiles - values []string - stringValues []string - fileValues []string - verify bool - keyring string - install bool - namespace string - version string - timeout int64 - resetValues bool - reuseValues bool - wait bool - repoURL string - username string - password string - devel bool - description string + release string + chart string + out io.Writer + client helm.Interface + dryRun bool + recreate bool + force bool + disableHooks bool + valueFiles valueFiles + values []string + stringValues []string + fileValues []string + verify bool + keyring string + install bool + namespace string + version string + timeout int64 + resetValues bool + reuseValues bool + wait bool + atomic bool + repoURL string + username string + password string + devel bool + subNotes bool + description string + cleanupOnFail bool certFile string keyFile string caFile string + output string } func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command { @@ -125,7 +129,7 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "upgrade [RELEASE] [CHART]", - Short: "upgrade a release", + Short: "Upgrade a release", Long: upgradeDesc, PreRunE: func(_ *cobra.Command, _ []string) error { return setupConnection() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -141,6 +145,7 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command { upgrade.release = args[0] upgrade.chart = args[1] upgrade.client = ensureHelmClient(upgrade.client) + upgrade.wait = upgrade.wait || upgrade.atomic return upgrade.run() }, @@ -148,34 +153,38 @@ func newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command { f := cmd.Flags() settings.AddFlagsTLS(f) - f.VarP(&upgrade.valueFiles, "values", "f", "specify values in a YAML file or a URL(can specify multiple)") - f.BoolVar(&upgrade.dryRun, "dry-run", false, "simulate an upgrade") - f.BoolVar(&upgrade.recreate, "recreate-pods", false, "performs pods restart for the resource if applicable") - f.BoolVar(&upgrade.force, "force", false, "force resource update through delete/recreate if needed") - f.StringArrayVar(&upgrade.values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&upgrade.stringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&upgrade.fileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") - f.BoolVar(&upgrade.disableHooks, "disable-hooks", false, "disable pre/post upgrade hooks. DEPRECATED. Use no-hooks") - f.BoolVar(&upgrade.disableHooks, "no-hooks", false, "disable pre/post upgrade hooks") - f.BoolVar(&upgrade.verify, "verify", false, "verify the provenance of the chart before upgrading") - f.StringVar(&upgrade.keyring, "keyring", defaultKeyring(), "path to the keyring that contains public signing keys") - f.BoolVarP(&upgrade.install, "install", "i", false, "if a release by this name doesn't already exist, run an install") - f.StringVar(&upgrade.namespace, "namespace", "", "namespace to install the release into (only used if --install is set). 
Defaults to the current kube config namespace") - f.StringVar(&upgrade.version, "version", "", "specify the exact chart version to use. If this is not specified, the latest version is used") - f.Int64Var(&upgrade.timeout, "timeout", 300, "time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") - f.BoolVar(&upgrade.resetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart") - f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.") - f.BoolVar(&upgrade.wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") - f.StringVar(&upgrade.repoURL, "repo", "", "chart repository url where to locate the requested chart") - f.StringVar(&upgrade.username, "username", "", "chart repository username where to locate the requested chart") - f.StringVar(&upgrade.password, "password", "", "chart repository password where to locate the requested chart") - f.StringVar(&upgrade.certFile, "cert-file", "", "identify HTTPS client using this SSL certificate file") - f.StringVar(&upgrade.keyFile, "key-file", "", "identify HTTPS client using this SSL key file") - f.StringVar(&upgrade.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") - f.BoolVar(&upgrade.devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.") - f.StringVar(&upgrade.description, "description", "", "specify the description to use for the upgrade, rather than the default") - - f.MarkDeprecated("disable-hooks", "use --no-hooks instead") + f.VarP(&upgrade.valueFiles, "values", "f", "Specify values in a YAML file or a URL(can specify multiple)") + f.BoolVar(&upgrade.dryRun, "dry-run", false, "Simulate an upgrade") + f.BoolVar(&upgrade.recreate, "recreate-pods", false, "Performs pods restart for the resource if applicable") + f.BoolVar(&upgrade.force, "force", false, "Force resource update through delete/recreate if needed") + f.StringArrayVar(&upgrade.values, "set", []string{}, "Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&upgrade.stringValues, "set-string", []string{}, "Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") + f.StringArrayVar(&upgrade.fileValues, "set-file", []string{}, "Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") + f.BoolVar(&upgrade.disableHooks, "disable-hooks", false, "Disable pre/post upgrade hooks. DEPRECATED. Use no-hooks") + f.BoolVar(&upgrade.disableHooks, "no-hooks", false, "Disable pre/post upgrade hooks") + f.BoolVar(&upgrade.verify, "verify", false, "Verify the provenance of the chart before upgrading") + f.StringVar(&upgrade.keyring, "keyring", defaultKeyring(), "Path to the keyring that contains public signing keys") + f.BoolVarP(&upgrade.install, "install", "i", false, "If a release by this name doesn't already exist, run an install") + f.StringVar(&upgrade.namespace, "namespace", "", "Namespace to install the release into (only used if --install is set). 
Defaults to the current kube config namespace") + f.StringVar(&upgrade.version, "version", "", "Specify the exact chart version to use. If this is not specified, the latest version is used") + f.Int64Var(&upgrade.timeout, "timeout", 300, "Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks)") + f.BoolVar(&upgrade.resetValues, "reset-values", false, "When upgrading, reset the values to the ones built into the chart") + f.BoolVar(&upgrade.reuseValues, "reuse-values", false, "When upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored.") + f.BoolVar(&upgrade.wait, "wait", false, "If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout") + f.BoolVar(&upgrade.atomic, "atomic", false, "If set, upgrade process rolls back changes made in case of failed upgrade, also sets --wait flag") + f.StringVar(&upgrade.repoURL, "repo", "", "Chart repository url where to locate the requested chart") + f.StringVar(&upgrade.username, "username", "", "Chart repository username where to locate the requested chart") + f.StringVar(&upgrade.password, "password", "", "Chart repository password where to locate the requested chart") + f.StringVar(&upgrade.certFile, "cert-file", "", "Identify HTTPS client using this SSL certificate file") + f.StringVar(&upgrade.keyFile, "key-file", "", "Identify HTTPS client using this SSL key file") + f.StringVar(&upgrade.caFile, "ca-file", "", "Verify certificates of HTTPS-enabled servers using this CA bundle") + f.BoolVar(&upgrade.devel, "devel", false, "Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.") + f.BoolVar(&upgrade.subNotes, "render-subchart-notes", false, "Render subchart notes along with parent") + f.StringVar(&upgrade.description, "description", "", "Specify the description to use for the upgrade, rather than the default") + f.BoolVar(&upgrade.cleanupOnFail, "cleanup-on-fail", false, "Allow deletion of new resources created in this upgrade when upgrade failed") + bindOutputFlag(cmd, &upgrade.output) + + f.MarkDeprecated("disable-hooks", "Use --no-hooks instead") // set defaults from environment settings.InitTLS(f) @@ -189,6 +198,8 @@ func (u *upgradeCmd) run() error { return err } + releaseHistory, err := u.client.ReleaseHistory(u.release, helm.WithMaxHistory(1)) + if u.install { // If a release does not exist, install it. If another error occurs during // the check, ignore the error and continue with the upgrade. @@ -196,7 +207,6 @@ func (u *upgradeCmd) run() error { // The returned error is a grpc.rpcError that wraps the message from the original error. // So we're stuck doing string matching against the wrapped error, which is nested somewhere // inside of the grpc.rpcError message. 
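A short sketch of how the upgrade flags added above combine (the release name and chart path are placeholders): `--atomic` implies `--wait` and rolls the release back automatically if the upgrade fails, `--cleanup-on-fail` allows resources newly created by the failed upgrade to be deleted during that rollback, and the bound `--output` flag chooses table, json, or yaml for the status that gets printed.

```
# Placeholder release name and chart path; all flags shown are introduced or
# rewired in this change.
helm upgrade my-release ./mychart --atomic --cleanup-on-fail --render-subchart-notes --output json
```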
- releaseHistory, err := u.client.ReleaseHistory(u.release, helm.WithMaxHistory(1)) if err == nil { if u.namespace == "" { @@ -230,6 +240,7 @@ func (u *upgradeCmd) run() error { timeout: u.timeout, wait: u.wait, description: u.description, + atomic: u.atomic, } return ic.run() } @@ -241,7 +252,8 @@ func (u *upgradeCmd) run() error { } // Check chart requirements to make sure all dependencies are present in /charts - if ch, err := chartutil.Load(chartPath); err == nil { + ch, err := chartutil.Load(chartPath) + if err == nil { if req, err := chartutil.LoadRequirements(ch); err == nil { if err := renderutil.CheckDependencies(ch, req); err != nil { return err @@ -253,9 +265,9 @@ func (u *upgradeCmd) run() error { return prettyError(err) } - resp, err := u.client.UpdateRelease( + resp, err := u.client.UpdateReleaseFromChart( u.release, - chartPath, + ch, helm.UpdateValueOverrides(rawVals), helm.UpgradeDryRun(u.dryRun), helm.UpgradeRecreate(u.recreate), @@ -264,9 +276,32 @@ func (u *upgradeCmd) run() error { helm.UpgradeTimeout(u.timeout), helm.ResetValues(u.resetValues), helm.ReuseValues(u.reuseValues), + helm.UpgradeSubNotes(u.subNotes), helm.UpgradeWait(u.wait), - helm.UpgradeDescription(u.description)) + helm.UpgradeDescription(u.description), + helm.UpgradeCleanupOnFail(u.cleanupOnFail)) if err != nil { + fmt.Fprintf(u.out, "UPGRADE FAILED\nError: %v\n", prettyError(err)) + if u.atomic { + fmt.Fprint(u.out, "ROLLING BACK") + rollback := &rollbackCmd{ + out: u.out, + client: u.client, + name: u.release, + dryRun: u.dryRun, + recreate: u.recreate, + force: u.force, + timeout: u.timeout, + wait: u.wait, + description: "", + revision: releaseHistory.Releases[0].Version, + disableHooks: u.disableHooks, + cleanupOnFail: u.cleanupOnFail, + } + if err := rollback.run(); err != nil { + return err + } + } return fmt.Errorf("UPGRADE FAILED: %v", prettyError(err)) } @@ -274,14 +309,14 @@ func (u *upgradeCmd) run() error { printRelease(u.out, resp.Release) } - fmt.Fprintf(u.out, "Release %q has been upgraded. Happy Helming!\n", u.release) - + if outputFormat(u.output) == outputTable { + fmt.Fprintf(u.out, "Release %q has been upgraded.\n", u.release) + } // Print the status like status command does status, err := u.client.ReleaseStatus(u.release) if err != nil { return prettyError(err) } - PrintStatus(u.out, status) - return nil + return write(u.out, &statusWriter{status}, outputFormat(u.output)) } diff --git a/cmd/helm/upgrade_test.go b/cmd/helm/upgrade_test.go index 60b529f63..433f3bd2c 100644 --- a/cmd/helm/upgrade_test.go +++ b/cmd/helm/upgrade_test.go @@ -96,7 +96,7 @@ func TestUpgradeCmd(t *testing.T) { name: "upgrade a release", args: []string{"funny-bunny", chartPath}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 2, Chart: ch}), - expected: "Release \"funny-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"funny-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 2, Chart: ch})}, }, { @@ -104,7 +104,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"funny-bunny", chartPath}, flags: []string{"--timeout", "120"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 3, Chart: ch2}), - expected: "Release \"funny-bunny\" has been upgraded. 
Happy Helming!\n", + expected: "Release \"funny-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 3, Chart: ch2})}, }, { @@ -112,7 +112,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"funny-bunny", chartPath}, flags: []string{"--reset-values", "true"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 4, Chart: ch2}), - expected: "Release \"funny-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"funny-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 4, Chart: ch2})}, }, { @@ -120,15 +120,23 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"funny-bunny", chartPath}, flags: []string{"--reuse-values", "true"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 5, Chart: ch2}), - expected: "Release \"funny-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"funny-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 5, Chart: ch2})}, }, + { + name: "install a release with 'upgrade --atomic'", + args: []string{"funny-bunny", chartPath}, + flags: []string{"--atomic"}, + resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 6, Chart: ch}), + expected: "Release \"funny-bunny\" has been upgraded.\n", + rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "funny-bunny", Version: 6, Chart: ch})}, + }, { name: "install a release with 'upgrade --install'", args: []string{"zany-bunny", chartPath}, flags: []string{"-i"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "zany-bunny", Version: 1, Chart: ch}), - expected: "Release \"zany-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"zany-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "zany-bunny", Version: 1, Chart: ch})}, }, { @@ -136,7 +144,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"crazy-bunny", chartPath}, flags: []string{"-i", "--timeout", "120"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 1, Chart: ch}), - expected: "Release \"crazy-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"crazy-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 1, Chart: ch})}, }, { @@ -144,7 +152,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"crazy-bunny", chartPath}, flags: []string{"-i", "--description", "foo"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 1, Chart: ch, Description: "foo"}), - expected: "Release \"crazy-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"crazy-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 1, Chart: ch, Description: "foo"})}, }, { @@ -152,7 +160,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"crazy-bunny", chartPath}, flags: []string{"--wait"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 2, Chart: ch2}), - expected: "Release \"crazy-bunny\" has been upgraded. 
Happy Helming!\n", + expected: "Release \"crazy-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 2, Chart: ch2})}, }, { @@ -160,7 +168,7 @@ func TestUpgradeCmd(t *testing.T) { args: []string{"crazy-bunny", chartPath}, flags: []string{"--description", "foo"}, resp: helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 2, Chart: ch2}), - expected: "Release \"crazy-bunny\" has been upgraded. Happy Helming!\n", + expected: "Release \"crazy-bunny\" has been upgraded.\n", rels: []*release.Release{helm.ReleaseMock(&helm.MockReleaseOptions{Name: "crazy-bunny", Version: 2, Chart: ch2, Description: "foo"})}, }, { diff --git a/cmd/helm/verify.go b/cmd/helm/verify.go index bbc8347c1..377f511f6 100644 --- a/cmd/helm/verify.go +++ b/cmd/helm/verify.go @@ -27,7 +27,7 @@ import ( const verifyDesc = ` Verify that the given chart has a valid provenance file. -Provenance files provide crytographic verification that a chart has not been +Provenance files provide cryptographic verification that a chart has not been tampered with, and was packaged by a trusted provider. This command can be used to verify a local chart. Several other commands provide @@ -47,7 +47,7 @@ func newVerifyCmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "verify [flags] PATH", - Short: "verify that a chart at the given path has been signed and is valid", + Short: "Verify that a chart at the given path has been signed and is valid", Long: verifyDesc, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { @@ -59,7 +59,7 @@ func newVerifyCmd(out io.Writer) *cobra.Command { } f := cmd.Flags() - f.StringVar(&vc.keyring, "keyring", defaultKeyring(), "keyring containing public keys") + f.StringVar(&vc.keyring, "keyring", defaultKeyring(), "Keyring containing public keys") return cmd } diff --git a/cmd/helm/version.go b/cmd/helm/version.go index a803a990b..c1a05e0c1 100644 --- a/cmd/helm/version.go +++ b/cmd/helm/version.go @@ -66,7 +66,7 @@ func newVersionCmd(c helm.Interface, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "version", - Short: "print the client/server version information", + Short: "Print the client/server version information", Long: versionDesc, RunE: func(cmd *cobra.Command, args []string) error { // If neither is explicitly set, show both. 
@@ -78,10 +78,10 @@ func newVersionCmd(c helm.Interface, out io.Writer) *cobra.Command { } f := cmd.Flags() settings.AddFlagsTLS(f) - f.BoolVarP(&version.showClient, "client", "c", false, "client version only") - f.BoolVarP(&version.showServer, "server", "s", false, "server version only") - f.BoolVar(&version.short, "short", false, "print the version number") - f.StringVar(&version.template, "template", "", "template for version string format") + f.BoolVarP(&version.showClient, "client", "c", false, "Client version only") + f.BoolVarP(&version.showServer, "server", "s", false, "Server version only") + f.BoolVar(&version.short, "short", false, "Print the version number") + f.StringVar(&version.template, "template", "", "Template for version string format") // set defaults from environment settings.InitTLS(f) @@ -151,5 +151,5 @@ func formatVersion(v *pb.Version, short bool) string { if short && v.GitCommit != "" { return fmt.Sprintf("%s+g%s", v.SemVer, v.GitCommit[:7]) } - return fmt.Sprintf("%#v", v) + return fmt.Sprintf("&version.Version{SemVer:\"%s\", GitCommit:\"%s\", GitTreeState:\"%s\"}", v.SemVer, v.GitCommit, v.GitTreeState) } diff --git a/cmd/rudder/rudder.go b/cmd/rudder/rudder.go index 051640542..d68daf453 100644 --- a/cmd/rudder/rudder.go +++ b/cmd/rudder/rudder.go @@ -131,7 +131,13 @@ func (r *ReleaseModuleServiceServer) RollbackRelease(ctx context.Context, in *ru grpclog.Print("rollback") c := bytes.NewBufferString(in.Current.Manifest) t := bytes.NewBufferString(in.Target.Manifest) - err := kubeClient.Update(in.Target.Namespace, c, t, in.Force, in.Recreate, in.Timeout, in.Wait) + err := kubeClient.UpdateWithOptions(in.Target.Namespace, c, t, kube.UpdateOptions{ + Force: in.Force, + Recreate: in.Recreate, + Timeout: in.Timeout, + ShouldWait: in.Wait, + CleanupOnFail: in.CleanupOnFail, + }) return &rudderAPI.RollbackReleaseResponse{}, err } @@ -140,7 +146,13 @@ func (r *ReleaseModuleServiceServer) UpgradeRelease(ctx context.Context, in *rud grpclog.Print("upgrade") c := bytes.NewBufferString(in.Current.Manifest) t := bytes.NewBufferString(in.Target.Manifest) - err := kubeClient.Update(in.Target.Namespace, c, t, in.Force, in.Recreate, in.Timeout, in.Wait) + err := kubeClient.UpdateWithOptions(in.Target.Namespace, c, t, kube.UpdateOptions{ + Force: in.Force, + Recreate: in.Recreate, + Timeout: in.Timeout, + ShouldWait: in.Wait, + CleanupOnFail: in.CleanupOnFail, + }) // upgrade response object should be changed to include status return &rudderAPI.UpgradeReleaseResponse{}, err } diff --git a/cmd/tiller/tiller.go b/cmd/tiller/tiller.go index 478ca92f4..a2ef2764b 100644 --- a/cmd/tiller/tiller.go +++ b/cmd/tiller/tiller.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/keepalive" + "k8s.io/klog" // Import to initialize client auth plugins. _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -65,8 +66,8 @@ const ( storageMemory = "memory" storageConfigMap = "configmap" storageSecret = "secret" + storageSQL = "sql" - probeAddr = ":44135" traceAddr = ":44136" // defaultMaxHistory sets the maximum number of releases to 0: unlimited @@ -74,17 +75,23 @@ const ( ) var ( - grpcAddr = flag.String("listen", ":44134", "address:port to listen on") - enableTracing = flag.Bool("trace", false, "enable rpc tracing") - store = flag.String("storage", storageConfigMap, "storage driver to use. 
One of 'configmap', 'memory', or 'secret'") + grpcAddr = flag.String("listen", fmt.Sprintf(":%v", environment.DefaultTillerPort), "address:port to listen on") + probeAddr = flag.String("probe-listen", fmt.Sprintf(":%v", environment.DefaultTillerProbePort), "address:port to listen on for probes") + enableTracing = flag.Bool("trace", false, "enable rpc tracing") + store = flag.String("storage", storageConfigMap, "storage driver to use. One of 'configmap', 'memory', 'sql' or 'secret'") + + sqlDialect = flag.String("sql-dialect", "postgres", "SQL dialect to use (only postgres is supported for now") + sqlConnectionString = flag.String("sql-connection-string", "", "SQL connection string to use") + remoteReleaseModules = flag.Bool("experimental-release", false, "enable experimental release modules") - tlsEnable = flag.Bool("tls", tlsEnableEnvVarDefault(), "enable TLS") - tlsVerify = flag.Bool("tls-verify", tlsVerifyEnvVarDefault(), "enable TLS and verify remote certificate") - keyFile = flag.String("tls-key", tlsDefaultsFromEnv("tls-key"), "path to TLS private key file") - certFile = flag.String("tls-cert", tlsDefaultsFromEnv("tls-cert"), "path to TLS certificate file") - caCertFile = flag.String("tls-ca-cert", tlsDefaultsFromEnv("tls-ca-cert"), "trust certificates signed by this CA") - maxHistory = flag.Int("history-max", historyMaxFromEnv(), "maximum number of releases kept in release history, with 0 meaning no limit") - printVersion = flag.Bool("version", false, "print the version number") + + tlsEnable = flag.Bool("tls", tlsEnableEnvVarDefault(), "enable TLS") + tlsVerify = flag.Bool("tls-verify", tlsVerifyEnvVarDefault(), "enable TLS and verify remote certificate") + keyFile = flag.String("tls-key", tlsDefaultsFromEnv("tls-key"), "path to TLS private key file") + certFile = flag.String("tls-cert", tlsDefaultsFromEnv("tls-cert"), "path to TLS certificate file") + caCertFile = flag.String("tls-ca-cert", tlsDefaultsFromEnv("tls-ca-cert"), "trust certificates signed by this CA") + maxHistory = flag.Int("history-max", historyMaxFromEnv(), "maximum number of releases kept in release history, with 0 meaning no limit") + printVersion = flag.Bool("version", false, "print the version number") // rootServer is the root gRPC server. 
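To illustrate the new Tiller storage and probe flags defined above, a hedged example invocation (the Postgres connection string is a made-up placeholder, and in practice Tiller is normally started inside the cluster): the `sql` driver currently supports only the `postgres` dialect, and the probe address is now configurable rather than fixed.

```
# Illustrative only; the connection string is a placeholder.
tiller --storage=sql \
       --sql-dialect=postgres \
       --sql-connection-string="postgresql://tiller@localhost:5432/helm?sslmode=disable" \
       --probe-listen=":44135"
```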
// @@ -100,6 +107,7 @@ var ( ) func main() { + klog.InitFlags(nil) // TODO: use spf13/cobra for tiller instead of flags flag.Parse() @@ -141,6 +149,18 @@ func start() { env.Releases = storage.Init(secrets) env.Releases.Log = newLogger("storage").Printf + case storageSQL: + sqlDriver, err := driver.NewSQL( + *sqlDialect, + *sqlConnectionString, + newLogger("storage/driver").Printf, + ) + if err != nil { + logger.Fatalf("Cannot initialize SQL storage driver: %v", err) + } + + env.Releases = storage.Init(sqlDriver) + env.Releases.Log = newLogger("storage").Printf } if *maxHistory > 0 { @@ -185,7 +205,7 @@ func start() { logger.Printf("Starting Tiller %s (tls=%t)", version.GetVersion(), *tlsEnable || *tlsVerify) logger.Printf("GRPC listening on %s", *grpcAddr) - logger.Printf("Probes listening on %s", probeAddr) + logger.Printf("Probes listening on %s", *probeAddr) logger.Printf("Storage driver is %s", env.Releases.Name()) logger.Printf("Max history per release is %d", *maxHistory) @@ -211,7 +231,7 @@ func start() { goprom.Register(rootServer) addPrometheusHandler(mux) - if err := http.ListenAndServe(probeAddr, mux); err != nil { + if err := http.ListenAndServe(*probeAddr, mux); err != nil { probeErrCh <- err } }() diff --git a/docs/chart_best_practices/conventions.md b/docs/chart_best_practices/conventions.md index 524928e25..c8bbd7d94 100644 --- a/docs/chart_best_practices/conventions.md +++ b/docs/chart_best_practices/conventions.md @@ -22,7 +22,7 @@ The directory that contains a chart MUST have the same name as the chart. Thus, ## Version Numbers -Wherever possible, Helm uses [SemVer 2](http://semver.org) to represent version numbers. (Note that Docker image tags do not necessarily follow SemVer, and are thus considered an unfortunate exception to the rule.) +Wherever possible, Helm uses [SemVer 2](https://semver.org) to represent version numbers. (Note that Docker image tags do not necessarily follow SemVer, and are thus considered an unfortunate exception to the rule.) When SemVer versions are stored in Kubernetes labels, we conventionally alter the `+` character to an `_` character, as labels do not allow the `+` sign as a value. diff --git a/docs/chart_best_practices/custom_resource_definitions.md b/docs/chart_best_practices/custom_resource_definitions.md index 96690dc9b..ee6fcf470 100644 --- a/docs/chart_best_practices/custom_resource_definitions.md +++ b/docs/chart_best_practices/custom_resource_definitions.md @@ -28,10 +28,10 @@ resources that use that CRD in _another_ chart. In this method, each chart must be installed separately. -### Method 2: Pre-install Hooks +### Method 2: Crd-install Hooks -To package the two together, add a `pre-install` hook to the CRD definition so +To package the two together, add a `crd-install` hook to the CRD definition so that it is fully installed before the rest of the chart is executed. -Note that if you create the CRD with a `pre-install` hook, that CRD definition +Note that if you create the CRD with a `crd-install` hook, that CRD definition will not be deleted when `helm delete` is run. diff --git a/docs/chart_best_practices/values.md b/docs/chart_best_practices/values.md index 28e3a3eac..bdc53e338 100644 --- a/docs/chart_best_practices/values.md +++ b/docs/chart_best_practices/values.md @@ -88,7 +88,7 @@ data is lost after one parse. 
## Consider How Users Will Use Your Values -There are three potential sources of values: +There are four potential sources of values: - A chart's `values.yaml` file - A values file supplied by `helm install -f` or `helm upgrade -f` diff --git a/docs/chart_repository.md b/docs/chart_repository.md index 5291f65e4..c0b3d0609 100644 --- a/docs/chart_repository.md +++ b/docs/chart_repository.md @@ -123,6 +123,35 @@ startup. This part shows several ways to serve a chart repository. +### ChartMuseum + +The Helm project provides an open-source Helm repository server called [ChartMuseum](https://chartmuseum.com) that you can host yourself. + +ChartMuseum supports multiple cloud storage backends. Configure it to point to the directory or bucket containing your chart packages, and the index.yaml file will be generated dynamically. + +It can be deployed easily as a [Helm chart](https://github.com/helm/charts/tree/master/stable/chartmuseum): +``` +helm install stable/chartmuseum +``` + +and also as a [Docker image](https://hub.docker.com/r/chartmuseum/chartmuseum/tags): +``` +docker run --rm -it \ + -p 8080:8080 \ + -v $(pwd)/charts:/charts \ + -e DEBUG=true \ + -e STORAGE=local \ + -e STORAGE_LOCAL_ROOTDIR=/charts \ + chartmuseum/chartmuseum +``` + +You can then add the repo to your local repository list: +``` +helm repo add chartmuseum http://localhost:8080 +``` + +ChartMuseum provides other features, such as an API for chart uploads. Please see the [README](https://github.com/helm/chartmuseum) for more info. + ### Google Cloud Storage The first step is to **create your GCS bucket**. We'll call ours @@ -153,6 +182,10 @@ Charts repository hosts its charts, so you may want to take a You can also set up chart repositories using JFrog Artifactory. Read more about chart repositories with JFrog Artifactory [here](https://www.jfrog.com/confluence/display/RTF/Helm+Chart+Repositories) +### ProGet + +Helm chart repositories are supported by ProGet. For more information, visit the [Helm repository documentation](https://inedo.com/support/documentation/proget/feeds/helm) on the Inedo website. + ### Github Pages example In a similar way you can create charts repository using GitHub Pages. diff --git a/docs/chart_repository_sync_example.md b/docs/chart_repository_sync_example.md index 931275431..91215cb25 100644 --- a/docs/chart_repository_sync_example.md +++ b/docs/chart_repository_sync_example.md @@ -38,7 +38,7 @@ Building synchronization state... Starting synchronization Would copy file://fantastic-charts/alpine-0.1.0.tgz to gs://fantastic-charts/alpine-0.1.0.tgz Would copy file://fantastic-charts/index.yaml to gs://fantastic-charts/index.yaml -Are you sure you would like to continue with these changes?? [y/N]} y +Are you sure you would like to continue with these changes? [y/N]} y Building synchronization state... Starting synchronization Copying file://fantastic-charts/alpine-0.1.0.tgz [Content-Type=application/x-tar]... diff --git a/docs/chart_template_guide/accessing_files.md b/docs/chart_template_guide/accessing_files.md index 206ad0cec..46be8be2e 100644 --- a/docs/chart_template_guide/accessing_files.md +++ b/docs/chart_template_guide/accessing_files.md @@ -54,13 +54,13 @@ metadata: name: {{ .Release.Name }}-configmap data: {{- $files := .Files }} - {{- range tuple "config1.toml" "config2.toml" "config3.toml" }} + {{- range list "config1.toml" "config2.toml" "config3.toml" }} {{ . }}: |- {{ $files.Get . }} {{- end }} ``` -This config map uses several of the techniques discussed in previous sections. 
For example, we create a `$files` variable to hold a reference to the `.Files` object. We also use the `tuple` function to create a list of files that we loop through. Then we print each file name (`{{.}}: |-`) followed by the contents of the file `{{ $files.Get . }}`. +This config map uses several of the techniques discussed in previous sections. For example, we create a `$files` variable to hold a reference to the `.Files` object. We also use the `list` function to create a list of files that we loop through. Then we print each file name (`{{.}}: |-`) followed by the contents of the file `{{ $files.Get . }}`. Running this template will produce a single ConfigMap with the contents of all three files: @@ -129,6 +129,7 @@ You have multiple options with Globs: Or ```yaml +{{ $root := . }} {{ range $path, $bytes := .Files.Glob "foo/*" }} {{ base $path }}: '{{ $root.Files.Get $path | b64enc }}' {{ end }} diff --git a/docs/chart_template_guide/builtin_objects.md b/docs/chart_template_guide/builtin_objects.md index f7b2857bc..27088a674 100644 --- a/docs/chart_template_guide/builtin_objects.md +++ b/docs/chart_template_guide/builtin_objects.md @@ -1,6 +1,6 @@ # Built-in Objects -Objects are passed into a template from the template engine. And your code can pass objects around (we'll see examples when we look at the `with` and `range` statements). There are even a few ways to create new objects within your templates, like with the `tuple` function we'll see later. +Objects are passed into a template from the template engine. And your code can pass objects around (we'll see examples when we look at the `with` and `range` statements). There are even a few ways to create new objects within your templates, like with the `list` function we'll see later. Objects can be simple, and have just one value. Or they can contain other objects or functions. For example. the `Release` object contains several objects (like `Release.Name`) and the `Files` object has a few functions. @@ -22,7 +22,7 @@ In the previous section, we use `{{.Release.Name}}` to insert the name of a rele - `Files.GetBytes` is a function for getting the contents of a file as an array of bytes instead of as a string. This is useful for things like images. - `Capabilities`: This provides information about what capabilities the Kubernetes cluster supports. - `Capabilities.APIVersions` is a set of versions. - - `Capabilities.APIVersions.Has $version` indicates whether a version (`batch/v1`) is enabled on the cluster. + - `Capabilities.APIVersions.Has $version` indicates whether a version (e.g., `batch/v1`) or resource (e.g., `apps/v1/Deployment`) is available on the cluster. Note, resources were not available before Helm v2.15. - `Capabilities.KubeVersion` provides a way to look up the Kubernetes version. It has the following values: `Major`, `Minor`, `GitVersion`, `GitCommit`, `GitTreeState`, `BuildDate`, `GoVersion`, `Compiler`, and `Platform`. - `Capabilities.TillerVersion` provides a way to look up the Tiller version. It has the following values: `SemVer`, `GitCommit`, and `GitTreeState`. 
- `Template`: Contains information about the current template that is being executed diff --git a/docs/chart_template_guide/control_structures.md b/docs/chart_template_guide/control_structures.md index 61d9ef9e2..c9d2dd0bf 100644 --- a/docs/chart_template_guide/control_structures.md +++ b/docs/chart_template_guide/control_structures.md @@ -20,7 +20,7 @@ The first control structure we'll look at is for conditionally including blocks The basic structure for a conditional looks like this: -``` +```yaml {{ if PIPELINE }} # Do something {{ else if OTHER PIPELINE }} @@ -53,7 +53,7 @@ data: myvalue: "Hello World" drink: {{ .Values.favorite.drink | default "tea" | quote }} food: {{ .Values.favorite.food | upper | quote }} - {{ if and (.Values.favorite.drink) (eq .Values.favorite.drink "coffee") }}mug: true{{ end }} + {{ if and .Values.favorite.drink (eq .Values.favorite.drink "coffee") }}mug: true{{ end }} ``` Note that `.Values.favorite.drink` must be defined or else it will throw an error when comparing it to "coffee". Since we commented out `drink: coffee` in our last example, the output should not include a `mug: true` flag. But if we add that line back into our `values.yaml` file, the output should look like this: @@ -115,7 +115,7 @@ data: `mug` is incorrectly indented. Let's simply out-dent that one line, and re-run: -``` +```yaml apiVersion: v1 kind: ConfigMap metadata: @@ -224,7 +224,7 @@ The next control structure to look at is the `with` action. This controls variab The syntax for `with` is similar to a simple `if` statement: -``` +```yaml {{ with PIPELINE }} # restricted scope {{ end }} @@ -329,15 +329,15 @@ data: - "Onions" ``` -Now, in this example we've done something tricky. The `toppings: |-` line is declaring a multi-line string. So our list of toppings is actually not a YAML list. It's a big string. Why would we do this? Because the data in ConfigMaps `data` is composed of key/value pairs, where both the key and the value are simple strings. To understand why this is the case, take a look at the [Kubernetes ConfigMap docs](http://kubernetes.io/docs/user-guide/configmap/). For us, though, this detail doesn't matter much. +Now, in this example we've done something tricky. The `toppings: |-` line is declaring a multi-line string. So our list of toppings is actually not a YAML list. It's a big string. Why would we do this? Because the data in ConfigMaps `data` is composed of key/value pairs, where both the key and the value are simple strings. To understand why this is the case, take a look at the [Kubernetes ConfigMap docs](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). For us, though, this detail doesn't matter much. > The `|-` marker in YAML takes a multi-line string. This can be a useful technique for embedding big blocks of data inside of your manifests, as exemplified here. -Sometimes it's useful to be able to quickly make a list inside of your template, and then iterate over that list. Helm templates have a function to make this easy: `tuple`. In computer science, a tuple is a list-like collection of fixed size, but with arbitrary data types. This roughly conveys the way a `tuple` is used. +Sometimes it's useful to be able to quickly make a list inside of your template, and then iterate over that list. Helm templates have a function that's called just that: `list`. ```yaml sizes: |- - {{- range tuple "small" "medium" "large" }} + {{- range list "small" "medium" "large" }} - {{ . 
}} {{- end }} ``` @@ -351,4 +351,4 @@ The above will produce this: - large ``` -In addition to lists and tuples, `range` can be used to iterate over collections that have a key and a value (like a `map` or `dict`). We'll see how to do that in the next section when we introduce template variables. +In addition to lists, `range` can be used to iterate over collections that have a key and a value (like a `map` or `dict`). We'll see how to do that in the next section when we introduce template variables. diff --git a/docs/chart_template_guide/debugging.md b/docs/chart_template_guide/debugging.md index fac788cc4..23a6ae70b 100644 --- a/docs/chart_template_guide/debugging.md +++ b/docs/chart_template_guide/debugging.md @@ -12,7 +12,7 @@ When your YAML is failing to parse, but you want to see what is generated, one easy way to retrieve the YAML is to comment out the problem section in the template, and then re-run `helm install --dry-run --debug`: -```YAML +```yaml apiVersion: v1 # some: problem section # {{ .Values.foo | quote }} @@ -20,7 +20,7 @@ apiVersion: v1 The above will be rendered and returned with the comments intact: -```YAML +```yaml apiVersion: v1 # some: problem section # "bar" diff --git a/docs/chart_template_guide/functions_and_pipelines.md b/docs/chart_template_guide/functions_and_pipelines.md index 66176fc59..bbce53e71 100644 --- a/docs/chart_template_guide/functions_and_pipelines.md +++ b/docs/chart_template_guide/functions_and_pipelines.md @@ -4,7 +4,7 @@ So far, we've seen how to place information into a template. But that informatio Let's start with a best practice: When injecting strings from the `.Values` object into the template, we ought to quote these strings. We can do that by calling the `quote` function in the template directive: -``` +```yaml apiVersion: v1 kind: ConfigMap metadata: @@ -104,7 +104,7 @@ drink: {{ .Values.favorite.drink | default "tea" | quote }} If we run this as normal, we'll get our `coffee`: -``` +```yaml # Source: mychart/templates/configmap.yaml apiVersion: v1 kind: ConfigMap @@ -150,6 +150,19 @@ Template functions and pipelines are a powerful way to transform information and ## Operators are functions -For templates, the operators (`eq`, `ne`, `lt`, `gt`, `and`, `or` and so on) are all implemented as functions. In pipelines, operations can be grouped with parentheses (`(`, and `)`). +Operators are implemented as functions that return a boolean value. To use `eq`, `ne`, `lt`, `gt`, `and`, `or`, `not`, and so on, place the operator at the front of the statement, followed by its parameters, just as you would a function. To chain multiple operations together, wrap each inner function call in parentheses. + +```yaml +{{/* include the body of this if statement when the variable .Values.fooString exists and is set to "foo" */}} +{{ if and .Values.fooString (eq .Values.fooString "foo") }} + {{ ... }} +{{ end }} + + +{{/* include the body of this if statement when the variable .Values.anUnsetVariable is set or .Values.aSetVariable is not set */}} +{{ if or .Values.anUnsetVariable (not .Values.aSetVariable) }} + {{ ... }} +{{ end }} +``` Now we can turn from functions and pipelines to flow control with conditions, loops, and scope modifiers. diff --git a/docs/chart_template_guide/getting_started.md b/docs/chart_template_guide/getting_started.md index 87ae5fa3c..c6daec1c6 100644 --- a/docs/chart_template_guide/getting_started.md +++ b/docs/chart_template_guide/getting_started.md @@ -51,8 +51,8 @@ already there.
- `NOTES.txt`: The "help text" for your chart. This will be displayed to your users when they run `helm install`. -- `deployment.yaml`: A basic manifest for creating a Kubernetes [deployment](http://kubernetes.io/docs/user-guide/deployments/) -- `service.yaml`: A basic manifest for creating a [service endpoint](http://kubernetes.io/docs/user-guide/services/) for your deployment +- `deployment.yaml`: A basic manifest for creating a Kubernetes [deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- `service.yaml`: A basic manifest for creating a [service endpoint](https://kubernetes.io/docs/concepts/services-networking/service/) for your deployment - `_helpers.tpl`: A place to put template helpers that you can re-use throughout the chart And what we're going to do is... _remove them all!_ That way we can work through our tutorial from scratch. We'll actually create our own `NOTES.txt` and `_helpers.tpl` as we go. @@ -187,10 +187,10 @@ instead of `mychart-configmap`. You can run `helm get manifest clunky-serval` to see the entire generated YAML. -At this point, we've seen templates at their most basic: YAML files that have template directives embedded in `{{` and `}}`. In the next part, we'll take a deeper look into templates. But before moving on, there's one quick trick that can make building templates faster: When you want to test the template rendering, but not actually install anything, you can use `helm install --debug --dry-run ./mychart`. This will send the chart to the Tiller server, which will render the templates. But instead of installing the chart, it will return the rendered template to you so you can see the output: +At this point, we've seen templates at their most basic: YAML files that have template directives embedded in `{{` and `}}`. In the next part, we'll take a deeper look into templates. But before moving on, there's one quick trick that can make building templates faster: When you want to test the template rendering, but not actually install anything, you can use `helm install ./mychart --debug --dry-run`. This will send the chart to the Tiller server, which will render the templates. But instead of installing the chart, it will return the rendered template to you so you can see the output: ```console -$ helm install --debug --dry-run ./mychart +$ helm install ./mychart --debug --dry-run SERVER: "localhost:44134" CHART PATH: /Users/mattbutcher/Code/Go/src/k8s.io/helm/_scratch/mychart NAME: goodly-guppy diff --git a/docs/chart_template_guide/helm_ignore_file.md b/docs/chart_template_guide/helm_ignore_file.md index 6793bdfec..5980f439c 100644 --- a/docs/chart_template_guide/helm_ignore_file.md +++ b/docs/chart_template_guide/helm_ignore_file.md @@ -4,7 +4,7 @@ The `.helmignore` file is used to specify files you don't want to include in you If this file exists, the `helm package` command will ignore all the files that match the pattern specified in the `.helmignore` file while packaging your application. -This can help in avoiding unncessary or sensitive files or directories from being added in your helm chart. +This can help in avoiding unnecessary or sensitive files or directories from being added in your helm chart. The `.helmignore` file supports Unix shell glob matching, relative path matching, and negation (prefixed with !). Only one pattern per line is considered. 
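To make those pattern rules concrete, here is a short, purely illustrative `.helmignore` sketch; the file names below are hypothetical and are not part of the patch above:

```
# Illustrative patterns only, one per line
.git/
*.swp
*.bak
# relative path match
secrets/dev-values.yaml
# negation: patterns prefixed with ! mark files that should not be ignored
!NOTES.txt
```

Files matching an ignore pattern are left out of the archive produced by `helm package`.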
diff --git a/docs/chart_template_guide/named_templates.md b/docs/chart_template_guide/named_templates.md index 08e41d58d..630b98c34 100644 --- a/docs/chart_template_guide/named_templates.md +++ b/docs/chart_template_guide/named_templates.md @@ -215,7 +215,7 @@ data: myvalue: "Hello World" drink: "coffee" food: "pizza" - app_name: mychart +app_name: mychart app_version: "0.1.0+1478129847" ``` diff --git a/docs/chart_template_guide/notes_files.md b/docs/chart_template_guide/notes_files.md index 5a8b78ca4..670f78fc2 100644 --- a/docs/chart_template_guide/notes_files.md +++ b/docs/chart_template_guide/notes_files.md @@ -1,6 +1,6 @@ # Creating a NOTES.txt File -In this section we are going to look at Helm's tool for providing instructions to your chart users. At the end of a `chart install` or `chart upgrade`, Helm can print out a block of helpful information for users. This information is highly customizable using templates. +In this section we are going to look at Helm's tool for providing instructions to your chart users. At the end of a `helm install` or `helm upgrade`, Helm can print out a block of helpful information for users. This information is highly customizable using templates. To add installation notes to your chart, simply create a `templates/NOTES.txt` file. This file is plain text, but it is processed like as a template, and has all the normal template functions and objects available. diff --git a/docs/chart_template_guide/subcharts_and_globals.md b/docs/chart_template_guide/subcharts_and_globals.md index 1954df39a..b37a82bca 100644 --- a/docs/chart_template_guide/subcharts_and_globals.md +++ b/docs/chart_template_guide/subcharts_and_globals.md @@ -63,7 +63,7 @@ data: dessert: cake ``` -## Overriding Values from a Parent Chart +## Overriding Values of a Child Chart Our original chart, `mychart` is now the _parent_ chart of `mysubchart`. This relationship is based entirely on the fact that `mysubchart` is within `mychart/charts`. diff --git a/docs/chart_template_guide/values_files.md b/docs/chart_template_guide/values_files.md index a15047667..a6b179970 100644 --- a/docs/chart_template_guide/values_files.md +++ b/docs/chart_template_guide/values_files.md @@ -1,6 +1,6 @@ # Values Files -In the previous section we looked at the built-in objects that Helm templates offer. One of the four built-in objects is `Values`. This object provides access to values passed into the chart. Its contents come from four sources: +In the previous section we looked at the built-in objects that Helm templates offer. One of these built-in objects is `Values`. This object provides access to values passed into the chart. Its contents come from four sources: - The `values.yaml` file in the chart - If this is a subchart, the `values.yaml` file of a parent chart @@ -29,7 +29,7 @@ data: drink: {{ .Values.favoriteDrink }} ``` -Notice on the last line we access `favoriteDrink` as an attribute of `Values`: `{{ .Values.favoriteDrink}}`. +Notice on the last line we access `favoriteDrink` as an attribute of `Values`: `{{ .Values.favoriteDrink }}`. Let's see how this renders. @@ -54,7 +54,7 @@ data: Because `favoriteDrink` is set in the default `values.yaml` file to `coffee`, that's the value displayed in the template. 
We can easily override that by adding a `--set` flag in our call to `helm install`: -``` +```console helm install --dry-run --debug --set favoriteDrink=slurm ./mychart SERVER: "localhost:44134" CHART PATH: /Users/mattbutcher/Code/Go/src/k8s.io/helm/_scratch/mychart @@ -85,7 +85,7 @@ favorite: Now we would have to modify the template slightly: -``` +```yaml apiVersion: v1 kind: ConfigMap metadata: diff --git a/docs/chart_template_guide/variables.md b/docs/chart_template_guide/variables.md index d924fe2cf..65f754faf 100644 --- a/docs/chart_template_guide/variables.md +++ b/docs/chart_template_guide/variables.md @@ -98,10 +98,7 @@ data: Variables are normally not "global". They are scoped to the block in which they are declared. Earlier, we assigned `$relname` in the top level of the template. That variable will be in scope for the entire template. But in our last example, `$key` and `$val` will only be in scope inside of the `{{range...}}{{end}}` block. -However, there is one variable that is always global - `$` - this -variable will always point to the root context. This can be very -useful when you are looping in a range need to know the chart's release -name. +However, there is one variable that is always global - `$` - this variable will always point to the root context. This can be very useful when you are looping in a range and need to know the chart's release name. An example illustrating this: ```yaml @@ -111,12 +108,14 @@ kind: Secret metadata: name: {{ .name }} labels: - # Many helm templates would use `.` below, but that will not work, - # however `$` will work here + # Many helm templates would use `.` below, but that will not work, + # however `$` will work here app.kubernetes.io/name: {{ template "fullname" $ }} # I cannot reference .Chart.Name, but I can do $.Chart.Name helm.sh/chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" app.kubernetes.io/instance: "{{ $.Release.Name }}" + # Value from appVersion in Chart.yaml + app.kubernetes.io/version: "{{ $.Chart.AppVersion }}" app.kubernetes.io/managed-by: "{{ $.Release.Service }}" type: kubernetes.io/tls data: diff --git a/docs/chart_template_guide/wrapping_up.md b/docs/chart_template_guide/wrapping_up.md index ea5dc1183..c7ecad475 100755 --- a/docs/chart_template_guide/wrapping_up.md +++ b/docs/chart_template_guide/wrapping_up.md @@ -5,7 +5,7 @@ This guide is intended to give you, the chart developer, a strong understanding But there are many things this guide has not covered when it comes to the practical day-to-day development of charts. Here are some useful pointers to other documentation that will help you as you create new charts: - The [Helm Charts project](https://github.com/helm/charts) is an indispensable source of charts. That project is also sets the standard for best practices in chart development. -- The Kubernetes [User's Guide](http://kubernetes.io/docs/user-guide/) provides detailed examples of the various resource kinds that you can use, from ConfigMaps and Secrets to DaemonSets and Deployments. +- The Kubernetes [Documentation](https://kubernetes.io/docs/home/) provides detailed examples of the various resource kinds that you can use, from ConfigMaps and Secrets to DaemonSets and Deployments. - The Helm [Charts Guide](../charts.md) explains the workflow of using charts. - The Helm [Chart Hooks Guide](../charts_hooks.md) explains how to create lifecycle hooks. - The Helm [Charts Tips and Tricks](../charts_tips_and_tricks.md) article provides some useful tips for writing charts. 
diff --git a/docs/chart_template_guide/yaml_techniques.md b/docs/chart_template_guide/yaml_techniques.md index 44c41f903..0a84e4a25 100644 --- a/docs/chart_template_guide/yaml_techniques.md +++ b/docs/chart_template_guide/yaml_techniques.md @@ -7,7 +7,7 @@ to read. ## Scalars and Collections -According to the [YAML spec](http://yaml.org/spec/1.2/spec.html), there are two +According to the [YAML spec](https://yaml.org/spec/1.2/spec.html), there are two types of collections, and many scalar types. The two types of collections are maps and sequences: @@ -177,7 +177,7 @@ Now the value of `coffee` will be `Latte\nCappuccino\nEspresso\n\n\n`. Indentation inside of a text block is preserved, and results in the preservation of line breaks, too: -``` +```yaml coffee: |- Latte 12 oz @@ -336,7 +336,7 @@ reference is expanded and then discarded. So if we were to decode and then re-encode the example above, the resulting YAML would be: -```YAML +```yaml coffee: yes, please favorite: Cappucino coffees: diff --git a/docs/chart_tests.md b/docs/chart_tests.md index 300eeaf73..408656c4a 100644 --- a/docs/chart_tests.md +++ b/docs/chart_tests.md @@ -64,7 +64,7 @@ spec: ``` ## Steps to Run a Test Suite on a Release -1. `$ helm install wordpress` +1. `$ helm install stable/wordpress` ``` NAME: quirky-walrus LAST DEPLOYED: Mon Feb 13 13:50:43 2017 diff --git a/docs/charts.md b/docs/charts.md index 7bc4f0020..0936a720f 100644 --- a/docs/charts.md +++ b/docs/charts.md @@ -71,7 +71,7 @@ Other fields will be silently ignored. ### Charts and Versioning Every chart must have a version number. A version must follow the -[SemVer 2](http://semver.org/) standard. Unlike Helm Classic, Kubernetes +[SemVer 2](https://semver.org/) standard. Unlike Helm Classic, Kubernetes Helm uses version numbers as release markers. Packages in repositories are identified by name plus version. @@ -191,7 +191,7 @@ Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "stable" chart repository ...Successfully got an update from the "example" chart repository ...Successfully got an update from the "another" chart repository -Update Complete. Happy Helming! +Update Complete. Saving 2 charts Downloading apache from repo http://example.com/charts Downloading mysql from repo http://another.example.com/charts @@ -258,9 +258,11 @@ All charts are loaded by default. If `tags` or `condition` fields are present, they will be evaluated and used to control loading for the chart(s) they are applied to. Condition - The condition field holds one or more YAML paths (delimited by commas). -If this path exists in the top parent's values and resolves to a boolean value, -the chart will be enabled or disabled based on that boolean value. Only the first -valid path found in the list is evaluated and if no paths exist then the condition has no effect. +If this path exists in the parent's values and resolves to a boolean value, +the chart will be enabled or disabled based on that boolean value. Only the first +valid path found in the list is evaluated and if no paths exist then the condition +has no effect. For multi-level dependencies the condition is prefixed with the +path to the parent chart. Tags - The tags field is a YAML list of labels to associate with this chart. 
In the top parent's values, all charts with tags can be enabled or disabled by @@ -272,7 +274,7 @@ dependencies: - name: subchart1 repository: http://localhost:10191 version: 0.1.0 - condition: subchart1.enabled,global.subchart1.enabled + condition: subchart1.enabled tags: - front-end - subchart1 @@ -280,11 +282,19 @@ dependencies: - name: subchart2 repository: http://localhost:10191 version: 0.1.0 - condition: subchart2.enabled,global.subchart2.enabled + condition: subchart2.enabled tags: - back-end - subchart2 +``` +```yaml +# subchart2/requirements.yaml +dependencies: + - name: subsubchart + repository: http://localhost:10191 + version: 0.1.0 + condition: subsubchart.enabled ``` ```yaml @@ -292,6 +302,9 @@ dependencies: subchart1: enabled: true +subchart2: + subsubchart: + enabled: false tags: front-end: false back-end: true @@ -302,9 +315,12 @@ In the above example all charts with the tag `front-end` would be disabled but s `front-end` tag and `subchart1` will be enabled. Since `subchart2` is tagged with `back-end` and that tag evaluates to `true`, `subchart2` will be -enabled. Also notes that although `subchart2` has a condition specified in `requirements.yaml`, there +enabled. Also note that although `subchart2` has a condition specified in `requirements.yaml`, there is no corresponding path and value in the parent's values so that condition has no effect. +`subsubchart` is disabled by default but can be enabled by setting `subchart2.subsubchart.enabled=true`. +Hint: disabling `subchart2` via tag will also disable all sub-charts (even if overriding the value `subchart2.subsubchart.enabled=true`). + ##### Using the CLI with Tags and Conditions The `--set` parameter can be used as usual to alter tag and condition values. @@ -479,7 +495,7 @@ Furthermore, A is dependent on chart B that creates objects - replicaset "B-ReplicaSet" - service "B-Service" -After installation/upgrade of chart A a single Helm release is created/modified. The release will +After installation/upgrade of chart A, a single Helm release is created/modified. The release will create/update all of the above Kubernetes objects in the following order: - A-Namespace @@ -492,7 +508,7 @@ create/update all of the above Kubernetes objects in the following order: This is because when Helm installs/upgrades charts, the Kubernetes objects from the charts and all its dependencies are -- aggregrated into a single set; then +- aggregated into a single set; then - sorted by type followed by name; and then - created/updated in that order. @@ -792,7 +808,7 @@ standard references that will help you out. - [Go templates](https://godoc.org/text/template) - [Extra template functions](https://godoc.org/github.com/Masterminds/sprig) -- [The YAML format](http://yaml.org/spec/) +- [The YAML format](https://yaml.org/spec/) ## Using Helm to Manage Charts diff --git a/docs/charts_hooks.md b/docs/charts_hooks.md index 59c9c91a2..c7fa83124 100644 --- a/docs/charts_hooks.md +++ b/docs/charts_hooks.md @@ -49,6 +49,10 @@ The following hooks are defined: have been modified. - crd-install: Adds CRD resources before any other checks are run. This is used only on CRD definitions that are used by other manifests in the chart. +- test-success: Executes when running `helm test` and expects the pod to + return successfully (return code == 0). +- test-failure: Executes when running `helm test` and expects the pod to + fail (return code != 0). ## Hooks and the Release Lifecycle @@ -76,7 +80,7 @@ hooks, the lifecycle is altered like this: 5. 
Tiller sorts hooks by weight (assigning a weight of 0 by default) and by name for those hooks with the same weight in ascending order. 6. Tiller then loads the hook with the lowest weight first (negative to positive) 7. Tiller waits until the hook is "Ready" (except for CRDs) -8. Tiller loads the resulting resources into Kubernetes. Note that if the `--wait` +8. Tiller loads the resulting resources into Kubernetes. Note that if the `--wait` flag is set, Tiller will wait until all resources are in a ready state and will not run the `post-install` hook until they are ready. 9. Tiller executes the `post-install` hook (loading hook resources) @@ -129,6 +133,7 @@ metadata: labels: app.kubernetes.io/managed-by: {{.Release.Service | quote }} app.kubernetes.io/instance: {{.Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" annotations: # This is what defines this resource as a hook. Without this line, the @@ -198,6 +203,10 @@ You can choose one or more defined annotation values: * `"hook-failed"` specifies Tiller should delete the hook if the hook failed during execution. * `"before-hook-creation"` specifies Tiller should delete the previous hook before the new hook is launched. +By default Tiller will wait for 60 seconds for a deleted hook to no longer exist in the API server before timing out. This +behavior can be changed using the `helm.sh/hook-delete-timeout` annotation. The value is the number of seconds Tiller +should wait for the hook to be fully deleted. A value of 0 means Tiller does not wait at all. + ### Defining a CRD with the `crd-install` Hook Custom Resource Definitions (CRDs) are a special kind in Kubernetes. They provide @@ -246,12 +255,10 @@ annotated. ### Automatically delete hook from previous release -When helm release being updated it is possible, that hook resource already exists in cluster. By default helm will try to create resource and fail with `"... already exists"` error. +When a helm release, that uses a hook, is being updated, it is possible that the hook resource might already exist in the cluster. In such circumstances, by default, helm will fail trying to install the hook resource with an `"... already exists"` error. + +A common reason why the hook resource might already exist is that it was not deleted following use on a previous install/upgrade. There are, in fact, good reasons why one might want to keep the hook: for example, to aid manual debugging in case something went wrong. In this case, the recommended way of ensuring subsequent attempts to create the hook do not fail is to define a `"hook-delete-policy"` that can handle this: `"helm.sh/hook-delete-policy": "before-hook-creation"`. This hook annotation causes any existing hook to be removed, before the new hook is installed. -One might choose `"helm.sh/hook-delete-policy": "before-hook-creation"` over `"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"` because: +If it is preferred to actually delete the hook after each use (rather than have to handle it on a subsequent use, as shown above), then this can be achieved using a delete policy of `"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"`. -* It is convenient to keep failed hook job resource in kubernetes for example for manual debug. -* It may be necessary to keep succeeded hook resource in kubernetes for some reason. -* At the same time it is not desirable to do manual resource deletion before helm release upgrade. 
-`"helm.sh/hook-delete-policy": "before-hook-creation"` annotation on hook causes tiller to remove the hook from previous release if there is one before the new hook is launched and can be used with another policy. diff --git a/docs/charts_tips_and_tricks.md b/docs/charts_tips_and_tricks.md index e948d3bcf..fd7a08f7e 100644 --- a/docs/charts_tips_and_tricks.md +++ b/docs/charts_tips_and_tricks.md @@ -36,6 +36,12 @@ is required, and will print an error message when that entry is missing: value: {{ required "A valid .Values.who entry required!" .Values.who }} ``` +When using the `include` function, you can pass it a custom object tree built from the current context by using the `dict` function: + +```yaml +{{- include "mytpl" (dict "key1" .Values.originalKey1 "key2" .Values.originalKey2) }} +``` + ## Quote Strings, Don't Quote Integers When you are working with string data, you are always safer quoting the @@ -229,6 +235,9 @@ orphaned. Helm will no longer manage it in any way. This can lead to problems if using `helm install --replace` on a release that has already been deleted, but has kept resources. +To explicitly opt in to resource deletion, for example when overriding a chart's +default annotations, set the resource policy annotation value to `delete`. + ## Using "Partials" and Template Includes Sometimes you want to create some reusable parts in your chart, whether @@ -255,9 +264,9 @@ embed each of the components. Two strong design patterns are illustrated by these projects: -**SAP's [OpenStack chart](https://github.com/sapcc/openstack-helm):** This chart -installs a full OpenStack IaaS on Kubernetes. All of the charts are collected -together in one GitHub repository. +**SAP's [Converged charts](https://github.com/sapcc/helm-charts):** These charts +install SAP Converged Cloud a full OpenStack IaaS on Kubernetes. All of the charts are collected +together in one GitHub repository, except for a few submodules. **Deis's [Workflow](https://github.com/deis/workflow/tree/master/charts/workflow):** This chart exposes the entire Deis PaaS system with one chart. But it's different @@ -275,7 +284,7 @@ According to the YAML specification, YAML is a superset of JSON. That means that any valid JSON structure ought to be valid in YAML. This has an advantage: Sometimes template developers may find it easier -to express a datastructure with a JSON-like syntax rather than deal with +to express a data structure with a JSON-like syntax rather than deal with YAML's whitespace sensitivity. As a best practice, templates should follow a YAML-like syntax _unless_ diff --git a/docs/developers.md b/docs/developers.md index 4edc4bea1..32adbf823 100644 --- a/docs/developers.md +++ b/docs/developers.md @@ -32,7 +32,7 @@ docker-test`. To run Helm and Tiller locally, you can run `bin/helm` or `bin/tiller`. -- Helm and Tiller are known to run on macOS and most Linuxes, including +- Helm and Tiller are known to run on macOS and most Linux distributions, including Alpine. - Tiller must have access to a Kubernetes cluster. It learns about the cluster by examining the Kube config files that `kubectl` uses. @@ -103,7 +103,7 @@ helm init --canary-image For developing on Tiller, it is sometimes more expedient to run Tiller locally instead of packaging it into an image and running it in-cluster. You can do -this by telling the Helm client to us a local instance. +this by telling the Helm client to use a local instance. ```console $ make build @@ -170,7 +170,7 @@ workflow for doing this is as follows: 5. 
When you are ready for us to review, sign your commit, push your branch to GitHub, and then open a new pull request with us. -For Git commit messages, we follow the [Semantic Commit Messages](http://karma-runner.github.io/0.13/dev/git-commit-msg.html): +For Git commit messages, we follow the [Semantic Commit Messages](https://karma-runner.github.io/0.13/dev/git-commit-msg.html): ``` fix(helm): add --foo flag to 'helm install' @@ -201,7 +201,7 @@ Common scopes: Read more: - The [Deis Guidelines](https://github.com/deis/workflow/blob/master/src/contributing/submitting-a-pull-request.md) were the inspiration for this section. -- Karma Runner [defines](http://karma-runner.github.io/0.13/dev/git-commit-msg.html) the semantic commit message idea. +- Karma Runner [defines](https://karma-runner.github.io/0.13/dev/git-commit-msg.html) the semantic commit message idea. ### Go Conventions diff --git a/docs/examples/alpine/templates/alpine-pod.yaml b/docs/examples/alpine/templates/alpine-pod.yaml index 1fc299b6d..0f48e4059 100644 --- a/docs/examples/alpine/templates/alpine-pod.yaml +++ b/docs/examples/alpine/templates/alpine-pod.yaml @@ -10,6 +10,7 @@ metadata: # The "app.kubernetes.io/instance" convention makes it easy to tie a release to all of the # Kubernetes resources that were created as part of that release. app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} # This makes it easy to audit chart usage. helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} app.kubernetes.io/name: {{ template "alpine.name" . }} diff --git a/docs/examples/alpine/values.yaml b/docs/examples/alpine/values.yaml index afe8cc6c0..225e0472a 100644 --- a/docs/examples/alpine/values.yaml +++ b/docs/examples/alpine/values.yaml @@ -1,6 +1,6 @@ image: repository: alpine - tag: 3.3 + tag: latest pullPolicy: IfNotPresent restartPolicy: Never diff --git a/docs/examples/nginx/templates/deployment.yaml b/docs/examples/nginx/templates/deployment.yaml index 5bb30f9af..59b94a8aa 100644 --- a/docs/examples/nginx/templates/deployment.yaml +++ b/docs/examples/nginx/templates/deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: # This uses a "fullname" template (see _helpers) @@ -19,6 +19,10 @@ metadata: app.kubernetes.io/name: {{ template "nginx.name" . }} spec: replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "nginx.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} template: metadata: {{- if .Values.podAnnotations }} diff --git a/docs/examples/nginx/templates/post-install-job.yaml b/docs/examples/nginx/templates/post-install-job.yaml index 6e32086ab..3562e6cf5 100644 --- a/docs/examples/nginx/templates/post-install-job.yaml +++ b/docs/examples/nginx/templates/post-install-job.yaml @@ -32,6 +32,6 @@ spec: restartPolicy: {{ .Values.restartPolicy }} containers: - name: post-install-job - image: "alpine:3.3" + image: "alpine:latest" # All we're going to do is sleep for a while, then exit. 
command: ["/bin/sleep", "{{ .Values.sleepyTime }}"] diff --git a/docs/examples/nginx/values.yaml b/docs/examples/nginx/values.yaml index b40208cce..36f2505af 100644 --- a/docs/examples/nginx/values.yaml +++ b/docs/examples/nginx/values.yaml @@ -14,7 +14,7 @@ index: >- image: repository: nginx - tag: 1.11.0 + tag: alpine pullPolicy: IfNotPresent service: diff --git a/docs/glossary.md b/docs/glossary.md index 875807268..02a5c125f 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -37,7 +37,7 @@ are bundled with it. ## Chart Version Charts are versioned according to the [SemVer 2 -spec](http://semver.org). A version number is required on every chart. +spec](https://semver.org). A version number is required on every chart. ## Chart.yaml diff --git a/docs/helm/helm.md b/docs/helm/helm.md index 177be7e88..ef9b729ab 100644 --- a/docs/helm/helm.md +++ b/docs/helm/helm.md @@ -15,67 +15,68 @@ It will also set up any necessary local configuration. Common actions from this point include: -- helm search: search for charts -- helm fetch: download a chart to your local directory to view -- helm install: upload the chart to Kubernetes -- helm list: list releases of charts +- helm search: Search for charts +- helm fetch: Download a chart to your local directory to view +- helm install: Upload the chart to Kubernetes +- helm list: List releases of charts Environment: - $HELM_HOME set an alternative location for Helm files. By default, these are stored in ~/.helm - $HELM_HOST set an alternative Tiller host. The format is host:port - $HELM_NO_PLUGINS disable plugins. Set HELM_NO_PLUGINS=1 to disable plugins. - $TILLER_NAMESPACE set an alternative Tiller namespace (default "kube-system") - $KUBECONFIG set an alternative Kubernetes configuration file (default "~/.kube/config") - $HELM_TLS_CA_CERT path to TLS CA certificate used to verify the Helm client and Tiller server certificates (default "$HELM_HOME/ca.pem") - $HELM_TLS_CERT path to TLS client certificate file for authenticating to Tiller (default "$HELM_HOME/cert.pem") - $HELM_TLS_KEY path to TLS client key file for authenticating to Tiller (default "$HELM_HOME/key.pem") - $HELM_TLS_VERIFY enable TLS connection between Helm and Tiller and verify Tiller server certificate (default "false") - $HELM_TLS_ENABLE enable TLS connection between Helm and Tiller (default "false") - $HELM_KEY_PASSPHRASE set HELM_KEY_PASSPHRASE to the passphrase of your PGP private key. If set, you will not be prompted for - the passphrase while signing helm charts + +- $HELM_HOME: Set an alternative location for Helm files. By default, these are stored in ~/.helm +- $HELM_HOST: Set an alternative Tiller host. The format is host:port +- $HELM_NO_PLUGINS: Disable plugins. Set HELM_NO_PLUGINS=1 to disable plugins. 
+- $TILLER_NAMESPACE: Set an alternative Tiller namespace (default "kube-system") +- $KUBECONFIG: Set an alternative Kubernetes configuration file (default "~/.kube/config") +- $HELM_TLS_CA_CERT: Path to TLS CA certificate used to verify the Helm client and Tiller server certificates (default "$HELM_HOME/ca.pem") +- $HELM_TLS_CERT: Path to TLS client certificate file for authenticating to Tiller (default "$HELM_HOME/cert.pem") +- $HELM_TLS_KEY: Path to TLS client key file for authenticating to Tiller (default "$HELM_HOME/key.pem") +- $HELM_TLS_ENABLE: Enable TLS connection between Helm and Tiller (default "false") +- $HELM_TLS_VERIFY: Enable TLS connection between Helm and Tiller and verify Tiller server certificate (default "false") +- $HELM_TLS_HOSTNAME: The hostname or IP address used to verify the Tiller server certificate (default "127.0.0.1") +- $HELM_KEY_PASSPHRASE: Set HELM_KEY_PASSPHRASE to the passphrase of your PGP private key. If set, you will not be prompted for the passphrase while signing helm charts ### Options ``` - --debug enable verbose output + --debug Enable verbose output -h, --help help for helm - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm completion](helm_completion.md) - Generate autocompletions script for the specified shell (bash or zsh) -* [helm create](helm_create.md) - create a new chart with the given name -* [helm delete](helm_delete.md) - given a release name, delete the release from Kubernetes -* [helm dependency](helm_dependency.md) - manage a chart's dependencies -* [helm fetch](helm_fetch.md) - download a chart from a repository and (optionally) unpack it in local directory -* [helm get](helm_get.md) - download a named release -* [helm history](helm_history.md) - fetch release history -* [helm home](helm_home.md) - displays the location of HELM_HOME -* [helm init](helm_init.md) - initialize Helm on both client and server -* [helm inspect](helm_inspect.md) - inspect a chart -* [helm install](helm_install.md) - install a chart archive -* [helm lint](helm_lint.md) - examines a chart for possible issues -* [helm list](helm_list.md) - list releases -* [helm package](helm_package.md) - package a chart directory into a chart archive -* [helm plugin](helm_plugin.md) - add, list, or remove Helm plugins -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories -* [helm reset](helm_reset.md) - uninstalls Tiller from a cluster -* [helm rollback](helm_rollback.md) - roll back a release to a previous revision -* [helm search](helm_search.md) - search for a keyword in charts -* [helm serve](helm_serve.md) - start a local http web server 
-* [helm status](helm_status.md) - displays the status of the named release -* [helm template](helm_template.md) - locally render templates -* [helm test](helm_test.md) - test a release -* [helm upgrade](helm_upgrade.md) - upgrade a release -* [helm verify](helm_verify.md) - verify that a chart at the given path has been signed and is valid -* [helm version](helm_version.md) - print the client/server version information - -###### Auto generated by spf13/cobra on 16-Oct-2018 +* [helm create](helm_create.md) - Create a new chart with the given name +* [helm delete](helm_delete.md) - Given a release name, delete the release from Kubernetes +* [helm dependency](helm_dependency.md) - Manage a chart's dependencies +* [helm fetch](helm_fetch.md) - Download a chart from a repository and (optionally) unpack it in local directory +* [helm get](helm_get.md) - Download a named release +* [helm history](helm_history.md) - Fetch release history +* [helm home](helm_home.md) - Displays the location of HELM_HOME +* [helm init](helm_init.md) - Initialize Helm on both client and server +* [helm inspect](helm_inspect.md) - Inspect a chart +* [helm install](helm_install.md) - Install a chart archive +* [helm lint](helm_lint.md) - Examines a chart for possible issues +* [helm list](helm_list.md) - List releases +* [helm package](helm_package.md) - Package a chart directory into a chart archive +* [helm plugin](helm_plugin.md) - Add, list, or remove Helm plugins +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories +* [helm reset](helm_reset.md) - Uninstalls Tiller from a cluster +* [helm rollback](helm_rollback.md) - Rollback a release to a previous revision +* [helm search](helm_search.md) - Search for a keyword in charts +* [helm serve](helm_serve.md) - Start a local http web server +* [helm status](helm_status.md) - Displays the status of the named release +* [helm template](helm_template.md) - Locally render templates +* [helm test](helm_test.md) - Test a release +* [helm upgrade](helm_upgrade.md) - Upgrade a release +* [helm verify](helm_verify.md) - Verify that a chart at the given path has been signed and is valid +* [helm version](helm_version.md) - Print the client/server version information + +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_completion.md b/docs/helm/helm_completion.md index 440393076..01f7fedb8 100644 --- a/docs/helm/helm_completion.md +++ b/docs/helm/helm_completion.md @@ -29,17 +29,17 @@ helm completion SHELL [flags] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_create.md b/docs/helm/helm_create.md index 2dc45a77c..f0c5cd037 100644 --- a/docs/helm/helm_create.md +++ b/docs/helm/helm_create.md @@ -1,12 +1,13 @@ ## helm create -create a new chart with the given name +Create a new chart with the given name ### Synopsis This command creates a chart directory along with the common files and -directories used in a chart. +directories used in a chart. It provides a basic example and is not +meant to cover all Kubernetes resources. For example, 'helm create foo' will create a directory structure that looks something like this: @@ -30,6 +31,10 @@ do not exist, Helm will attempt to create them as it goes. If the given destination exists and there are files in that directory, conflicting files will be overwritten, but other files will be left alone. +The chart that is created by invoking this command contains a Deployment, Ingress +and a Service. To use other Kubernetes resources with your chart, refer to +[The Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide). + ``` helm create NAME [flags] @@ -39,23 +44,23 @@ helm create NAME [flags] ``` -h, --help help for create - -p, --starter string the named Helm starter scaffold + -p, --starter string The name or absolute path to Helm starter scaffold ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 18-Sep-2018 +###### Auto generated by spf13/cobra on 7-Jul-2019 diff --git a/docs/helm/helm_delete.md b/docs/helm/helm_delete.md index 3c6a46844..afe8852a9 100644 --- a/docs/helm/helm_delete.md +++ b/docs/helm/helm_delete.md @@ -1,6 +1,6 @@ ## helm delete -given a release name, delete the release from Kubernetes +Given a release name, delete the release from Kubernetes ### Synopsis @@ -19,34 +19,34 @@ helm delete [flags] RELEASE_NAME [...] 
### Options ``` - --description string specify a description for the release - --dry-run simulate a delete + --description string Specify a description for the release + --dry-run Simulate a delete -h, --help help for delete - --no-hooks prevent hooks from running during deletion - --purge remove the release from the store and make its name free for later use - --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --no-hooks Prevent hooks from running during deletion + --purge Remove the release from the store and make its name free for later use + --timeout int Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_dependency.md b/docs/helm/helm_dependency.md index 317860bdb..3a508764c 100644 --- a/docs/helm/helm_dependency.md +++ b/docs/helm/helm_dependency.md @@ -1,6 +1,6 @@ ## helm dependency -manage a chart's dependencies +Manage a chart's dependencies ### Synopsis @@ -49,7 +49,7 @@ the dependency charts stored locally. The path should start with a prefix of repository: "file://../dependency_chart/nginx" If the dependency chart is retrieved locally, it is not required to have the -repository added to helm by "helm add repo". Version matching is also supported +repository added to helm by "helm repo add". Version matching is also supported for this case. 
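To illustrate the local `file://` dependency form described above, a minimal `requirements.yaml` might look like the following sketch; the chart name and version are hypothetical, and the relative path is the one shown in the text:

```yaml
# requirements.yaml -- hypothetical sketch of a local chart dependency
dependencies:
  - name: nginx
    version: "0.1.0"
    repository: "file://../dependency_chart/nginx"
```

Running `helm dependency update` in the parent chart's directory would then copy the chart from that relative path into `charts/` and record it in the lock file.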
@@ -62,20 +62,20 @@ for this case. ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -* [helm dependency build](helm_dependency_build.md) - rebuild the charts/ directory based on the requirements.lock file -* [helm dependency list](helm_dependency_list.md) - list the dependencies for the given chart -* [helm dependency update](helm_dependency_update.md) - update charts/ based on the contents of requirements.yaml +* [helm dependency build](helm_dependency_build.md) - Rebuild the charts/ directory based on the requirements.lock file +* [helm dependency list](helm_dependency_list.md) - List the dependencies for the given chart +* [helm dependency update](helm_dependency_update.md) - Update charts/ based on the contents of requirements.yaml -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_dependency_build.md b/docs/helm/helm_dependency_build.md index fba70f2ec..281b03418 100644 --- a/docs/helm/helm_dependency_build.md +++ b/docs/helm/helm_dependency_build.md @@ -1,6 +1,6 @@ ## helm dependency build -rebuild the charts/ directory based on the requirements.lock file +Rebuild the charts/ directory based on the requirements.lock file ### Synopsis @@ -8,11 +8,11 @@ rebuild the charts/ directory based on the requirements.lock file Build out the charts/ directory from the requirements.lock file. Build is used to reconstruct a chart's dependencies to the state specified in -the lock file. This will not re-negotiate dependencies, as 'helm dependency update' -does. +the lock file. -If no lock file is found, 'helm dependency build' will mirror the behavior -of 'helm dependency update'. +If no lock file is found, 'helm dependency build' will mirror the behavior of +the 'helm dependency update' command. This means it will update the on-disk +dependencies to mirror the requirements.yaml file and generate a lock file. ``` @@ -23,24 +23,24 @@ helm dependency build [flags] CHART ``` -h, --help help for build - --keyring string keyring containing public keys (default "~/.gnupg/pubring.gpg") - --verify verify the packages against signatures + --keyring string Keyring containing public keys (default "~/.gnupg/pubring.gpg") + --verify Verify the packages against signatures ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm dependency](helm_dependency.md) - manage a chart's dependencies +* [helm dependency](helm_dependency.md) - Manage a chart's dependencies -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_dependency_list.md b/docs/helm/helm_dependency_list.md index da754c5d1..449aad202 100644 --- a/docs/helm/helm_dependency_list.md +++ b/docs/helm/helm_dependency_list.md @@ -1,6 +1,6 @@ ## helm dependency list -list the dependencies for the given chart +List the dependencies for the given chart ### Synopsis @@ -27,17 +27,17 @@ helm dependency list [flags] CHART ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm dependency](helm_dependency.md) - manage a chart's dependencies +* [helm dependency](helm_dependency.md) - Manage a chart's dependencies -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_dependency_update.md b/docs/helm/helm_dependency_update.md index 88bf3fafd..2dc5a03e1 100644 --- a/docs/helm/helm_dependency_update.md +++ b/docs/helm/helm_dependency_update.md @@ -1,6 +1,6 @@ ## helm dependency update -update charts/ based on the contents of requirements.yaml +Update charts/ based on the contents of requirements.yaml ### Synopsis @@ -27,25 +27,25 @@ helm dependency update [flags] CHART ``` -h, --help help for update - --keyring string keyring containing public keys (default "~/.gnupg/pubring.gpg") - --skip-refresh do not refresh the local repository cache - --verify verify the packages against signatures + --keyring string Keyring containing public keys (default "~/.gnupg/pubring.gpg") + --skip-refresh Do not refresh the local repository cache + --verify Verify the packages against signatures ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm dependency](helm_dependency.md) - manage a chart's dependencies +* [helm dependency](helm_dependency.md) - Manage a chart's dependencies -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_fetch.md b/docs/helm/helm_fetch.md index 81c0a9596..056068786 100644 --- a/docs/helm/helm_fetch.md +++ b/docs/helm/helm_fetch.md @@ -1,6 +1,6 @@ ## helm fetch -download a chart from a repository and (optionally) unpack it in local directory +Download a chart from a repository and (optionally) unpack it in local directory ### Synopsis @@ -26,37 +26,37 @@ helm fetch [flags] [chart URL | repo/chartname] [...] ### Options ``` - --ca-file string verify certificates of HTTPS-enabled servers using this CA bundle - --cert-file string identify HTTPS client using this SSL certificate file - -d, --destination string location to write the chart. 
If this and tardir are specified, tardir is appended to this (default ".") - --devel use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. + --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --cert-file string Identify HTTPS client using this SSL certificate file + -d, --destination string Location to write the chart. If this and tardir are specified, tardir is appended to this (default ".") + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. -h, --help help for fetch - --key-file string identify HTTPS client using this SSL key file - --keyring string keyring containing public keys (default "~/.gnupg/pubring.gpg") - --password string chart repository password - --prov fetch the provenance file, but don't perform verification - --repo string chart repository url where to locate the requested chart - --untar if set to true, will untar the chart after downloading it - --untardir string if untar is specified, this flag specifies the name of the directory into which the chart is expanded (default ".") - --username string chart repository username - --verify verify the package against its signature - --version string specific version of a chart. Without this, the latest version is fetched + --key-file string Identify HTTPS client using this SSL key file + --keyring string Keyring containing public keys (default "~/.gnupg/pubring.gpg") + --password string Chart repository password + --prov Fetch the provenance file, but don't perform verification + --repo string Chart repository url where to locate the requested chart + --untar If set to true, will untar the chart after downloading it + --untardir string If untar is specified, this flag specifies the name of the directory into which the chart is expanded (default ".") + --username string Chart repository username + --verify Verify the package against its signature + --version string Specific version of a chart. Without this, the latest version is fetched ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
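For example, the fetch flags above can be combined like this (the repository and chart names here are placeholders):

```sh
# Download a specific chart version and unpack it into ./charts
$ helm fetch stable/mysql --version 1.2.0 --untar --untardir ./charts
```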
-###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_get.md b/docs/helm/helm_get.md index 3b99c93d5..3d2dca608 100644 --- a/docs/helm/helm_get.md +++ b/docs/helm/helm_get.md @@ -1,6 +1,6 @@ ## helm get -download a named release +Download a named release ### Synopsis @@ -25,33 +25,34 @@ helm get [flags] RELEASE_NAME ``` -h, --help help for get - --revision int32 get the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --revision int32 Get the named release with revision + --template string Go template for formatting the output, eg: {{.Release.Name}} + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
-* [helm get hooks](helm_get_hooks.md) - download all hooks for a named release -* [helm get manifest](helm_get_manifest.md) - download the manifest for a named release -* [helm get notes](helm_get_notes.md) - displays the notes of the named release -* [helm get values](helm_get_values.md) - download the values file for a named release +* [helm get hooks](helm_get_hooks.md) - Download all hooks for a named release +* [helm get manifest](helm_get_manifest.md) - Download the manifest for a named release +* [helm get notes](helm_get_notes.md) - Displays the notes of the named release +* [helm get values](helm_get_values.md) - Download the values file for a named release -###### Auto generated by spf13/cobra on 1-Sep-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_get_hooks.md b/docs/helm/helm_get_hooks.md index d7097fd59..716e0f692 100644 --- a/docs/helm/helm_get_hooks.md +++ b/docs/helm/helm_get_hooks.md @@ -1,6 +1,6 @@ ## helm get hooks -download all hooks for a named release +Download all hooks for a named release ### Synopsis @@ -18,29 +18,29 @@ helm get hooks [flags] RELEASE_NAME ``` -h, --help help for hooks - --revision int32 get the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --revision int32 Get the named release with revision + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm get](helm_get.md) - download a named release +* [helm get](helm_get.md) - Download a named release -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_get_manifest.md b/docs/helm/helm_get_manifest.md index 60bfeac0b..ec3987108 100644 --- a/docs/helm/helm_get_manifest.md +++ b/docs/helm/helm_get_manifest.md @@ -1,6 +1,6 @@ ## helm get manifest -download the manifest for a named release +Download the manifest for a named release ### Synopsis @@ -20,29 +20,29 @@ helm get manifest [flags] RELEASE_NAME ``` -h, --help help for manifest - --revision int32 get the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --revision int32 Get the named release with revision + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm get](helm_get.md) - download a named release +* [helm get](helm_get.md) - Download a named release -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_get_notes.md b/docs/helm/helm_get_notes.md index 076aaaa59..b7859533f 100644 --- a/docs/helm/helm_get_notes.md +++ b/docs/helm/helm_get_notes.md @@ -1,6 +1,6 @@ ## helm get notes -displays the notes of the named release +Displays the notes of the named release ### Synopsis @@ -16,29 +16,29 @@ helm get notes [flags] RELEASE_NAME ``` -h, --help help for notes - --revision int32 get the notes of the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --revision int32 Get the notes of the named release with revision + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm get](helm_get.md) - download a named release +* [helm get](helm_get.md) - Download a named release -###### Auto generated by spf13/cobra on 1-Sep-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_get_values.md b/docs/helm/helm_get_values.md index 87d21b954..ab4a4494e 100644 --- a/docs/helm/helm_get_values.md +++ b/docs/helm/helm_get_values.md @@ -1,6 +1,6 @@ ## helm get values -download the values file for a named release +Download the values file for a named release ### Synopsis @@ -15,32 +15,32 @@ helm get values [flags] RELEASE_NAME ### Options ``` - -a, --all dump all (computed) values + -a, --all Dump all (computed) values -h, --help help for values - --output string output the specified format (json or yaml) (default "yaml") - --revision int32 get the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --output string Output the specified format (json or yaml) (default "yaml") + --revision int32 Get the named release with revision + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm get](helm_get.md) - download a named release +* [helm get](helm_get.md) - Download a named release -###### Auto generated by spf13/cobra on 7-Sep-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_history.md b/docs/helm/helm_history.md index 1d7287fd9..17262a258 100755 --- a/docs/helm/helm_history.md +++ b/docs/helm/helm_history.md @@ -1,6 +1,6 @@ ## helm history -fetch release history +Fetch release history ### Synopsis @@ -27,32 +27,32 @@ helm history [flags] RELEASE_NAME ### Options ``` - --col-width uint specifies the max column width of output (default 60) + --col-width uint Specifies the max column width of output (default 60) -h, --help help for history - --max int32 maximum number of revision to include in history (default 256) - -o, --output string prints the output in the specified format (json|table|yaml) (default "table") - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --max int32 Maximum number of revisions to include in history (default 256) + -o, --output string Prints the output in the specified format (json|table|yaml) (default "table") + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
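For example, a release's history can be listed with a revision limit and JSON output (the release name is a placeholder):

```sh
# Show the five most recent revisions of a release, formatted as JSON
$ helm history my-release --max 5 -o json
```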
-###### Auto generated by spf13/cobra on 17-Dec-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_home.md b/docs/helm/helm_home.md index 192302424..050251cc1 100644 --- a/docs/helm/helm_home.md +++ b/docs/helm/helm_home.md @@ -1,6 +1,6 @@ ## helm home -displays the location of HELM_HOME +Displays the location of HELM_HOME ### Synopsis @@ -22,17 +22,17 @@ helm home [flags] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_init.md b/docs/helm/helm_init.md index 72fd9e86b..64f8bcf62 100644 --- a/docs/helm/helm_init.md +++ b/docs/helm/helm_init.md @@ -1,6 +1,6 @@ ## helm init -initialize Helm on both client and server +Initialize Helm on both client and server ### Synopsis @@ -32,47 +32,47 @@ helm init [flags] ### Options ``` - --automount-service-account-token auto-mount the given service account to tiller (default true) - --canary-image use the canary Tiller image - -c, --client-only if set does not install Tiller - --dry-run do not install local or remote - --force-upgrade force upgrade of Tiller to the current helm version + --automount-service-account-token Auto-mount the given service account to tiller (default true) + --canary-image Use the canary Tiller image + -c, --client-only If set does not install Tiller + --dry-run Do not install local or remote + --force-upgrade Force upgrade of Tiller to the current helm version -h, --help help for init - --history-max int limit the maximum number of revisions saved per release. Use 0 for no limit. + --history-max int Limit the maximum number of revisions saved per release. Use 0 for no limit. 
--local-repo-url string URL for local repository (default "http://127.0.0.1:8879/charts") - --net-host install Tiller with net=host - --node-selectors string labels to specify the node on which Tiller is installed (app=tiller,helm=rocks) - -o, --output OutputFormat skip installation and output Tiller's manifest in specified format (json or yaml) - --override stringArray override values for the Tiller Deployment manifest (can specify multiple or separate values with commas: key1=val1,key2=val2) - --replicas int amount of tiller instances to run on the cluster (default 1) - --service-account string name of service account - --skip-refresh do not refresh (download) the local repository cache + --net-host Install Tiller with net=host + --node-selectors string Labels to specify the node on which Tiller is installed (app=tiller,helm=rocks) + -o, --output OutputFormat Skip installation and output Tiller's manifest in specified format (json or yaml) + --override stringArray Override values for the Tiller Deployment manifest (can specify multiple or separate values with commas: key1=val1,key2=val2) + --replicas int Amount of tiller instances to run on the cluster (default 1) + --service-account string Name of service account + --skip-refresh Do not refresh (download) the local repository cache --stable-repo-url string URL for stable repository (default "https://kubernetes-charts.storage.googleapis.com") - -i, --tiller-image string override Tiller image - --tiller-tls install Tiller with TLS enabled - --tiller-tls-cert string path to TLS certificate file to install with Tiller - --tiller-tls-hostname string the server name used to verify the hostname on the returned certificates from Tiller - --tiller-tls-key string path to TLS key file to install with Tiller - --tiller-tls-verify install Tiller with TLS enabled and to verify remote certificates - --tls-ca-cert string path to CA root certificate - --upgrade upgrade if Tiller is already installed - --wait block until Tiller is running and ready to receive requests + -i, --tiller-image string Override Tiller image + --tiller-tls Install Tiller with TLS enabled + --tiller-tls-cert string Path to TLS certificate file to install with Tiller + --tiller-tls-hostname string The server name used to verify the hostname on the returned certificates from Tiller + --tiller-tls-key string Path to TLS key file to install with Tiller + --tiller-tls-verify Install Tiller with TLS enabled and to verify remote certificates + --tls-ca-cert string Path to CA root certificate + --upgrade Upgrade if Tiller is already installed + --wait Block until Tiller is running and ready to receive requests ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 4-Sep-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_inspect.md b/docs/helm/helm_inspect.md index 86689eeaa..d5845c78c 100644 --- a/docs/helm/helm_inspect.md +++ b/docs/helm/helm_inspect.md @@ -1,6 +1,6 @@ ## helm inspect -inspect a chart +Inspect a chart ### Synopsis @@ -18,28 +18,29 @@ helm inspect [CHART] [flags] ### Options ``` - --ca-file string chart repository url where to locate the requested chart - --cert-file string verify certificates of HTTPS-enabled servers using this CA bundle + --ca-file string Chart repository url where to locate the requested chart + --cert-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. -h, --help help for inspect - --key-file string identify HTTPS client using this SSL key file - --keyring string path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") - --password string chart repository password where to locate the requested chart - --repo string chart repository url where to locate the requested chart - --username string chart repository username where to locate the requested chart - --verify verify the provenance data for this chart - --version string version of the chart. By default, the newest chart is shown + --key-file string Identify HTTPS client using this SSL key file + --keyring string Path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") + --password string Chart repository password where to locate the requested chart + --repo string Chart repository url where to locate the requested chart + --username string Chart repository username where to locate the requested chart + --verify Verify the provenance data for this chart + --version string Version of the chart. By default, the newest chart is shown ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO @@ -49,4 +50,4 @@ helm inspect [CHART] [flags] * [helm inspect readme](helm_inspect_readme.md) - shows inspect readme * [helm inspect values](helm_inspect_values.md) - shows inspect values -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_inspect_chart.md b/docs/helm/helm_inspect_chart.md index 2b9adbb7e..447de556b 100644 --- a/docs/helm/helm_inspect_chart.md +++ b/docs/helm/helm_inspect_chart.md @@ -16,32 +16,33 @@ helm inspect chart [CHART] [flags] ### Options ``` - --ca-file string chart repository url where to locate the requested chart - --cert-file string verify certificates of HTTPS-enabled servers using this CA bundle + --ca-file string Chart repository url where to locate the requested chart + --cert-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. -h, --help help for chart - --key-file string identify HTTPS client using this SSL key file - --keyring string path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") - --password string chart repository password where to locate the requested chart - --repo string chart repository url where to locate the requested chart - --username string chart repository username where to locate the requested chart - --verify verify the provenance data for this chart - --version string version of the chart. By default, the newest chart is shown + --key-file string Identify HTTPS client using this SSL key file + --keyring string Path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") + --password string Chart repository password where to locate the requested chart + --repo string Chart repository url where to locate the requested chart + --username string Chart repository username where to locate the requested chart + --verify Verify the provenance data for this chart + --version string Version of the chart. By default, the newest chart is shown ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm inspect](helm_inspect.md) - inspect a chart +* [helm inspect](helm_inspect.md) - Inspect a chart -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_inspect_readme.md b/docs/helm/helm_inspect_readme.md index d222cd53a..5cfe73de2 100644 --- a/docs/helm/helm_inspect_readme.md +++ b/docs/helm/helm_inspect_readme.md @@ -16,30 +16,31 @@ helm inspect readme [CHART] [flags] ### Options ``` - --ca-file string chart repository url where to locate the requested chart - --cert-file string verify certificates of HTTPS-enabled servers using this CA bundle + --ca-file string Chart repository url where to locate the requested chart + --cert-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. -h, --help help for readme - --key-file string identify HTTPS client using this SSL key file - --keyring string path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") - --repo string chart repository url where to locate the requested chart - --verify verify the provenance data for this chart - --version string version of the chart. By default, the newest chart is shown + --key-file string Identify HTTPS client using this SSL key file + --keyring string Path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") + --repo string Chart repository url where to locate the requested chart + --verify Verify the provenance data for this chart + --version string Version of the chart. By default, the newest chart is shown ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm inspect](helm_inspect.md) - inspect a chart +* [helm inspect](helm_inspect.md) - Inspect a chart -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_inspect_values.md b/docs/helm/helm_inspect_values.md index 9cca2fc32..34d76a7ba 100644 --- a/docs/helm/helm_inspect_values.md +++ b/docs/helm/helm_inspect_values.md @@ -16,32 +16,33 @@ helm inspect values [CHART] [flags] ### Options ``` - --ca-file string chart repository url where to locate the requested chart - --cert-file string verify certificates of HTTPS-enabled servers using this CA bundle + --ca-file string Chart repository url where to locate the requested chart + --cert-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. -h, --help help for values - --key-file string identify HTTPS client using this SSL key file - --keyring string path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") - --password string chart repository password where to locate the requested chart - --repo string chart repository url where to locate the requested chart - --username string chart repository username where to locate the requested chart - --verify verify the provenance data for this chart - --version string version of the chart. By default, the newest chart is shown + --key-file string Identify HTTPS client using this SSL key file + --keyring string Path to the keyring containing public verification keys (default "~/.gnupg/pubring.gpg") + --password string Chart repository password where to locate the requested chart + --repo string Chart repository url where to locate the requested chart + --username string Chart repository username where to locate the requested chart + --verify Verify the provenance data for this chart + --version string Version of the chart. By default, the newest chart is shown ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm inspect](helm_inspect.md) - inspect a chart +* [helm inspect](helm_inspect.md) - Inspect a chart -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_install.md b/docs/helm/helm_install.md index 05cdf1e4a..d44f9ded5 100644 --- a/docs/helm/helm_install.md +++ b/docs/helm/helm_install.md @@ -1,6 +1,6 @@ ## helm install -install a chart archive +Install a chart archive ### Synopsis @@ -78,54 +78,57 @@ helm install [CHART] [flags] ### Options ``` - --ca-file string verify certificates of HTTPS-enabled servers using this CA bundle - --cert-file string identify HTTPS client using this SSL certificate file - --dep-up run helm dependency update before installing the chart - --description string specify a description for the release - --devel use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. - --dry-run simulate an install + --atomic If set, installation process purges chart on fail, also sets --wait flag + --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --cert-file string Identify HTTPS client using this SSL certificate file + --dep-up Run helm dependency update before installing the chart + --description string Specify a description for the release + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. + --dry-run Simulate an install -h, --help help for install - --key-file string identify HTTPS client using this SSL key file - --keyring string location of public keys used for verification (default "~/.gnupg/pubring.gpg") - -n, --name string release name. If unspecified, it will autogenerate one for you - --name-template string specify template used to name the release - --namespace string namespace to install the release into. Defaults to the current kube config namespace. - --no-crd-hook prevent CRD hooks from running, but run other hooks - --no-hooks prevent hooks from running during install - --password string chart repository password where to locate the requested chart - --replace re-use the given name, even if that name is already used. 
This is unsafe in production - --repo string chart repository url where to locate the requested chart - --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --set-file stringArray set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) - --set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote - --username string chart repository username where to locate the requested chart - -f, --values valueFiles specify values in a YAML file or a URL(can specify multiple) (default []) - --verify verify the package before installing it - --version string specify the exact chart version to install. If this is not specified, the latest version is installed - --wait if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout + --key-file string Identify HTTPS client using this SSL key file + --keyring string Location of public keys used for verification (default "~/.gnupg/pubring.gpg") + -n, --name string The release name. If unspecified, it will autogenerate one for you + --name-template string Specify template used to name the release + --namespace string Namespace to install the release into. Defaults to the current kube config namespace. + --no-crd-hook Prevent CRD hooks from running, but run other hooks + --no-hooks Prevent hooks from running during install + -o, --output string Prints the output in the specified format. Allowed values: table, json, yaml (default "table") + --password string Chart repository password where to locate the requested chart + --render-subchart-notes Render subchart notes along with the parent + --replace Re-use the given name, even if that name is already used. 
This is unsafe in production + --repo string Chart repository url where to locate the requested chart + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --timeout int Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote + --username string Chart repository username where to locate the requested chart + -f, --values valueFiles Specify values in a YAML file or a URL(can specify multiple) (default []) + --verify Verify the package before installing it + --version string Specify the exact chart version to install. If this is not specified, the latest version is installed + --wait If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
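For example, an install can combine a release name, a namespace, a value override, and the newly documented --atomic behaviour (the chart name, release name, and value key are placeholders):

```sh
# Install a chart into a namespace, overriding one value; --atomic implies --wait
# and purges the release if the installation fails
$ helm install stable/nginx-ingress --name my-ingress --namespace ingress \
    --set controller.replicaCount=2 --atomic --timeout 600
```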
-###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 24-Sep-2019 diff --git a/docs/helm/helm_lint.md b/docs/helm/helm_lint.md index bf168184e..e341975b7 100644 --- a/docs/helm/helm_lint.md +++ b/docs/helm/helm_lint.md @@ -1,6 +1,6 @@ ## helm lint -examines a chart for possible issues +Examines a chart for possible issues ### Synopsis @@ -21,28 +21,28 @@ helm lint [flags] PATH ``` -h, --help help for lint - --namespace string namespace to put the release into (default "default") - --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --set-file stringArray set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) - --set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --strict fail on lint warnings - -f, --values valueFiles specify values in a YAML file (can specify multiple) (default []) + --namespace string Namespace to put the release into (default "default") + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --strict Fail on lint warnings + -f, --values valueFiles Specify values in a YAML file (can specify multiple) (default []) ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
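For example, a local chart can be linted with warnings treated as failures (the chart path and value are placeholders):

```sh
# Fail on lint warnings and supply a value needed by the templates
$ helm lint ./mychart --strict --set image.tag=1.0.0
```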
-###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_list.md b/docs/helm/helm_list.md index 5087c8a59..568a3c7be 100755 --- a/docs/helm/helm_list.md +++ b/docs/helm/helm_list.md @@ -1,6 +1,6 @@ ## helm list -list releases +List releases ### Synopsis @@ -38,44 +38,44 @@ helm list [flags] [FILTER] ### Options ``` - -a, --all show all releases, not just the ones marked DEPLOYED - -c, --chart-name sort by chart name - --col-width uint specifies the max column width of output (default 60) - -d, --date sort by release date - --deleted show deleted releases - --deleting show releases that are currently being deleted - --deployed show deployed releases. If no other is specified, this will be automatically enabled - --failed show failed releases + -a, --all Show all releases, not just the ones marked DEPLOYED + -c, --chart-name Sort by chart name + --col-width uint Specifies the max column width of output (default 60) + -d, --date Sort by release date + --deleted Show deleted releases + --deleting Show releases that are currently being deleted + --deployed Show deployed releases. If no other is specified, this will be automatically enabled + --failed Show failed releases -h, --help help for list - -m, --max int maximum number of releases to fetch (default 256) - --namespace string show releases within a specific namespace - -o, --offset string next release name in the list, used to offset from start value - --output string output the specified format (json or yaml) - --pending show pending releases - -r, --reverse reverse the sort order - -q, --short output short (quiet) listing format - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + -m, --max int Maximum number of releases to fetch (default 256) + --namespace string Show releases within a specific namespace + -o, --offset string Next release name in the list, used to offset from start value + --output string Output the specified format (json or yaml) + --pending Show pending releases + -r, --reverse Reverse the sort order + -q, --short Output short (quiet) listing format + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Sep-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_package.md b/docs/helm/helm_package.md index b772fa70c..a3db8bec8 100644 --- a/docs/helm/helm_package.md +++ b/docs/helm/helm_package.md @@ -1,6 +1,6 @@ ## helm package -package a chart directory into a chart archive +Package a chart directory into a chart archive ### Synopsis @@ -22,31 +22,31 @@ helm package [flags] [CHART_PATH] [...] ### Options ``` - --app-version string set the appVersion on the chart to this version - -u, --dependency-update update dependencies from "requirements.yaml" to dir "charts/" before packaging - -d, --destination string location to write the chart. (default ".") + --app-version string Set the appVersion on the chart to this version + -u, --dependency-update Update dependencies from "requirements.yaml" to dir "charts/" before packaging + -d, --destination string Location to write the chart. (default ".") -h, --help help for package - --key string name of the key to use when signing. Used if --sign is true - --keyring string location of a public keyring (default "~/.gnupg/pubring.gpg") - --save save packaged chart to local chart repository (default true) - --sign use a PGP private key to sign this package - --version string set the version on the chart to this semver version + --key string Name of the key to use when signing. Used if --sign is true + --keyring string Location of a public keyring (default "~/.gnupg/pubring.gpg") + --save Save packaged chart to local chart repository (default true) + --sign Use a PGP private key to sign this package + --version string Set the version on the chart to this semver version ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_plugin.md b/docs/helm/helm_plugin.md index 5aa57b69c..ab66d7a05 100644 --- a/docs/helm/helm_plugin.md +++ b/docs/helm/helm_plugin.md @@ -1,6 +1,6 @@ ## helm plugin -add, list, or remove Helm plugins +Add, list, or remove Helm plugins ### Synopsis @@ -17,21 +17,21 @@ Manage client-side Helm plugins. ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -* [helm plugin install](helm_plugin_install.md) - install one or more Helm plugins -* [helm plugin list](helm_plugin_list.md) - list installed Helm plugins -* [helm plugin remove](helm_plugin_remove.md) - remove one or more Helm plugins -* [helm plugin update](helm_plugin_update.md) - update one or more Helm plugins +* [helm plugin install](helm_plugin_install.md) - Install one or more Helm plugins +* [helm plugin list](helm_plugin_list.md) - List installed Helm plugins +* [helm plugin remove](helm_plugin_remove.md) - Remove one or more Helm plugins +* [helm plugin update](helm_plugin_update.md) - Update one or more Helm plugins -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_plugin_install.md b/docs/helm/helm_plugin_install.md index f30bfff55..47f4ea4ca 100644 --- a/docs/helm/helm_plugin_install.md +++ b/docs/helm/helm_plugin_install.md @@ -1,6 +1,6 @@ ## helm plugin install -install one or more Helm plugins +Install one or more Helm plugins ### Synopsis @@ -19,23 +19,23 @@ helm plugin install [options] ... [flags] ``` -h, --help help for install - --version string specify a version constraint. If this is not specified, the latest version is installed + --version string Specify a version constraint. If this is not specified, the latest version is installed ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. 
Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm plugin](helm_plugin.md) - add, list, or remove Helm plugins +* [helm plugin](helm_plugin.md) - Add, list, or remove Helm plugins -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_plugin_list.md b/docs/helm/helm_plugin_list.md index 373462e2b..897a6b5d0 100644 --- a/docs/helm/helm_plugin_list.md +++ b/docs/helm/helm_plugin_list.md @@ -1,10 +1,10 @@ ## helm plugin list -list installed Helm plugins +List installed Helm plugins ### Synopsis -list installed Helm plugins +List installed Helm plugins ``` helm plugin list [flags] @@ -19,17 +19,17 @@ helm plugin list [flags] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm plugin](helm_plugin.md) - add, list, or remove Helm plugins +* [helm plugin](helm_plugin.md) - Add, list, or remove Helm plugins -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_plugin_remove.md b/docs/helm/helm_plugin_remove.md index 30f222c9f..e1a017f9a 100644 --- a/docs/helm/helm_plugin_remove.md +++ b/docs/helm/helm_plugin_remove.md @@ -1,10 +1,10 @@ ## helm plugin remove -remove one or more Helm plugins +Remove one or more Helm plugins ### Synopsis -remove one or more Helm plugins +Remove one or more Helm plugins ``` helm plugin remove ... [flags] @@ -19,17 +19,17 @@ helm plugin remove ... [flags] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. 
Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm plugin](helm_plugin.md) - add, list, or remove Helm plugins +* [helm plugin](helm_plugin.md) - Add, list, or remove Helm plugins -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_plugin_update.md b/docs/helm/helm_plugin_update.md index 65b16cd9d..bedae44a3 100644 --- a/docs/helm/helm_plugin_update.md +++ b/docs/helm/helm_plugin_update.md @@ -1,10 +1,10 @@ ## helm plugin update -update one or more Helm plugins +Update one or more Helm plugins ### Synopsis -update one or more Helm plugins +Update one or more Helm plugins ``` helm plugin update ... [flags] @@ -19,17 +19,17 @@ helm plugin update ... [flags] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm plugin](helm_plugin.md) - add, list, or remove Helm plugins +* [helm plugin](helm_plugin.md) - Add, list, or remove Helm plugins -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_repo.md b/docs/helm/helm_repo.md index 0b73fbcd0..9f9a6a921 100644 --- a/docs/helm/helm_repo.md +++ b/docs/helm/helm_repo.md @@ -1,6 +1,6 @@ ## helm repo -add, list, remove, update, and index chart repositories +Add, list, remove, update, and index chart repositories ### Synopsis @@ -21,22 +21,22 @@ Example usage: ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -* [helm repo add](helm_repo_add.md) - add a chart repository -* [helm repo index](helm_repo_index.md) - generate an index file given a directory containing packaged charts -* [helm repo list](helm_repo_list.md) - list chart repositories -* [helm repo remove](helm_repo_remove.md) - remove a chart repository -* [helm repo update](helm_repo_update.md) - update information of available charts locally from chart repositories +* [helm repo add](helm_repo_add.md) - Add a chart repository +* [helm repo index](helm_repo_index.md) - Generate an index file given a directory containing packaged charts +* [helm repo list](helm_repo_list.md) - List chart repositories +* [helm repo remove](helm_repo_remove.md) - Remove a chart repository +* [helm repo update](helm_repo_update.md) - Update information of available charts locally from chart repositories -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_repo_add.md b/docs/helm/helm_repo_add.md index 29947147d..f985a32c3 100644 --- a/docs/helm/helm_repo_add.md +++ b/docs/helm/helm_repo_add.md @@ -1,10 +1,10 @@ ## helm repo add -add a chart repository +Add a chart repository ### Synopsis -add a chart repository +Add a chart repository ``` helm repo add [flags] [NAME] [URL] @@ -13,29 +13,29 @@ helm repo add [flags] [NAME] [URL] ### Options ``` - --ca-file string verify certificates of HTTPS-enabled servers using this CA bundle - --cert-file string identify HTTPS client using this SSL certificate file + --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --cert-file string Identify HTTPS client using this SSL certificate file -h, --help help for add - --key-file string identify HTTPS client using this SSL key file - --no-update raise error if repo is already registered - --password string chart repository password - --username string chart repository username + --key-file string Identify HTTPS client using this SSL key file + --no-update Raise error if repo is already registered + --password string Chart repository password + --username string Chart repository username ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_repo_index.md b/docs/helm/helm_repo_index.md index 4660489f9..e57fd29ff 100644 --- a/docs/helm/helm_repo_index.md +++ b/docs/helm/helm_repo_index.md @@ -1,6 +1,6 @@ ## helm repo index -generate an index file given a directory containing packaged charts +Generate an index file given a directory containing packaged charts ### Synopsis @@ -23,24 +23,24 @@ helm repo index [flags] [DIR] ``` -h, --help help for index - --merge string merge the generated index into the given index - --url string url of chart repository + --merge string Merge the generated index into the given index + --url string URL of the chart repository ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. 
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_repo_list.md b/docs/helm/helm_repo_list.md index bebaa6333..3ff2cbaf0 100644 --- a/docs/helm/helm_repo_list.md +++ b/docs/helm/helm_repo_list.md @@ -1,10 +1,10 @@ ## helm repo list -list chart repositories +List chart repositories ### Synopsis -list chart repositories +List chart repositories ``` helm repo list [flags] @@ -13,23 +13,24 @@ helm repo list [flags] ### Options ``` - -h, --help help for list + -h, --help help for list + -o, --output string Prints the output in the specified format. Allowed values: table, json, yaml (default "table") ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 24-Sep-2019 diff --git a/docs/helm/helm_repo_remove.md b/docs/helm/helm_repo_remove.md index 89f43a130..87b35b5be 100644 --- a/docs/helm/helm_repo_remove.md +++ b/docs/helm/helm_repo_remove.md @@ -1,10 +1,10 @@ ## helm repo remove -remove a chart repository +Remove a chart repository ### Synopsis -remove a chart repository +Remove a chart repository ``` helm repo remove [flags] [NAME] @@ -19,17 +19,17 @@ helm repo remove [flags] [NAME] ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_repo_update.md b/docs/helm/helm_repo_update.md index 381c12fb3..d5e8849bc 100644 --- a/docs/helm/helm_repo_update.md +++ b/docs/helm/helm_repo_update.md @@ -1,6 +1,6 @@ ## helm repo update -update information of available charts locally from chart repositories +Update information of available charts locally from chart repositories ### Synopsis @@ -11,32 +11,39 @@ Information is cached locally, where it is used by commands like 'helm search'. 'helm update' is the deprecated form of 'helm repo update'. It will be removed in future releases. +You can specify the name of a repository you want to update. + +    $ helm repo update [REPO_NAME] + +To update all the repositories, use 'helm repo update'. + + ``` -helm repo update [flags] +helm repo update [REPO_NAME] [flags] ``` ### Options ``` -h, --help help for update - --strict fail on update warnings + --strict Fail on update warnings ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller.
Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO -* [helm repo](helm_repo.md) - add, list, remove, update, and index chart repositories +* [helm repo](helm_repo.md) - Add, list, remove, update, and index chart repositories -###### Auto generated by spf13/cobra on 15-Nov-2018 +###### Auto generated by spf13/cobra on 7-Jun-2019 diff --git a/docs/helm/helm_reset.md b/docs/helm/helm_reset.md index 772ac42c3..929a64088 100644 --- a/docs/helm/helm_reset.md +++ b/docs/helm/helm_reset.md @@ -1,6 +1,6 @@ ## helm reset -uninstalls Tiller from a cluster +Uninstalls Tiller from a cluster ### Synopsis @@ -17,31 +17,31 @@ helm reset [flags] ### Options ``` - -f, --force forces Tiller uninstall even if there are releases installed, or if Tiller is not in ready state. Releases are not deleted.) + -f, --force Forces Tiller uninstall even if there are releases installed, or if Tiller is not in ready state. Releases are not deleted.) -h, --help help for reset - --remove-helm-home if set deletes $HELM_HOME - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --remove-helm-home If set, deletes $HELM_HOME + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
-###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_rollback.md b/docs/helm/helm_rollback.md index 5862b180a..e50960504 100644 --- a/docs/helm/helm_rollback.md +++ b/docs/helm/helm_rollback.md @@ -1,6 +1,6 @@ ## helm rollback -roll back a release to a previous revision +Rollback a release to a previous revision ### Synopsis @@ -9,7 +9,8 @@ This command rolls back a release to a previous revision. The first argument of the rollback command is the name of a release, and the second is a revision (version) number. To see revision numbers, run -'helm history RELEASE'. +'helm history RELEASE'. If you'd like to rollback to the previous release use +'helm rollback [RELEASE] 0'. ``` @@ -19,36 +20,37 @@ helm rollback [flags] [RELEASE] [REVISION] ### Options ``` - --description string specify a description for the release - --dry-run simulate a rollback - --force force resource update through delete/recreate if needed + --cleanup-on-fail Allow deletion of new resources created in this rollback when rollback failed + --description string Specify a description for the release + --dry-run Simulate a rollback + --force Force resource update through delete/recreate if needed -h, --help help for rollback - --no-hooks prevent hooks from running during rollback - --recreate-pods performs pods restart for the resource if applicable - --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote - --wait if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout + --no-hooks Prevent hooks from running during rollback + --recreate-pods Performs pods restart for the resource if applicable + --timeout int Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote + --wait If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_search.md b/docs/helm/helm_search.md index c45a397e3..558cadfee 100644 --- a/docs/helm/helm_search.md +++ b/docs/helm/helm_search.md @@ -1,6 +1,6 @@ ## helm search -search for a keyword in charts +Search for a keyword in charts ### Synopsis @@ -18,27 +18,28 @@ helm search [keyword] [flags] ### Options ``` - --col-width uint specifies the max column width of output (default 60) + --col-width uint Specifies the max column width of output (default 60) -h, --help help for search - -r, --regexp use regular expressions for searching - -v, --version string search using semantic versioning constraints - -l, --versions show the long listing, with each version of each chart on its own line + -o, --output string Prints the output in the specified format. Allowed values: table, json, yaml (default "table") + -r, --regexp Use regular expressions for searching + -v, --version string Search using semantic versioning constraints + -l, --versions Show the long listing, with each version of each chart on its own line ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
-###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 24-Sep-2019 diff --git a/docs/helm/helm_serve.md b/docs/helm/helm_serve.md index 62a68595a..f9b24e7af 100644 --- a/docs/helm/helm_serve.md +++ b/docs/helm/helm_serve.md @@ -1,6 +1,6 @@ ## helm serve -start a local http web server +Start a local http web server ### Synopsis @@ -26,26 +26,26 @@ helm serve [flags] ### Options ``` - --address string address to listen on (default "127.0.0.1:8879") + --address string Address to listen on (default "127.0.0.1:8879") -h, --help help for serve - --repo-path string local directory path from which to serve charts - --url string external URL of chart repository + --repo-path string Local directory path from which to serve charts + --url string External URL of chart repository ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_status.md b/docs/helm/helm_status.md index 9dca005fd..91c3a1427 100644 --- a/docs/helm/helm_status.md +++ b/docs/helm/helm_status.md @@ -1,6 +1,6 @@ ## helm status -displays the status of the named release +Displays the status of the named release ### Synopsis @@ -23,30 +23,30 @@ helm status [flags] RELEASE_NAME ``` -h, --help help for status - -o, --output string output the status in the specified format (json or yaml) - --revision int32 if set, display the status of the named release with revision - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + -o, --output string Prints the output in the specified format. 
Allowed values: table, json, yaml (default "table") + --revision int32 If set, display the status of the named release with revision + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 6-Sep-2019 diff --git a/docs/helm/helm_template.md b/docs/helm/helm_template.md index 805556096..8f890e27d 100644 --- a/docs/helm/helm_template.md +++ b/docs/helm/helm_template.md @@ -1,6 +1,6 @@ ## helm template -locally render templates +Locally render templates ### Synopsis @@ -24,35 +24,35 @@ helm template [flags] CHART ### Options ``` - -x, --execute stringArray only execute the given templates + -x, --execute stringArray Only execute the given templates -h, --help help for template - --is-upgrade set .Release.IsUpgrade instead of .Release.IsInstall - --kube-version string kubernetes version used as Capabilities.KubeVersion.Major/Minor (default "1.9") - -n, --name string release name (default "release-name") - --name-template string specify template used to name the release - --namespace string namespace to install the release into - --notes show the computed NOTES.txt file as well - --output-dir string writes the executed templates to files in output-dir instead of stdout - --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --set-file stringArray set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) - --set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - -f, --values valueFiles specify values in a YAML file (can specify multiple) (default []) + --is-upgrade Set .Release.IsUpgrade instead of .Release.IsInstall + --kube-version string Kubernetes version used as Capabilities.KubeVersion.Major/Minor (default "1.14") + -n, --name string Release name (default "release-name") + 
--name-template string Specify template used to name the release + --namespace string Namespace to install the release into + --notes Show the computed NOTES.txt file as well + --output-dir string Writes the executed templates to files in output-dir instead of stdout + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + -f, --values valueFiles Specify values in a YAML file (can specify multiple) (default []) ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. 
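To make the template flags above concrete, here is a hedged sketch of a local render; the chart path, release name, namespace, and value key are hypothetical placeholders, not taken from this document.

```shell
# Hypothetical sketch: render a chart locally (no Tiller involved) and write
# the results to ./rendered. All names below are placeholders.
helm template ./mychart \
  --name my-release \
  --namespace staging \
  --set image.tag=1.2.3 \
  --output-dir ./rendered
```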
-###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 17-Jun-2019 diff --git a/docs/helm/helm_test.md b/docs/helm/helm_test.md index e8ddfbc9b..3e1b53120 100644 --- a/docs/helm/helm_test.md +++ b/docs/helm/helm_test.md @@ -1,6 +1,6 @@ ## helm test -test a release +Test a release ### Synopsis @@ -18,32 +18,32 @@ helm test [RELEASE] [flags] ### Options ``` - --cleanup delete test pods upon completion + --cleanup Delete test pods upon completion -h, --help help for test - --parallel run test pods in parallel - --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + --parallel Run test pods in parallel + --timeout int Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 9-Nov-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_upgrade.md b/docs/helm/helm_upgrade.md index f18bcf6a7..35888d568 100644 --- a/docs/helm/helm_upgrade.md +++ b/docs/helm/helm_upgrade.md @@ -1,6 +1,6 @@ ## helm upgrade -upgrade a release +Upgrade a release ### Synopsis @@ -19,7 +19,7 @@ To customize the chart values, use any of - '--set-string' to provide key=val forcing val to be stored as a string, - '--set-file' to provide key=path to read a single large value from a file at path. 
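As a hedged illustration of combining these value-setting flags (the release name, chart reference, and keys below are hypothetical placeholders, not taken from this document):

```shell
# Hypothetical sketch: -f layers a values file, --set overrides a single value,
# and --set-string forces a value to be stored as a string.
helm upgrade my-release stable/nginx \
  -f values-production.yaml \
  --set image.tag=1.16.0 \
  --set-string buildNumber=20190901
```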
-To edit or append to the existing customized values, add the +To edit or append to the existing customized values, add the '--reuse-values' flag, otherwise any existing customized values are ignored. If no chart value arguments are provided on the command line, any existing customized values are carried @@ -65,54 +65,58 @@ helm upgrade [RELEASE] [CHART] [flags] ### Options ``` - --ca-file string verify certificates of HTTPS-enabled servers using this CA bundle - --cert-file string identify HTTPS client using this SSL certificate file - --description string specify the description to use for the upgrade, rather than the default - --devel use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. - --dry-run simulate an upgrade - --force force resource update through delete/recreate if needed + --atomic If set, upgrade process rolls back changes made in case of failed upgrade, also sets --wait flag + --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle + --cert-file string Identify HTTPS client using this SSL certificate file + --cleanup-on-fail Allow deletion of new resources created in this upgrade when upgrade failed + --description string Specify the description to use for the upgrade, rather than the default + --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored. + --dry-run Simulate an upgrade + --force Force resource update through delete/recreate if needed -h, --help help for upgrade - -i, --install if a release by this name doesn't already exist, run an install - --key-file string identify HTTPS client using this SSL key file - --keyring string path to the keyring that contains public signing keys (default "~/.gnupg/pubring.gpg") - --namespace string namespace to install the release into (only used if --install is set). Defaults to the current kube config namespace - --no-hooks disable pre/post upgrade hooks - --password string chart repository password where to locate the requested chart - --recreate-pods performs pods restart for the resource if applicable - --repo string chart repository url where to locate the requested chart - --reset-values when upgrading, reset the values to the ones built into the chart - --reuse-values when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored. 
- --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --set-file stringArray set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) - --set-string stringArray set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) - --timeout int time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote - --username string chart repository username where to locate the requested chart - -f, --values valueFiles specify values in a YAML file or a URL(can specify multiple) (default []) - --verify verify the provenance of the chart before upgrading - --version string specify the exact chart version to use. If this is not specified, the latest version is used - --wait if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout + -i, --install If a release by this name doesn't already exist, run an install + --key-file string Identify HTTPS client using this SSL key file + --keyring string Path to the keyring that contains public signing keys (default "~/.gnupg/pubring.gpg") + --namespace string Namespace to install the release into (only used if --install is set). Defaults to the current kube config namespace + --no-hooks Disable pre/post upgrade hooks + -o, --output string Prints the output in the specified format. Allowed values: table, json, yaml (default "table") + --password string Chart repository password where to locate the requested chart + --recreate-pods Performs pods restart for the resource if applicable + --render-subchart-notes Render subchart notes along with parent + --repo string Chart repository url where to locate the requested chart + --reset-values When upgrading, reset the values to the ones built into the chart + --reuse-values When upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored. 
+ --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --timeout int Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks) (default 300) + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote + --username string Chart repository username where to locate the requested chart + -f, --values valueFiles Specify values in a YAML file or a URL(can specify multiple) (default []) + --verify Verify the provenance of the chart before upgrading + --version string Specify the exact chart version to use. If this is not specified, the latest version is used + --wait If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. It will wait for as long as --timeout ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 24-Aug-2018 +###### Auto generated by spf13/cobra on 24-Sep-2019 diff --git a/docs/helm/helm_verify.md b/docs/helm/helm_verify.md index 30ed43679..98e8bda4f 100644 --- a/docs/helm/helm_verify.md +++ b/docs/helm/helm_verify.md @@ -1,13 +1,13 @@ ## helm verify -verify that a chart at the given path has been signed and is valid +Verify that a chart at the given path has been signed and is valid ### Synopsis Verify that the given chart has a valid provenance file. -Provenance files provide crytographic verification that a chart has not been +Provenance files provide cryptographic verification that a chart has not been tampered with, and was packaged by a trusted provider. This command can be used to verify a local chart. 
Several other commands provide @@ -23,23 +23,23 @@ helm verify [flags] PATH ``` -h, --help help for verify - --keyring string keyring containing public keys (default "~/.gnupg/pubring.gpg") + --keyring string Keyring containing public keys (default "~/.gnupg/pubring.gpg") ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 1-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/helm/helm_version.md b/docs/helm/helm_version.md index 33d33cf12..8be50ac96 100644 --- a/docs/helm/helm_version.md +++ b/docs/helm/helm_version.md @@ -1,6 +1,6 @@ ## helm version -print the client/server version information +Print the client/server version information ### Synopsis @@ -29,33 +29,33 @@ helm version [flags] ### Options ``` - -c, --client client version only + -c, --client Client version only -h, --help help for version - -s, --server server version only - --short print the version number - --template string template for version string format - --tls enable TLS for request - --tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem") - --tls-cert string path to TLS certificate file (default "$HELM_HOME/cert.pem") - --tls-hostname string the server name used to verify the hostname on the returned certificates from the server - --tls-key string path to TLS key file (default "$HELM_HOME/key.pem") - --tls-verify enable TLS for request and verify remote + -s, --server Server version only + --short Print the version number + --template string Template for version string format + --tls Enable TLS for request + --tls-ca-cert string Path to TLS CA certificate file (default "$HELM_HOME/ca.pem") + --tls-cert string Path to TLS certificate file (default "$HELM_HOME/cert.pem") + --tls-hostname string The server name used to verify the hostname on the returned certificates from the server + --tls-key string Path to TLS key file (default "$HELM_HOME/key.pem") + --tls-verify Enable TLS for request and verify remote ``` ### Options inherited from parent commands ``` - --debug enable verbose output - --home string location of your Helm config. Overrides $HELM_HOME (default "~/.helm") - --host string address of Tiller. 
Overrides $HELM_HOST - --kube-context string name of the kubeconfig context to use - --kubeconfig string absolute path to the kubeconfig file to use - --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) - --tiller-namespace string namespace of Tiller (default "kube-system") + --debug Enable verbose output + --home string Location of your Helm config. Overrides $HELM_HOME (default "~/.helm") + --host string Address of Tiller. Overrides $HELM_HOST + --kube-context string Name of the kubeconfig context to use + --kubeconfig string Absolute path of the kubeconfig file to be used + --tiller-connection-timeout int The duration (in seconds) Helm will wait to establish a connection to Tiller (default 300) + --tiller-namespace string Namespace of Tiller (default "kube-system") ``` ### SEE ALSO * [helm](helm.md) - The Helm package manager for Kubernetes. -###### Auto generated by spf13/cobra on 10-Aug-2018 +###### Auto generated by spf13/cobra on 16-May-2019 diff --git a/docs/history.md b/docs/history.md index 71e63c6b2..1a3d9dbe4 100644 --- a/docs/history.md +++ b/docs/history.md @@ -12,7 +12,7 @@ Differences from Helm Classic: - Helm's chart format has changed for the better: - Dependencies are immutable and stored inside of a chart's `charts/` directory. - - Charts are strongly versioned using [SemVer 2](http://semver.org/spec/v2.0.0.html) + - Charts are strongly versioned using [SemVer 2](https://semver.org/spec/v2.0.0.html) - Charts can be loaded from directories or from chart archive files - Helm supports Go templates without requiring you to run `generate` or `template` commands. diff --git a/docs/images/make-bucket-public.png b/docs/images/make-bucket-public.png index f8a5e17f0..e457e34b2 100644 Binary files a/docs/images/make-bucket-public.png and b/docs/images/make-bucket-public.png differ diff --git a/docs/install.md b/docs/install.md index b47aea6f1..06a319486 100755 --- a/docs/install.md +++ b/docs/install.md @@ -30,7 +30,7 @@ The Snap package for Helm is maintained by [Snapcrafters](https://github.com/snapcrafters/helm). ``` -$ sudo snap install helm --classic +sudo snap install helm --classic ``` ### From Homebrew (macOS) @@ -45,7 +45,7 @@ brew install kubernetes-helm (Note: There is also a formula for emacs-helm, which is a different project.) -### From Chocolatey (Windows) +### From Chocolatey or scoop (Windows) Members of the Kubernetes community have contributed a [Helm package](https://chocolatey.org/packages/kubernetes-helm) build to [Chocolatey](https://chocolatey.org/). This package is generally up to date. @@ -54,21 +54,27 @@ Members of the Kubernetes community have contributed a [Helm package](https://ch choco install kubernetes-helm ``` -## From Script +The binary can also be installed via [`scoop`](https://scoop.sh) command-line installer. + +``` +scoop install helm +``` + +### From Script Helm now has an installer script that will automatically grab the latest version -of the Helm client and [install it locally](https://raw.githubusercontent.com/helm/helm/master/scripts/get). +of the Helm client and [install it locally](https://git.io/get_helm.sh). You can fetch that script, and then execute it locally. It's well documented so that you can read through it and understand what it is doing before you run it. 
``` -$ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh +$ curl -LO https://git.io/get_helm.sh $ chmod 700 get_helm.sh $ ./get_helm.sh ``` -Yes, you can `curl https://raw.githubusercontent.com/helm/helm/master/scripts/get | bash` that if you want to live on the edge. +Yes, you can `curl -L https://git.io/get_helm.sh | bash` that if you want to live on the edge. ### From Canary Builds @@ -77,12 +83,12 @@ the latest master branch. They are not official releases, and may not be stable. However, they offer the opportunity to test the cutting edge features. -Canary Helm binaries are stored in the [Kubernetes Helm GCS bucket](https://kubernetes-helm.storage.googleapis.com). +Canary Helm binaries are stored at [get.helm.sh](https://get.helm.sh). Here are links to the common builds: -- [Linux AMD64](https://kubernetes-helm.storage.googleapis.com/helm-canary-linux-amd64.tar.gz) -- [macOS AMD64](https://kubernetes-helm.storage.googleapis.com/helm-canary-darwin-amd64.tar.gz) -- [Experimental Windows AMD64](https://kubernetes-helm.storage.googleapis.com/helm-canary-windows-amd64.zip) +- [Linux AMD64](https://get.helm.sh/helm-canary-linux-amd64.tar.gz) +- [macOS AMD64](https://get.helm.sh/helm-canary-darwin-amd64.tar.gz) +- [Experimental Windows AMD64](https://get.helm.sh/helm-canary-windows-amd64.zip) ### From Source (Linux, macOS) @@ -117,7 +123,7 @@ configured to talk to a remote Kubernetes cluster. Most cloud providers enable a feature called Role-Based Access Control - RBAC for short. If your cloud provider enables this feature, you will need to create a service account for Tiller with the right roles and permissions to access resources. -Check the [Kubernetes Distribution Guide](kubernetes_distros.md) to see if there's any further points of interest on using Helm with your cloud provider. Also check out the guide on [Tiller and Role-Based Access Control](rbac.md) for more information on how to run Tiller in an RBAC-enabled Kubernetes cluster. +Check the [Kubernetes Distribution Guide](#kubernetes-distribution-guide) to see if there's any further points of interest on using Helm with your cloud provider. Also check out the guide on [Tiller and Role-Based Access Control](rbac.md) for more information on how to run Tiller in an RBAC-enabled Kubernetes cluster. ### Easy In-Cluster Installation @@ -274,7 +280,7 @@ helm init --override metadata.annotations."deployment\.kubernetes\.io/revision"= Output: ``` -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: annotations: @@ -331,7 +337,7 @@ The Tiller installation is skipped and the manifest is output to stdout in JSON format. ``` -"apiVersion": "extensions/v1beta1", +"apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "creationTimestamp": null, @@ -347,10 +353,13 @@ in JSON format. ### Storage backends By default, `tiller` stores release information in `ConfigMaps` in the namespace -where it is running. As of Helm 2.7.0, there is now a beta storage backend that +where it is running. + +#### Secret storage backend +As of Helm 2.7.0, there is now a beta storage backend that uses `Secrets` for storing release information. This was added for additional -security in protecting charts in conjunction with the release of `Secret` -encryption in Kubernetes. +security in protecting charts in conjunction with the release of `Secret` +encryption in Kubernetes. 
To enable the secrets backend, you'll need to init Tiller with the following options: @@ -363,6 +372,31 @@ Currently, if you want to switch from the default backend to the secrets backend, you'll have to do the migration for this on your own. When this backend graduates from beta, there will be a more official path of migration +#### SQL storage backend +As of Helm 2.14.0 there is now a beta SQL storage backend that stores release +information in an SQL database (only postgres has been tested so far). + +Using such a storage backend is particularly useful if your release information +weighs more than 1MB (in which case, it can't be stored in ConfigMaps/Secrets +because of internal limits in Kubernetes' underlying etcd key-value store). + +To enable the SQL backend, you'll need to deploy a SQL database and init Tiller +with the following options: + +```shell +helm init \ + --override \ + 'spec.template.spec.containers[0].args'='{--storage=sql,--sql-dialect=postgres,--sql-connection-string=postgresql://tiller-postgres:5432/helm?user=helm&password=changeme}' +``` + +**PRODUCTION NOTES**: it's recommended to change the username and password of +the SQL database in production deployments. Enabling SSL is also a good idea. +Last, but not least, perform regular backups/snapshots of your SQL database. + +Currently, if you want to switch from the default backend to the SQL backend, +you'll have to do the migration for this on your own. When this backend +graduates from beta, there will be a more official migration path. + ## Conclusion In most cases, installation is as simple as getting a pre-built `helm` binary diff --git a/docs/install_faq.md b/docs/install_faq.md index d4840417f..c1ac5e6af 100644 --- a/docs/install_faq.md +++ b/docs/install_faq.md @@ -13,8 +13,7 @@ I want to know more about my downloading options. **Q: I can't get to GitHub releases of the newest Helm. Where are they?** -A: We no longer use GitHub releases. Binaries are now stored in a -[GCS public bucket](https://kubernetes-helm.storage.googleapis.com). +Binaries are stored at [get.helm.sh](https://get.helm.sh). **Q: Why aren't there Debian/Fedora/... native packages of Helm?** diff --git a/docs/kubernetes_distros.md b/docs/kubernetes_distros.md index bb14043da..728525f38 100644 --- a/docs/kubernetes_distros.md +++ b/docs/kubernetes_distros.md @@ -6,6 +6,10 @@ environments. We are trying to add more details to this document. Please contribute via Pull Requests if you can. +## MicroK8s + +Helm can be enabled in [MicroK8s](https://microk8s.io) using the command: `microk8s.enable helm` + ## MiniKube Helm is tested and known to work with [minikube](https://github.com/kubernetes/minikube). @@ -22,11 +26,20 @@ Google's GKE hosted Kubernetes platform enables RBAC by default. You will need t See [Tiller and role-based access control](https://docs.helm.sh/using_helm/#role-based-access-control) for more information. +## AKS + +Helm works with [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-helm). If using an RBAC-enabled AKS cluster, you need [a service account and role binding for the Tiller service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-helm#create-a-service-account). + +## IKS + +Helm works with [IBM Cloud Kubernetes Service](https://cloud.ibm.com/docs/containers?topic=containers-getting-started). 
IKS cluster enables RBAC by default and this means you will need [a service account and role binding for the Tiller service](https://cloud.ibm.com/docs/containers?topic=containers-helm#public_helm_install). + ## Ubuntu with 'kubeadm' Kubernetes bootstrapped with `kubeadm` is known to work on the following Linux distributions: +- Arch Linux - Ubuntu 16.04 - Fedora release 25 @@ -49,3 +62,11 @@ Helm Client and Helm Server (Tiller) are pre-installed with [Platform9 Managed K Helm (both client and server) has been tested and is working on Mesospheres DC/OS 1.11 Kubernetes platform, and requires no additional configuration. + +## Kubermatic + +Helm works in user clusters that are created by Kubermatic without caveats. Since seed cluster can be setup up in different ways Helm support depends on them. + +## KubeOne + +Helm works in clusters that are set up by KubeOne without caveats. diff --git a/docs/man/man1/helm_dependency_build.1 b/docs/man/man1/helm_dependency_build.1 index adc225a81..ef92e8959 100644 --- a/docs/man/man1/helm_dependency_build.1 +++ b/docs/man/man1/helm_dependency_build.1 @@ -19,13 +19,12 @@ Build out the charts/ directory from the requirements.lock file. .PP Build is used to reconstruct a chart's dependencies to the state specified in -the lock file. This will not re\-negotiate dependencies, as 'helm dependency update' -does. +the lock file. .PP -If no lock file is found, 'helm dependency build' will mirror the behavior -of 'helm dependency update'. - +If no lock file is found, 'helm dependency build' will mirror the behavior of +the 'helm dependency update' command. This means it will update the on-disk +dependencies to mirror the requirements.yaml file and generate a lock file. .SH OPTIONS .PP diff --git a/docs/man/man1/helm_install.1 b/docs/man/man1/helm_install.1 index fe1856bed..df4011a62 100644 --- a/docs/man/man1/helm_install.1 +++ b/docs/man/man1/helm_install.1 @@ -80,7 +80,7 @@ the '\-\-debug' and '\-\-dry\-run' flags can be combined. This will still requir round\-trip to the Tiller server. .PP -If \-\-verify is set, the chart MUST have a provenance file, and the provenenace +If \-\-verify is set, the chart MUST have a provenance file, and the provenance fall MUST pass all verification steps. .PP diff --git a/docs/man/man1/helm_verify.1 b/docs/man/man1/helm_verify.1 index 5297924ae..341449ad8 100644 --- a/docs/man/man1/helm_verify.1 +++ b/docs/man/man1/helm_verify.1 @@ -18,7 +18,7 @@ helm\-verify \- verify that a chart at the given path has been signed and is val Verify that the given chart has a valid provenance file. .PP -Provenance files provide crytographic verification that a chart has not been +Provenance files provide cryptographic verification that a chart has not been tampered with, and was packaged by a trusted provider. .PP diff --git a/docs/plugins.md b/docs/plugins.md index 3087d1b39..5f5d163ce 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -144,6 +144,11 @@ The defined command will be invoked with the following scheme: repo definition, stored in `$HELM_HOME/repository/repositories.yaml`. Downloader plugin is expected to dump the raw content to stdout and report errors on stderr. +The downloader command also supports sub-commands or arguments, allowing you to specify +for example `bin/mydownloader subcommand -d` in the `plugin.yaml`. This is useful +if you want to use the same executable for the main plugin command and the downloader +command, but with a different sub-command for each. 
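As an illustration of that downloader layout (hypothetical names throughout, shown as a shell heredoc to stay consistent with the other command examples in these docs), the relevant `plugin.yaml` stanza could look roughly like this:

```shell
# Sketch: one executable backs both the regular plugin command and the
# protocol downloader; only the sub-command differs. "mydownloader" and
# "myprotocol" are placeholders, not real plugins.
cat > plugin.yaml << 'EOF'
name: "mydownloader"
version: "0.1.0"
command: "$HELM_PLUGIN_DIR/bin/mydownloader plugin"
downloaders:
- command: "bin/mydownloader subcommand -d"
  protocols:
  - "myprotocol"
EOF
```

With a definition along these lines, Helm would call the downloader form for `myprotocol://` repository URLs and the plain `command` when the plugin is invoked directly.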
+ ## Environment Variables When Helm executes a plugin, it passes the outer environment to the plugin, and diff --git a/docs/provenance.md b/docs/provenance.md index 163e72842..3f259a391 100644 --- a/docs/provenance.md +++ b/docs/provenance.md @@ -180,7 +180,7 @@ The following pieces of provenance data are added: * The signature (SHA256, just like Docker) of the chart package (the .tgz file) is included, and may be used to verify the integrity of the chart package. * The entire body is signed using the algorithm used by PGP (see - [http://keybase.io] for an emerging way of making crypto signing and + [https://keybase.io] for an emerging way of making crypto signing and verification easy). The combination of this gives users the following assurances: @@ -202,7 +202,7 @@ keywords: - proxy source: - https://github.com/foo/bar -home: http://nginx.com +home: https://nginx.com ... files: @@ -221,7 +221,7 @@ first is the Chart.yaml. The second is the checksums, a map of filenames to SHA-256 digests (value shown is fake/truncated) The signature block is a standard PGP signature, which provides [tamper -resistance](http://www.rossde.com/PGP/pgp_signatures.html). +resistance](https://www.rossde.com/PGP/pgp_signatures.html). ## Chart Repositories diff --git a/docs/quickstart.md b/docs/quickstart.md index ef3cb460e..6e760ced8 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -54,9 +54,11 @@ Once you have Helm ready, you can initialize the local CLI and also install Tiller into your Kubernetes cluster in one step: ```console -$ helm init +$ helm init --history-max 200 ``` +**TIP:** Setting `--history-max` on helm init is recommended as configmaps and other objects in helm history can grow large in number if not purged by max limit. Without a max history set the history is kept indefinitely, leaving a large number of records for helm and tiller to maintain. + This will install Tiller into the Kubernetes cluster you saw with `kubectl config current-context`. diff --git a/docs/rbac.md b/docs/rbac.md index 4b39ecdc6..a89579c61 100644 --- a/docs/rbac.md +++ b/docs/rbac.md @@ -43,7 +43,7 @@ _Note: The cluster-admin role is created by default in a Kubernetes cluster, so $ kubectl create -f rbac-config.yaml serviceaccount "tiller" created clusterrolebinding "tiller" created -$ helm init --service-account tiller +$ helm init --service-account tiller --history-max 200 ``` ### Example: Deploy Tiller in a namespace, restricted to deploying resources only in that namespace @@ -106,9 +106,8 @@ $ helm init --service-account tiller --tiller-namespace tiller-world $HELM_HOME has been configured at /Users/awesome-user/.helm. Tiller (the Helm server side component) has been installed into your Kubernetes Cluster. -Happy Helming! -$ helm install nginx --tiller-namespace tiller-world --namespace tiller-world +$ helm install stable/lamp --tiller-namespace tiller-world --namespace tiller-world NAME: wayfaring-yak LAST DEPLOYED: Mon Aug 7 16:00:16 2017 NAMESPACE: tiller-world diff --git a/docs/related.md b/docs/related.md index c6735e0ea..63919790b 100644 --- a/docs/related.md +++ b/docs/related.md @@ -8,17 +8,18 @@ or [pull request](https://github.com/helm/helm/pulls). 
## Article, Blogs, How-Tos, and Extra Documentation - [Awesome Helm](https://github.com/cdwv/awesome-helm) - List of awesome Helm resources -- [CI/CD with Kubernetes, Helm & Wercker ](http://www.slideshare.net/Diacode/cicd-with-kubernetes-helm-wercker-madscalability) +- [CI/CD with Kubernetes, Helm & Wercker ](https://www.slideshare.net/Diacode/cicd-with-kubernetes-helm-wercker-madscalability) - [Creating a Helm Plugin in 3 Steps](http://technosophos.com/2017/03/21/creating-a-helm-plugin.html) -- [Deploying Kubernetes Applications with Helm](http://cloudacademy.com/blog/deploying-kubernetes-applications-with-helm/) +- [Deploying Kubernetes Applications with Helm](https://cloudacademy.com/blog/deploying-kubernetes-applications-with-helm/) - [GitLab, Consumer Driven Contracts, Helm and Kubernetes](https://medium.com/@enxebre/gitlab-consumer-driven-contracts-helm-and-kubernetes-b7235a60a1cb#.xwp1y4tgi) - [Honestbee's Helm Chart Conventions](https://gist.github.com/so0k/f927a4b60003cedd101a0911757c605a) - [Releasing backward-incompatible changes: Kubernetes, Jenkins, Prometheus Operator, Helm and Traefik](https://medium.com/@enxebre/releasing-backward-incompatible-changes-kubernetes-jenkins-plugin-prometheus-operator-helm-self-6263ca61a1b1#.e0c7elxhq) -- [The Missing CI/CD Kubernetes Component: Helm package manager](https://hackernoon.com/the-missing-ci-cd-kubernetes-component-helm-package-manager-1fe002aac680#.691sk2zhu) +- [The Missing CI/CD Kubernetes Component: Helm package manager](https://medium.com/@gajus/the-missing-ci-cd-kubernetes-component-helm-package-manager-1fe002aac680) - [Using Helm to Deploy to Kubernetes](https://daemonza.github.io/2017/02/20/using-helm-to-deploy-to-kubernetes/) - [Writing a Helm Chart](https://www.influxdata.com/packaged-kubernetes-deployments-writing-helm-chart/) - [A basic walk through Kubernetes Helm](https://github.com/muffin87/helm-tutorial) - [Tillerless Helm v2](https://rimusz.net/tillerless-helm/) +- [Generating Certificate Authorities and Certificates using Terraform](https://github.com/jbussdieker/tiller-ssl-terraform) ## Video, Audio, and Podcast @@ -37,7 +38,7 @@ or [pull request](https://github.com/helm/helm/pulls). - [helm-cos](https://github.com/imroc/helm-cos) - Plugin to manage repositories on Tencent Cloud Object Storage - [helm-edit](https://github.com/mstrzele/helm-edit) - Plugin for editing release's values - [helm-env](https://github.com/adamreese/helm-env) - Plugin to show current environment -- [helm-gcs](https://github.com/nouney/helm-gcs) - Plugin to manage repositories on Google Cloud Storage +- [helm-gcs](https://github.com/hayorov/helm-gcs) - Plugin to manage repositories on Google Cloud Storage - [helm-github](https://github.com/sagansystems/helm-github) - Plugin to install Helm Charts from Github repositories - [helm-hashtag](https://github.com/balboah/helm-hashtag) - Plugin for tracking docker tag hash digests as values - [helm-inject](https://github.com/maorfr/helm-inject) - Plugin for injecting additional configurations during release upgrade @@ -50,9 +51,11 @@ or [pull request](https://github.com/helm/helm/pulls). 
- [helm-plugin-utils](https://github.com/maorfr/helm-plugin-utils) - Utility functions to be used within Helm plugins - [helm-restore](https://github.com/maorfr/helm-restore) - Plugin to restore a deployed release to its original state - [helm-secrets](https://github.com/futuresimple/helm-secrets) - Plugin to manage and store secrets safely +- [helm-ssm](https://github.com/codacy/helm-ssm) - Plugin to inject values coming from AWS SSM parameters on the `values.yaml` file - [helm-stop](https://github.com/IBM/helm-stop) - Plugin for stopping a release pods - [helm-template](https://github.com/technosophos/helm-template) - Debug/render templates client-side - [helm-tiller](https://github.com/adamreese/helm-tiller) - Additional commands to work with Tiller +- [helm-tiller-info](https://github.com/maorfr/helm-tiller-info) - Plugin which prints information about Tiller - [helm-unittest](https://github.com/lrills/helm-unittest) - Plugin for unit testing chart locally with YAML - [Tillerless Helm v2](https://github.com/rimusz/helm-tiller) - Helm plugin for using Tiller locally and in CI/CD pipelines @@ -65,9 +68,9 @@ Tools layered on top of Helm or Tiller. - [AppsCode Swift](https://github.com/appscode/swift) - Ajax friendly Helm Tiller Proxy using [grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) - [Armada](https://github.com/att-comdev/armada) - Manage prefixed releases throughout various Kubernetes namespaces, and removes completed jobs for complex deployments. Used by the [Openstack-Helm](https://github.com/openstack/openstack-helm) team. -- [Autohelm](https://github.com/reactiveops/autohelm) - Autohelm is _another_ simple declarative spec for deploying helm charts. Written in python and supports git urls as a source for helm charts. - [ChartMuseum](https://github.com/chartmuseum/chartmuseum) - Helm Chart Repository with support for Amazon S3 and Google Cloud Storage - [Chartify](https://github.com/appscode/chartify) - Generate Helm charts from existing Kubernetes resources. +- [Cloudsmith](https://cloudsmith.io/l/helm-repository/) - Fully managed SaaS offering private Helm Chart Repositories - [Codefresh](https://codefresh.io) - Kubernetes native CI/CD and management platform with UI dashboards for managing Helm charts and releases - [Cog](https://github.com/ohaiwalt/cog-helm) - Helm chart to deploy Cog on Kubernetes - [Drone.io Helm Plugin](http://plugins.drone.io/ipedrazas/drone-helm/) - Run Helm inside of the Drone CI/CD system @@ -77,8 +80,9 @@ Tools layered on top of Helm or Tiller. - [Helmsman](https://github.com/Praqma/helmsman) - Helmsman is a helm-charts-as-code tool which enables installing/upgrading/protecting/moving/deleting releases from version controlled desired state files (described in a simple TOML format). - [Landscaper](https://github.com/Eneco/landscaper/) - "Landscaper takes a set of Helm Chart references with values (a desired state), and realizes this in a Kubernetes cluster." - [Monocular](https://github.com/helm/monocular) - Web UI for Helm Chart repositories -- [Orca](https://github.com/maorfr/orca) - Advanced CI\CD tool for Kubernetes and Helm made simple. +- [Orca](https://github.com/nuvo/orca) - Advanced CI\CD tool for Kubernetes and Helm made simple. - [Quay App Registry](https://coreos.com/blog/quay-application-registry-for-kubernetes.html) - Open Kubernetes application registry, including a Helm access client +- [Reckoner](https://github.com/reactiveops/reckoner) - Reckoner (formerly Autohelm) is a tool for declarative management of helm releases. 
Written in python and supports git urls as a source for helm charts. - [Rudder](https://github.com/AcalephStorage/rudder) - RESTful (JSON) proxy for Tiller's API - [Schelm](https://github.com/databus23/schelm) - Render a Helm manifest to a directory - [Shipper](https://github.com/bookingcom/shipper) - Multi-cluster canary or blue-green rollouts using Helm @@ -88,9 +92,9 @@ Tools layered on top of Helm or Tiller. Platforms, distributions, and services that include Helm support. -- [Cabin](http://www.skippbox.com/cabin/) - Mobile App for Managing Kubernetes +- [Codefresh](https://codefresh.io/) - A CI/CD solution designed specifically for Docker/Kubernetes/Helm. Includes a private Helm repository and graphical dashboards for Helm charts, Helm releases and Helm environments. - [Fabric8](https://fabric8.io) - Integrated development platform for Kubernetes -- [Jenkins X](http://jenkins-x.io/) - open source automated CI/CD for Kubernetes which uses Helm for [promoting](http://jenkins-x.io/about/features/#promotion) applications through [environments via GitOps](http://jenkins-x.io/about/features/#environments) +- [Jenkins X](https://jenkins-x.io/) - open source automated CI/CD for Kubernetes which uses Helm for [promoting](https://jenkins-x.io/about/features/#promotion) applications through [environments via GitOps](https://jenkins-x.io/about/features/#environments) - [Kubernetic](https://kubernetic.com/) - Kubernetes Desktop Client - [Qstack](https://qstack.com) diff --git a/docs/release_checklist.md b/docs/release_checklist.md index c69db9d21..0d877fc66 100644 --- a/docs/release_checklist.md +++ b/docs/release_checklist.md @@ -3,8 +3,8 @@ **IMPORTANT**: If your experience deviates from this document, please document the changes to keep it up-to-date. ## Release Meetings -As part of the release process, two of the weekly developer calls will be co-opted -as "release meetings." +As part of the release process, two of the weekly developer calls will be +co-opted as "release meetings." ### Start of the Release Cycle The first developer call after a release will be used as the release meeting to @@ -17,17 +17,19 @@ identified: - Any other important details for the community All of this information should be added to the GitHub milestone for the given -release. This should give the community and maintainers a clear set of guidelines -to follow when choosing whether or not to add issues and PRs to a given release. +release. This should give the community and maintainers a clear set of +guidelines to follow when choosing whether or not to add issues and PRs to a +given release. ### End (almost) of the Release Cycle The developer call closest to two weeks before the scheduled release date will be used to review any remaining PRs that should be pulled into the release. This -is the place to debate whether or not we should wait before cutting a release and -any other concerns. At the end of this meeting, if the release date has not been -pushed out, the first RC should be cut. Subsequent developer calls in between this -meeting and the release date should have some time set aside to see if any bugs -were found. Once the release date is reached, the final release can be cut +is the place to debate whether or not we should wait before cutting a release +and any other concerns. At the end of this meeting, if the release date has not +been pushed out, the first RC should be cut. 
Subsequent developer calls in +between this meeting and the release date should have some time set aside to see +if any bugs were found. Once the release date is reached, the final release can +be cut ## A Maintainer's Guide to Releasing Helm @@ -37,17 +39,28 @@ So you're in charge of a new release for Helm? Cool. Here's what to do... Just kidding! :trollface: -All releases will be of the form vX.Y.Z where X is the major version number, Y is the minor version number and Z is the patch release number. This project strictly follows [semantic versioning](http://semver.org/) so following this step is critical. +All releases will be of the form vX.Y.Z where X is the major version number, Y +is the minor version number and Z is the patch release number. This project +strictly follows [semantic versioning](https://semver.org/) so following this +step is critical. -It is important to note that this document assumes that the git remote in your repository that corresponds to "https://github.com/helm/helm" is named "upstream". If yours is not (for example, if you've chosen to name it "origin" or something similar instead), be sure to adjust the listed snippets for your local environment accordingly. If you are not sure what your upstream remote is named, use a command like `git remote -v` to find out. +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/helm/helm" is named +"upstream". If yours is not (for example, if you've chosen to name it "origin" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. -If you don't have an upstream remote, you can add one easily using something like: +If you don't have an upstream remote, you can add one easily using something +like: ```shell git remote add upstream git@github.com:helm/helm.git ``` -In this doc, we are going to reference a few environment variables as well, which you may want to set for convenience. For major/minor releases, use the following: +In this doc, we are going to reference a few environment variables as well, +which you may want to set for convenience. For major/minor releases, use the +following: ```shell export RELEASE_NAME=vX.Y.0 @@ -64,11 +77,28 @@ export RELEASE_BRANCH_NAME="release-X.Y" export RELEASE_CANDIDATE_NAME="$RELEASE_NAME-rc.1" ``` +We are also going to be adding security and verification of the release process by +hashing the binaries and providing signature files. We perform this using +[GitHub and GPG](https://help.github.com/en/articles/about-commit-signature-verification). +If you do not have GPG already setup you can follow these steps: +1. [Install GPG](https://gnupg.org/index.html) +2. [Generate GPG key](https://help.github.com/en/articles/generating-a-new-gpg-key) +3. [Add key to GitHub account](https://help.github.com/en/articles/adding-a-new-gpg-key-to-your-github-account) +4. [Set signing key in Git](https://help.github.com/en/articles/telling-git-about-your-signing-key) + +Once you have a signing key you need to add it to the KEYS file at the root of +the repository. The instructions for adding it to the KEYS file are in the file. +If you have not done so already, you need to add your public key to the keyserver +network. 
If you use GnuPG you can follow the [instructions provided by Debian](https://debian-administration.org/article/451/Submitting_your_GPG_key_to_a_keyserver). + ## 1. Create the Release Branch ### Major/Minor Releases -Major releases are for new feature additions and behavioral changes *that break backwards compatibility*. Minor releases are for new feature additions that do not break backwards compatibility. To create a major or minor release, start by creating a `release-vX.Y.0` branch from master. +Major releases are for new feature additions and behavioral changes *that break +backwards compatibility*. Minor releases are for new feature additions that do +not break backwards compatibility. To create a major or minor release, start by +creating a `release-vX.Y.0` branch from master. ```shell git fetch upstream @@ -76,11 +106,13 @@ git checkout upstream/master git checkout -b $RELEASE_BRANCH_NAME ``` -This new branch is going to be the base for the release, which we are going to iterate upon later. +This new branch is going to be the base for the release, which we are going to +iterate upon later. ### Patch releases -Patch releases are a few critical cherry-picked fixes to existing releases. Start by creating a `release-vX.Y.Z` branch from the latest patch release. +Patch releases are a few critical cherry-picked fixes to existing releases. +Start by creating a `release-vX.Y.Z` branch from the latest patch release. ```shell git fetch upstream --tags @@ -88,7 +120,8 @@ git checkout $PREVIOUS_PATCH_RELEASE git checkout -b $RELEASE_BRANCH_NAME ``` -From here, we can cherry-pick the commits we want to bring into the patch release: +From here, we can cherry-pick the commits we want to bring into the patch +release: ```shell # get the commits ids we want to cherry-pick @@ -98,11 +131,13 @@ git cherry-pick -x git cherry-pick -x ``` -This new branch is going to be the base for the release, which we are going to iterate upon later. +This new branch is going to be the base for the release, which we are going to +iterate upon later. ## 2. Change the Version Number in Git -When doing a minor release, make sure to update pkg/version/version.go with the new release version. +When doing a minor release, make sure to update pkg/version/version.go with the +new release version. ```shell $ git diff pkg/version/version.go @@ -126,64 +161,103 @@ git add . git commit -m "bump version to $RELEASE_CANDIDATE_NAME" ``` +This will update it for the $RELEASE_BRANCH_NAME only. You will also need to pull +this change into the master branch for when the next release is being created. + +```shell +# get the last commit id i.e. commit to bump the version +git log --format="%H" -n 1 + +# create new branch off master +git checkout master +git checkout -b bump-version- + +# cherry pick the commit using id from first command +git cherry-pick -x + +# commit the change +git push origin bump-version- +``` + ## 3. Commit and Push the Release Branch -In order for others to start testing, we can now push the release branch upstream and start the test process. +In order for others to start testing, we can now push the release branch +upstream and start the test process. ```shell git push upstream $RELEASE_BRANCH_NAME ``` -Make sure to check [helm on CircleCI](https://circleci.com/gh/helm/helm) and make sure the release passed CI before proceeding. +Make sure to check [helm on CircleCI](https://circleci.com/gh/helm/helm) and +make sure the release passed CI before proceeding. 
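Since the next step creates the first signed tag, this is also a convenient point to confirm that Git is actually wired up to the GPG key added to the KEYS file earlier; a minimal sketch, assuming GnuPG:

```shell
# Sketch: sanity-check the signing setup before running `git tag --sign`.
# List local secret keys and note the long key ID you intend to use.
gpg --list-secret-keys --keyid-format LONG

# Print the key Git currently uses for signed tags/commits, and set it if empty.
git config --global user.signingkey
git config --global user.signingkey <YOUR_KEY_ID>
```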
-If anyone is available, let others peer-review the branch before continuing to ensure that all the proper changes have been made and all of the commits for the release are there. +If anyone is available, let others peer-review the branch before continuing to +ensure that all the proper changes have been made and all of the commits for the +release are there. ## 4. Create a Release Candidate -Now that the release branch is out and ready, it is time to start creating and iterating on release candidates. +Now that the release branch is out and ready, it is time to start creating and +iterating on release candidates. ```shell git tag --sign --annotate "${RELEASE_CANDIDATE_NAME}" --message "Helm release ${RELEASE_CANDIDATE_NAME}" git push upstream $RELEASE_CANDIDATE_NAME ``` -CircleCI will automatically create a tagged release image and client binary to test with. +CircleCI will automatically create a tagged release image and client binary to +test with. -For testers, the process to start testing after CircleCI finishes building the artifacts involves the following steps to grab the client from Google Cloud Storage: +For testers, the process to start testing after CircleCI finishes building the +artifacts involves the following steps to grab the client: linux/amd64, using /bin/bash: ```shell -wget https://kubernetes-helm.storage.googleapis.com/helm-$RELEASE_CANDIDATE_NAME-linux-amd64.tar.gz +wget https://get.helm.sh/helm-$RELEASE_CANDIDATE_NAME-linux-amd64.tar.gz ``` darwin/amd64, using Terminal.app: ```shell -wget https://kubernetes-helm.storage.googleapis.com/helm-$RELEASE_CANDIDATE_NAME-darwin-amd64.tar.gz +wget https://get.helm.sh/helm-$RELEASE_CANDIDATE_NAME-darwin-amd64.tar.gz ``` windows/amd64, using PowerShell: ```shell -PS C:\> Invoke-WebRequest -Uri "https://kubernetes-helm.storage.googleapis.com/helm-$RELEASE_CANDIDATE_NAME-windows-amd64.zip" -OutFile "helm-$ReleaseCandidateName-windows-amd64.zip" +PS C:\> Invoke-WebRequest -Uri "https://get.helm.sh/helm-$RELEASE_CANDIDATE_NAME-windows-amd64.zip" -OutFile "helm-$ReleaseCandidateName-windows-amd64.zip" ``` -Then, unpack and move the binary to somewhere on your $PATH, or move it somewhere and add it to your $PATH (e.g. /usr/local/bin/helm for linux/macOS, C:\Program Files\helm\helm.exe for Windows). +Then, unpack and move the binary to somewhere on your $PATH, or move it +somewhere and add it to your $PATH (e.g. /usr/local/bin/helm for linux/macOS, +C:\Program Files\helm\helm.exe for Windows). ## 5. Iterate on Successive Release Candidates -Spend several days explicitly investing time and resources to try and break helm in every possible way, documenting any findings pertinent to the release. This time should be spent testing and finding ways in which the release might have caused various features or upgrade environments to have issues, not coding. During this time, the release is in code freeze, and any additional code changes will be pushed out to the next release. +Spend several days explicitly investing time and resources to try and break helm +in every possible way, documenting any findings pertinent to the release. This +time should be spent testing and finding ways in which the release might have +caused various features or upgrade environments to have issues, not coding. +During this time, the release is in code freeze, and any additional code changes +will be pushed out to the next release. -During this phase, the $RELEASE_BRANCH_NAME branch will keep evolving as you will produce new release candidates. 
The frequency of new candidates is up to the release manager: use your best judgement taking into account the severity of reported issues, testers' availability, and the release deadline date. Generally speaking, it is better to let a release roll over the deadline than to ship a broken release. +During this phase, the $RELEASE_BRANCH_NAME branch will keep evolving as you +will produce new release candidates. The frequency of new candidates is up to +the release manager: use your best judgement taking into account the severity of +reported issues, testers' availability, and the release deadline date. Generally +speaking, it is better to let a release roll over the deadline than to ship a +broken release. -Each time you'll want to produce a new release candidate, you will start by adding commits to the branch by cherry-picking from master: +Each time you'll want to produce a new release candidate, you will start by +adding commits to the branch by cherry-picking from master: ```shell git cherry-pick -x ``` -You will also want to update the release version number and the CHANGELOG as we did in steps 2 and 3 as separate commits. +You will also want to update the release version number and the CHANGELOG as we +did in steps 2 and 3 as separate commits. After that, tag it and notify users of the new release candidate: @@ -197,7 +271,9 @@ From here on just repeat this process, continuously testing until you're happy w ## 6. Finalize the Release -When you're finally happy with the quality of a release candidate, you can move on and create the real thing. Double-check one last time to make sure everything is in order, then finally push the release tag. +When you're finally happy with the quality of a release candidate, you can move +on and create the real thing. Double-check one last time to make sure everything +is in order, then finally push the release tag. ```shell git checkout $RELEASE_BRANCH_NAME @@ -205,11 +281,37 @@ git tag --sign --annotate "${RELEASE_NAME}" --message "Helm release ${RELEASE_NA git push upstream $RELEASE_NAME ``` -## 7. Write the Release Notes +Verify that the release succeeded in CI. If not, you will need to fix the +release and push the release again. + +## 7. PGP Sign the downloads + +While hashes give assurance that the content of the downloads is what was +generated, signed packages provide traceability of where the package came +from. + +To do this, run the following `make` commands: + +```shell +export VERSION="$RELEASE_NAME" +make clean +make fetch-dist +make sign +``` + +This will generate ascii armored signature files for each of the files pushed by CI. + +All of the signature files need to be uploaded to the release on GitHub. + +## 8. Write the Release Notes -We will auto-generate a changelog based on the commits that occurred during a release cycle, but it is usually more beneficial to the end-user if the release notes are hand-written by a human being/marketing team/dog. +We will auto-generate a changelog based on the commits that occurred during a +release cycle, but it is usually more beneficial to the end-user if the release +notes are hand-written by a human being/marketing team/dog. -If you're releasing a major/minor release, listing notable user-facing features is usually sufficient.
For patch releases, do the same, but make note of the +symptoms and who is affected. An example release note for a minor release would look like this: @@ -226,18 +328,25 @@ The community keeps growing, and we'd love to see you there! - Hang out at the Public Developer Call: Thursday, 9:30 Pacific via [Zoom](https://zoom.us/j/696660622) - Test, debug, and contribute charts: [GitHub/helm/charts](https://github.com/helm/charts) +## Features and Changes + +- Major +- features +- list +- here + ## Installation and Upgrading Download Helm X.Y. The common platform binaries are here: -- [MacOS amd64](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-darwin-amd64.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-darwin-amd64.tar.gz.sha256)) -- [Linux amd64](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-amd64.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-amd64.tar.gz.sha256)) -- [Linux arm](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-arm.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-arm.tar.gz.sha256)) -- [Linux arm64](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-arm64.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-arm64.tar.gz.sha256)) -- [Linux i386](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-386.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-386.tar.gz.sha256)) -- [Linux ppc64le](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-ppc64le.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-ppc64le.tar.gz.sha256)) -- [Linux s390x](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-s390x.tar.gz) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-linux-s390x.tar.gz.sha256)) -- [Windows amd64](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-windows-amd64.zip) ([checksum](https://storage.googleapis.com/kubernetes-helm/helm-vX.Y.Z-windows-amd64.zip.sha256)) +- [MacOS amd64](https://get.helm.sh/helm-vX.Y.Z-darwin-amd64.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-darwin-amd64.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux amd64](https://get.helm.sh/helm-vX.Y.Z-linux-amd64.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-amd64.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux arm](https://get.helm.sh/helm-vX.Y.Z-linux-arm.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-arm.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux arm64](https://get.helm.sh/helm-vX.Y.Z-linux-arm64.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-arm64.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux i386](https://get.helm.sh/helm-vX.Y.Z-linux-386.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-386.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux ppc64le](https://get.helm.sh/helm-vX.Y.Z-linux-ppc64le.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-ppc64le.tar.gz.sha256) / CHECKSUM_VAL) +- [Linux s390x](https://get.helm.sh/helm-vX.Y.Z-linux-s390x.tar.gz) ([checksum](https://get.helm.sh/helm-vX.Y.Z-linux-s390x.tar.gz.sha256) / CHECKSUM_VAL) +- [Windows amd64](https://get.helm.sh/helm-vX.Y.Z-windows-amd64.zip) ([checksum](https://get.helm.sh/helm-vX.Y.Z-windows-amd64.zip.sha256) / CHECKSUM_VAL) Once you have the client installed, upgrade Tiller with `helm init --upgrade`. 
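As an aside to the template above (not text to paste into the release notes): a hedged sketch of how an end user can check one of these downloads against the published checksum and the ASCII-armored signature from step 7, assuming the `.sha256` file holds only the digest and the `.asc` file is attached to the GitHub release:

```shell
# Sketch: verify the linux-amd64 archive for a hypothetical release vX.Y.Z.
wget https://get.helm.sh/helm-vX.Y.Z-linux-amd64.tar.gz
wget https://get.helm.sh/helm-vX.Y.Z-linux-amd64.tar.gz.sha256

# The published .sha256 is assumed to contain only the digest, so rebuild the
# "digest  filename" form that sha256sum -c expects.
echo "$(cat helm-vX.Y.Z-linux-amd64.tar.gz.sha256)  helm-vX.Y.Z-linux-amd64.tar.gz" | sha256sum -c

# Import the maintainer keys from the repository's KEYS file, then verify the
# detached signature downloaded from the GitHub release page.
gpg --import KEYS
gpg --verify helm-vX.Y.Z-linux-amd64.tar.gz.asc helm-vX.Y.Z-linux-amd64.tar.gz
```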
@@ -250,23 +359,49 @@ The [Quickstart Guide](https://docs.helm.sh/using_helm/#quickstart-guide) will g ## Changelog -- chore(*): bump version to v2.7.0 08c1144f5eb3e3b636d9775617287cc26e53dba4 (Adam Reese) +### Features +- ref(*): kubernetes v1.11 support efadbd88035654b2951f3958167afed014c46bc6 (Adam Reese) +- feat(helm): add $HELM_KEY_PASSPHRASE environment variable for signing helm charts (#4778) 1e26b5300b5166fabb90002535aacd2f9cc7d787 + +### Bug fixes - fix circle not building tags f4f932fabd197f7e6d608c8672b33a483b4b76fa (Matthew Fisher) + +### Code cleanup +- ref(kube): Gets rid of superfluous Sprintf call 3071a16f5eb3a2b646d9795617287cc26e53dba4 (Taylor Thomas) +- chore(*): bump version to v2.7.0 08c1144f5eb3e3b636d9775617287cc26e53dba4 (Adam Reese) + +### Documentation Changes +- docs(release_checklist): fix changelog generation command (#4694) 8442851a5c566a01d9b4c69b368d64daa04f6a7f (Matthew Fisher) ``` -The changelog at the bottom of the release notes can be generated with this command: +The changelog at the bottom of the release notes can be generated with this +command: ```shell PREVIOUS_RELEASE=vX.Y.Z git log --no-merges --pretty=format:'- %s %H (%aN)' $PREVIOUS_RELEASE..$RELEASE_NAME ``` +After generating the changelog, you will need to categorize the changes as shown +in the example above. + Once finished, go into GitHub and edit the release notes for the tagged release with the notes written here. -## 8. Evangelize +Remember to attach the ascii armored signatures generated in the previous step to the release notes. + +It is now worth getting other people to take a look at the release notes before the release is published. Send +a request out to [#helm-dev](https://kubernetes.slack.com/messages/C51E88VDG) for review. It is always +beneficial as it can be easy to miss something. + +When you are ready to go, hit `publish`. + +## 9. Evangelize -Congratulations! You're done. Go grab yourself a $DRINK_OF_CHOICE. You've earned it. +Congratulations! You're done. Go grab yourself a $DRINK_OF_CHOICE. You've earned +it. -After enjoying a nice $DRINK_OF_CHOICE, go forth and announce the glad tidings of the new release in Slack and on Twitter. You should also notify any key partners in the helm community such as the homebrew formula maintainers, the owners of incubator projects (e.g. ChartMuseum) and any other interested parties. +After enjoying a nice $DRINK_OF_CHOICE, go forth and announce the glad tidings +of the new release in Slack and on Twitter. -Optionally, write a blog post about the new release and showcase some of the new features on there! +Optionally, write a blog post about the new release and showcase some of the new +features on there! diff --git a/docs/securing_installation.md b/docs/securing_installation.md index d47a98bcc..56ebad62c 100644 --- a/docs/securing_installation.md +++ b/docs/securing_installation.md @@ -69,9 +69,10 @@ When Helm clients are connecting from outside of the cluster, the security betwe Contrary to the previous [Enabling TLS](#enabling-tls) section, this section does not involve running a tiller server pod in your cluster (for what it's worth, that lines up with the current [helm v3 proposal](https://github.com/helm/community/blob/master/helm-v3/000-helm-v3.md)), thus there is no gRPC endpoint (and thus there's no need to create & manage TLS certificates to secure each gRPC endpoint). 
Steps: - * Fetch the latest helm release tarball from the [GitHub release page](https://github.com/helm/helm/releases), and extract and move `helm` and `tiller` somewhere on your `$PATH`. - * "Server": Run `tiller --storage=secret`. (Note that `tiller` has a default value of ":44134" for the `--listen` argument.) - * Client: In another terminal (and on the same host that the aforementioned `tiller` command was run for the previous bullet): Run `export HELM_HOST=:44134`, and then run `helm` commands as usual. + +- Fetch the latest helm release tarball from the [GitHub release page](https://github.com/helm/helm/releases), and extract and move `helm` and `tiller` somewhere on your `$PATH`. +- "Server": Run `tiller --storage=secret`. (Note that `tiller` has a default value of ":44134" for the `--listen` argument.) +- Client: In another terminal (and on the same host that the aforementioned `tiller` command was run for the previous bullet): Run `export HELM_HOST=:44134`, and then run `helm` commands as usual. ### Tiller's Release Information diff --git a/docs/tiller_ssl.md b/docs/tiller_ssl.md index 41e704653..8491c2c78 100644 --- a/docs/tiller_ssl.md +++ b/docs/tiller_ssl.md @@ -42,6 +42,8 @@ on getting ready within a small amount of time. For production configurations, we urge readers to read [the official documentation](https://www.openssl.org) and consult other resources. +There are other alternative ways to generating SSL CAs in addition to `openssl`, for example Terraform. They are not documented here but you can find links to these alternative means in [Related Projects and Documentation](https://helm.sh/docs/related/). + ### Generate a Certificate Authority The simplest way to generate a certificate authority is to run two commands: @@ -288,7 +290,7 @@ not available for public resolution. By default, the Helm client connects to Tiller via tunnel (i.e. kube proxy) at 127.0.0.1. During the TLS handshake, a target, usually provided as a hostname (e.g. example.com), is checked against the subject and subject alternative -names of the certificate (i.e. hostname verficiation). However, because of the tunnel, the target is an IP address. +names of the certificate (i.e. hostname verification). However, because of the tunnel, the target is an IP address. Therefore, to validate the certificate, the IP address 127.0.0.1 must be listed as an IP subject alternative name (IP SAN) in the Tiller certificate. @@ -310,6 +312,6 @@ If your tiller certificate has expired, you'll need to sign a new certificate, b ## References -https://github.com/denji/golang-tls -https://www.openssl.org/docs/ -https://jamielinux.com/docs/openssl-certificate-authority/sign-server-and-client-certificates.html +- https://github.com/denji/golang-tls +- https://www.openssl.org/docs/ +- https://jamielinux.com/docs/openssl-certificate-authority/sign-server-and-client-certificates.html diff --git a/docs/using_helm.md b/docs/using_helm.md index 5716a1302..ad693472e 100755 --- a/docs/using_helm.md +++ b/docs/using_helm.md @@ -1,4 +1,4 @@ -# Using Helm +# Using Helm This guide explains the basics of using Helm (and Tiller) to manage packages on your Kubernetes cluster. It assumes that you have already @@ -17,9 +17,9 @@ cluster. Think of it like the Kubernetes equivalent of a Homebrew formula, an Apt dpkg, or a Yum RPM file. A *Repository* is the place where charts can be collected and shared. 
-It's like Perl's [CPAN archive](http://www.cpan.org) or the -[Fedora Package Database](https://admin.fedoraproject.org/pkgdb/), but for -Kubernetes packages. +It's like Perl's [CPAN archive](https://www.cpan.org) or the +[Fedora Package Database](https://apps.fedoraproject.org/packages/s/pkgdb), but +for Kubernetes packages. A *Release* is an instance of a chart running in a Kubernetes cluster. One chart can often be installed many times into the same cluster. And @@ -190,7 +190,7 @@ imageTag: 10.1.14-r3 ## Specify a imagePullPolicy ## Default to 'Always' if imageTag is 'latest', else set to 'IfNotPresent' -## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images +## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## # imagePullPolicy: @@ -215,7 +215,10 @@ You can then override any of these settings in a YAML formatted file, and then pass that file during installation. ```console -$ echo '{mariadbUser: user0, mariadbDatabase: user0db}' > config.yaml +$ cat << EOF > config.yaml +mariadbUser: user0 +mariadbDatabase: user0db +EOF $ helm install -f config.yaml stable/mariadb ``` @@ -356,7 +359,7 @@ update things that have changed since the last release. ```console $ helm upgrade -f panda.yaml happy-panda stable/mariadb Fetched stable/mariadb-0.3.0.tgz to /Users/mattbutcher/Code/Go/src/k8s.io/helm/mariadb-0.3.0.tgz -happy-panda has been upgraded. Happy Helming! +happy-panda has been upgraded. Last Deployed: Wed Sep 28 12:47:54 2016 Namespace: default Status: DEPLOYED diff --git a/glide.lock b/glide.lock index 105dada1f..d81ca12ca 100644 --- a/glide.lock +++ b/glide.lock @@ -1,13 +1,10 @@ -hash: 2af9a5c4f891a0f44109a929a494b5aeaaffa3a87cd1f3881f25f79845703d5b -updated: 2018-12-14T21:39:31.112097Z +hash: 13c07a8e64f0777d08cd03d5edba6f254621ec1ee8e3c7b3ef26efc682b643ce +updated: 2019-09-18T12:07:21.888497-04:00 imports: - name: cloud.google.com/go - version: 3b1ae45394a234c385be014e9a488f2bb6eef821 + version: 0ebda48a7f143b1cce9eb37a8c1106ac762a3430 subpackages: - compute/metadata - - internal -- name: github.com/aokoli/goutils - version: 9c37978a95bd5c709a15883b6242714ea6709e64 - name: github.com/asaskevich/govalidator version: 7664702784775e51966f0885f5cd27435916517b - name: github.com/Azure/go-ansiterm @@ -15,7 +12,7 @@ imports: subpackages: - winterm - name: github.com/Azure/go-autorest - version: ea233b6412b0421a65dc6160e16c893364664a95 + version: 1ffcc8896ef6dfe022d90a4317d866f925cf0f9e subpackages: - autorest - autorest/adal @@ -24,7 +21,7 @@ imports: - logger - version - name: github.com/beorn7/perks - version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 + version: 3a771d992973f24aa725d07868b467d1ddfceafb subpackages: - quantile - name: github.com/BurntSushi/toml @@ -43,7 +40,7 @@ imports: - name: github.com/cyphar/filepath-securejoin version: a261ee33d7a517f054effbf451841abaafe3e0fd - name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 subpackages: - spew - name: github.com/dgrijalva/jwt-go @@ -54,46 +51,26 @@ imports: - digestset - reference - name: github.com/docker/docker - version: a9fbbdc8dd8794b20af358382ab780559bca589d - subpackages: - - api - - api/types - - api/types/blkiodev - - api/types/container - - api/types/events - - api/types/filters - - api/types/image - - api/types/mount - - api/types/network - - api/types/registry - - api/types/strslice - - api/types/swarm - - api/types/swarm/runtime - - api/types/time - - api/types/versions - - api/types/volume - - 
client - - daemon/logger/jsonfilelog/jsonlog - - pkg/jsonmessage - - pkg/mount - - pkg/parsers - - pkg/parsers/operatingsystem - - pkg/stdcopy - - pkg/sysinfo + version: be7ac8be2ae072032a4005e8f232be3fc57e4127 + subpackages: - pkg/term - pkg/term/windows - name: github.com/docker/spdystream version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: - spdy +- name: github.com/emicklei/go-restful + version: ff4f55a206334ef123e4f79bbf348980da81ca46 + subpackages: + - log - name: github.com/evanphx/json-patch - version: 36442dbdb585210f8d5a1b45e67aa323c197d5c4 + version: 5858425f75500d40c52783dce87d085a483ce135 - name: github.com/exponent-io/jsonpath version: d6023ce2651d8eafb5c75bb0c7167536102ec9f5 - name: github.com/fatih/camelcase version: f6a740d52f961c60348ebb109adde9f4635d7540 - name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee + version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 - name: github.com/go-openapi/jsonpointer version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004 - name: github.com/go-openapi/jsonreference @@ -112,6 +89,8 @@ imports: - syntax/lexer - util/runes - util/strings +- name: github.com/gofrs/flock + version: 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -122,7 +101,7 @@ imports: subpackages: - lru - name: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + version: aa810b61a9c79d51363740d207bb46cf8e620ed5 subpackages: - proto - ptypes @@ -131,10 +110,18 @@ imports: - ptypes/timestamp - name: github.com/google/btree version: 7d79101e329e5a3adf994758c578dab82b90c017 +- name: github.com/google/go-cmp + version: 6f77996f0c42f7b84e5a2b252227263f93432e9b + subpackages: + - cmp + - cmp/internal/diff + - cmp/internal/flags + - cmp/internal/function + - cmp/internal/value - name: github.com/google/gofuzz - version: 44d81051d367757e1c7c6a5a86423ece9afcf63c + version: 24818f796faf91cd76ec7bddd72458fbced7a6c1 - name: github.com/google/uuid - version: 064e2069ce9c359c118179501254f67d7d37ba24 + version: 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 - name: github.com/googleapis/gnostic version: 0c5108395e2debce0d731cf0287ddf7242066aba subpackages: @@ -142,7 +129,7 @@ imports: - compiler - extensions - name: github.com/gophercloud/gophercloud - version: 781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d + version: c818fa66e4c88b30db28038fe3f18f2f4a0db9a8 subpackages: - openstack - openstack/identity/v2/tenants @@ -162,31 +149,45 @@ imports: - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 0c1b191dbfe51efdabe3c14b9f6f3b96429e0722 - name: github.com/hashicorp/golang-lru - version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 + version: 20f1fb78b0740ba8c3cb143a61e86ba5c8669768 subpackages: - simplelru - name: github.com/huandu/xstrings - version: 3959339b333561bf62a38b424fd41517c2c90f40 + version: f02667b379e2fb5916c3cda2cf31e0eb885d79f8 - name: github.com/imdario/mergo version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/jmoiron/sqlx + version: d161d7a76b5661016ad0b085869f77fd410f3e6a + subpackages: + - reflectx - name: github.com/json-iterator/go version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 +- name: github.com/konsorten/go-windows-terminal-sequences + version: 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 +- name: github.com/lib/pq + version: 88edab0803230a3898347e77b474f8c1820a1f20 + subpackages: + - oid +- name: 
github.com/liggitt/tabwriter + version: 89fcab3d43de07060e4fd4c1547430ed57e87f24 - name: github.com/mailru/easyjson - version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d + version: 60711f1a8329503b04e1c88535f419d0bb440bff subpackages: - buffer - jlexer - jwriter - name: github.com/MakeNowJust/heredoc version: bb23615498cded5e105af4ce27de75b089cbe851 +- name: github.com/Masterminds/goutils + version: 41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0 - name: github.com/Masterminds/semver - version: 517734cc7d6470c0d07130e40fd40bdeb9bcd3fd + version: 805c489aa98f412e79eb308a37996bf9d8b1c91e - name: github.com/Masterminds/sprig - version: 15f9564e7e9cf0da02a48e0d25f12a7b83559aa6 + version: 2691a9cba2adee8d9a60100a1bc49e770f97b7db - name: github.com/Masterminds/vcs - version: 3084677c2c188840777bff30054f2b553729d329 + version: f94282d8632a0620f79f0c6ff0e82604e8c5c85b - name: github.com/mattn/go-runewidth version: d6bea18f789704b5f83375793155289da36a3c7f - name: github.com/matttproud/golang_protobuf_extensions @@ -206,44 +207,51 @@ imports: - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/prometheus/client_golang - version: c5b7fccd204277076155f10851dad72b76a49317 + version: 505eaef017263e299324067d40ca2c48f6a2cf50 subpackages: - prometheus + - prometheus/internal - prometheus/promhttp - name: github.com/prometheus/client_model version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 subpackages: - go - name: github.com/prometheus/common - version: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 + version: 4724e9255275ce38f7179b2478abeae4e28c904f subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg - model - name: github.com/prometheus/procfs - version: 65c1f6f8f0fc1e2185eb9863a3bc751496404259 + version: 1dc9a6cbc91aacc3e8b2d63db4d2e957a5394ac4 subpackages: + - internal/util + - nfs - xfs - name: github.com/PuerkitoBio/purell - version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 + version: 0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4 - name: github.com/PuerkitoBio/urlesc - version: 5bd2802263f21d8788851d5305584c82a5c75d7e + version: de5bf2ad457846296e2031421a34e2568e304e35 +- name: github.com/rubenv/sql-migrate + version: 1007f53448d75fe14190968f5de4d95ed63ebb83 + subpackages: + - sqlparse - name: github.com/russross/blackfriday version: 300106c228d52c8941d4b3de6054a6062a86dda3 - name: github.com/shurcooL/sanitized_anchor_name version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 - name: github.com/sirupsen/logrus - version: 89742aefa4b206dcf400792f3bd35b542998eb3b + version: bcd833dfe83d3cebad139e4a29ed79cb2318bf95 - name: github.com/spf13/cobra - version: fe5e611709b0c57fa4a89136deaa8e1d4004d053 + version: f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5 subpackages: - doc - name: github.com/spf13/pflag - version: 298182f68c66c05229eb03ac171abe6e309ee79a + version: e8f29969b682c41a730f8f08b76033b120498464 - name: github.com/technosophos/moniker version: a5dbd03a2245d554160e3ae6bfdcf969fe58b431 - name: golang.org/x/crypto - version: de0752318171da717af4ce24d0a2e8626afaeb11 + version: e84da0312774c21d64ee2317962ef669b27ffb41 subpackages: - cast5 - ed25519 @@ -259,18 +267,18 @@ imports: - scrypt - ssh/terminal - name: golang.org/x/net - version: 0ed95abb35c445290478a5348a7b38bb154135fd + version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 subpackages: - context - context/ctxhttp + - http/httpguts - http2 - http2/hpack - idna - internal/timeseries - - lex/httplex - trace - name: golang.org/x/oauth2 - version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4 + version: 
9f3314589c9a9136388751d9adae6b0ed400978a subpackages: - google - internal @@ -281,25 +289,20 @@ imports: subpackages: - semaphore - name: golang.org/x/sys - version: 95c6576299259db960f6c5b9b69ea52422860fce + version: b90733256f2e882e81d52f9126de08df5615afd9 subpackages: - unix - windows - name: golang.org/x/text - version: b19bf474d317b857955b12035d2c5acb57ce8b01 + version: e6919f6577db79269a6443b9dc46d18f2238fb5d subpackages: - - cases - encoding - encoding/internal - encoding/internal/identifier - encoding/unicode - - internal - - internal/tag - internal/utf8internal - - language - runes - secure/bidirule - - secure/precis - transform - unicode/bidi - unicode/norm @@ -309,7 +312,7 @@ imports: subpackages: - rate - name: google.golang.org/appengine - version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 subpackages: - internal - internal/app_identity @@ -325,26 +328,42 @@ imports: subpackages: - googleapis/rpc/status - name: google.golang.org/grpc - version: 5ffe3083946d5603a0578721101dc8165b1d5b5f + version: a02b0774206b209466313a0b525d2c738fe407eb subpackages: - balancer + - balancer/base + - balancer/roundrobin + - binarylog/grpc_binarylog_v1 - codes - connectivity - credentials - - grpclb/grpc_lb_v1/messages + - credentials/internal + - encoding + - encoding/proto - grpclog - health - health/grpc_health_v1 - internal + - internal/backoff + - internal/binarylog + - internal/channelz + - internal/envconfig + - internal/grpcrand + - internal/grpcsync + - internal/syscall + - internal/transport - keepalive - metadata - naming - peer - resolver + - resolver/dns + - resolver/passthrough - stats - status - tap - - transport +- name: gopkg.in/gorp.v1 + version: 6a667da9c028871f98598d85413e3fc4c6daa52e - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/square/go-jose.v2 @@ -354,12 +373,11 @@ imports: - json - jwt - name: gopkg.in/yaml.v2 - version: 670d4cfef0544295bc27a114dbac37980d83185a + version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api - version: 05914d821849570fba9eacfb29466f2d8d3cd229 + version: 7cf5895f2711098d7d9527db0a4a49fb0dff7de2 subpackages: - admission/v1beta1 - - admissionregistration/v1alpha1 - admissionregistration/v1beta1 - apps/v1 - apps/v1beta1 @@ -376,16 +394,21 @@ imports: - batch/v1beta1 - batch/v2alpha1 - certificates/v1beta1 + - coordination/v1 - coordination/v1beta1 - core/v1 - events/v1beta1 - extensions/v1beta1 - imagepolicy/v1alpha1 - networking/v1 + - networking/v1beta1 + - node/v1alpha1 + - node/v1beta1 - policy/v1beta1 - rbac/v1 - rbac/v1alpha1 - rbac/v1beta1 + - scheduling/v1 - scheduling/v1alpha1 - scheduling/v1beta1 - settings/v1alpha1 @@ -393,11 +416,13 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 0fe22c71c47604641d9aa352c785b7912c200562 + version: 14e95df34f1f469647f494f5185a036e26fddcab subpackages: + - pkg/apis/apiextensions + - pkg/apis/apiextensions/v1beta1 - pkg/features - name: k8s.io/apimachinery - version: 2b1284ed4c93a43499e781493253e2ac5959c4fd + version: 1799e75a07195de9460b8ef7300883499f12127b subpackages: - pkg/api/equality - pkg/api/errors @@ -453,7 +478,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 3ccfe8365421eb08e334b195786a2973460741d8 + version: 47dc9a115b1874c96c20cea91d02b36e4faa1bb1 subpackages: - pkg/authentication/authenticator - pkg/authentication/serviceaccount @@ -461,23 +486,84 @@ imports: - 
pkg/features - pkg/util/feature - name: k8s.io/cli-runtime - version: 835b10687cb6556f6b113099ef925146a56d5981 + version: 2090e6d8f84c1db3e23968b0ee97fb677b363fcf subpackages: - pkg/genericclioptions - - pkg/genericclioptions/printers - - pkg/genericclioptions/resource + - pkg/kustomize + - pkg/kustomize/k8sdeps + - pkg/kustomize/k8sdeps/configmapandsecret + - pkg/kustomize/k8sdeps/kunstruct + - pkg/kustomize/k8sdeps/kv + - pkg/kustomize/k8sdeps/transformer + - pkg/kustomize/k8sdeps/transformer/hash + - pkg/kustomize/k8sdeps/transformer/patch + - pkg/kustomize/k8sdeps/validator + - pkg/printers + - pkg/resource - name: k8s.io/client-go - version: 8d9ed539ba3134352c586810e749e58df4e94e4f + version: 78d2af792babf2dd937ba2e2a8d99c753a5eda89 subpackages: - discovery + - discovery/cached/disk - discovery/fake - dynamic + - dynamic/dynamicinformer + - dynamic/dynamiclister - dynamic/fake + - informers + - informers/admissionregistration + - informers/admissionregistration/v1beta1 + - informers/apps + - informers/apps/v1 + - informers/apps/v1beta1 + - informers/apps/v1beta2 + - informers/auditregistration + - informers/auditregistration/v1alpha1 + - informers/autoscaling + - informers/autoscaling/v1 + - informers/autoscaling/v2beta1 + - informers/autoscaling/v2beta2 + - informers/batch + - informers/batch/v1 + - informers/batch/v1beta1 + - informers/batch/v2alpha1 + - informers/certificates + - informers/certificates/v1beta1 + - informers/coordination + - informers/coordination/v1 + - informers/coordination/v1beta1 + - informers/core + - informers/core/v1 + - informers/events + - informers/events/v1beta1 + - informers/extensions + - informers/extensions/v1beta1 + - informers/internalinterfaces + - informers/networking + - informers/networking/v1 + - informers/networking/v1beta1 + - informers/node + - informers/node/v1alpha1 + - informers/node/v1beta1 + - informers/policy + - informers/policy/v1beta1 + - informers/rbac + - informers/rbac/v1 + - informers/rbac/v1alpha1 + - informers/rbac/v1beta1 + - informers/scheduling + - informers/scheduling/v1 + - informers/scheduling/v1alpha1 + - informers/scheduling/v1beta1 + - informers/settings + - informers/settings/v1alpha1 + - informers/storage + - informers/storage/v1 + - informers/storage/v1alpha1 + - informers/storage/v1beta1 - kubernetes - kubernetes/fake - kubernetes/scheme - - kubernetes/typed/admissionregistration/v1alpha1 - - kubernetes/typed/admissionregistration/v1alpha1/fake - kubernetes/typed/admissionregistration/v1beta1 - kubernetes/typed/admissionregistration/v1beta1/fake - kubernetes/typed/apps/v1 @@ -510,6 +596,8 @@ imports: - kubernetes/typed/batch/v2alpha1/fake - kubernetes/typed/certificates/v1beta1 - kubernetes/typed/certificates/v1beta1/fake + - kubernetes/typed/coordination/v1 + - kubernetes/typed/coordination/v1/fake - kubernetes/typed/coordination/v1beta1 - kubernetes/typed/coordination/v1beta1/fake - kubernetes/typed/core/v1 @@ -520,6 +608,12 @@ imports: - kubernetes/typed/extensions/v1beta1/fake - kubernetes/typed/networking/v1 - kubernetes/typed/networking/v1/fake + - kubernetes/typed/networking/v1beta1 + - kubernetes/typed/networking/v1beta1/fake + - kubernetes/typed/node/v1alpha1 + - kubernetes/typed/node/v1alpha1/fake + - kubernetes/typed/node/v1beta1 + - kubernetes/typed/node/v1beta1/fake - kubernetes/typed/policy/v1beta1 - kubernetes/typed/policy/v1beta1/fake - kubernetes/typed/rbac/v1 @@ -528,6 +622,8 @@ imports: - kubernetes/typed/rbac/v1alpha1/fake - kubernetes/typed/rbac/v1beta1 - kubernetes/typed/rbac/v1beta1/fake + 
- kubernetes/typed/scheduling/v1 + - kubernetes/typed/scheduling/v1/fake - kubernetes/typed/scheduling/v1alpha1 - kubernetes/typed/scheduling/v1alpha1/fake - kubernetes/typed/scheduling/v1beta1 @@ -540,6 +636,38 @@ imports: - kubernetes/typed/storage/v1alpha1/fake - kubernetes/typed/storage/v1beta1 - kubernetes/typed/storage/v1beta1/fake + - listers/admissionregistration/v1beta1 + - listers/apps/v1 + - listers/apps/v1beta1 + - listers/apps/v1beta2 + - listers/auditregistration/v1alpha1 + - listers/autoscaling/v1 + - listers/autoscaling/v2beta1 + - listers/autoscaling/v2beta2 + - listers/batch/v1 + - listers/batch/v1beta1 + - listers/batch/v2alpha1 + - listers/certificates/v1beta1 + - listers/coordination/v1 + - listers/coordination/v1beta1 + - listers/core/v1 + - listers/events/v1beta1 + - listers/extensions/v1beta1 + - listers/networking/v1 + - listers/networking/v1beta1 + - listers/node/v1alpha1 + - listers/node/v1beta1 + - listers/policy/v1beta1 + - listers/rbac/v1 + - listers/rbac/v1alpha1 + - listers/rbac/v1beta1 + - listers/scheduling/v1 + - listers/scheduling/v1alpha1 + - listers/scheduling/v1beta1 + - listers/settings/v1alpha1 + - listers/storage/v1 + - listers/storage/v1alpha1 + - listers/storage/v1beta1 - pkg/apis/clientauthentication - pkg/apis/clientauthentication/v1alpha1 - pkg/apis/clientauthentication/v1beta1 @@ -574,30 +702,35 @@ imports: - tools/pager - tools/portforward - tools/record + - tools/record/util - tools/reference - tools/remotecommand - tools/watch - transport - transport/spdy - - util/buffer - util/cert - util/connrotation - util/exec - util/flowcontrol - util/homedir - - util/integer - util/jsonpath + - util/keyutil - util/retry +- name: k8s.io/component-base + version: 185d68e6e6ea654214f444cab8f645ec3af3092e + subpackages: + - featuregate - name: k8s.io/klog - version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f + version: 89e63fd5117f8c20208186ef85f096703a280c20 - name: k8s.io/kube-openapi - version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d + version: b3a7cee44a305be0a69e1b9ac03018307287e1b0 subpackages: + - pkg/common - pkg/util/proto - pkg/util/proto/testing - pkg/util/proto/validation - name: k8s.io/kubernetes - version: f2c8f1cadf1808ec28476682e49a3cce2b09efbf + version: e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529 subpackages: - pkg/api/legacyscheme - pkg/api/service @@ -630,6 +763,7 @@ imports: - pkg/apis/certificates/v1beta1 - pkg/apis/coordination - pkg/apis/coordination/install + - pkg/apis/coordination/v1 - pkg/apis/coordination/v1beta1 - pkg/apis/core - pkg/apis/core/helper @@ -645,6 +779,7 @@ imports: - pkg/apis/extensions/install - pkg/apis/extensions/v1beta1 - pkg/apis/networking + - pkg/apis/node - pkg/apis/policy - pkg/apis/policy/install - pkg/apis/policy/v1beta1 @@ -655,6 +790,7 @@ imports: - pkg/apis/rbac/v1beta1 - pkg/apis/scheduling - pkg/apis/scheduling/install + - pkg/apis/scheduling/v1 - pkg/apis/scheduling/v1alpha1 - pkg/apis/scheduling/v1beta1 - pkg/apis/settings @@ -689,6 +825,7 @@ imports: - pkg/kubectl/util/event - pkg/kubectl/util/fieldpath - pkg/kubectl/util/i18n + - pkg/kubectl/util/interrupt - pkg/kubectl/util/podutils - pkg/kubectl/util/printers - pkg/kubectl/util/qos @@ -699,30 +836,53 @@ imports: - pkg/kubectl/util/templates - pkg/kubectl/util/term - pkg/kubectl/validation - - pkg/kubelet/apis + - pkg/kubectl/version - pkg/kubelet/types - pkg/master/ports - pkg/printers - pkg/printers/internalversion - - pkg/scheduler/api - pkg/security/apparmor - pkg/serviceaccount - - pkg/util/file - pkg/util/hash - - pkg/util/interrupt - 
pkg/util/labels - - pkg/util/net/sets - pkg/util/node - pkg/util/parsers - pkg/util/taints - - pkg/version - name: k8s.io/utils - version: 66066c83e385e385ccc3c964b44fd7dcd413d0ed + version: c2654d5206da6b7b6ace12841e8f359bb89b443c subpackages: - - clock + - buffer - exec - - exec/testing + - integer + - net + - path - pointer + - trace +- name: sigs.k8s.io/kustomize + version: a6f65144121d1955266b0cd836ce954c04122dc8 + subpackages: + - pkg/commands/build + - pkg/constants + - pkg/expansion + - pkg/factory + - pkg/fs + - pkg/git + - pkg/gvk + - pkg/ifc + - pkg/ifc/transformer + - pkg/image + - pkg/internal/error + - pkg/loader + - pkg/patch + - pkg/patch/transformer + - pkg/resid + - pkg/resmap + - pkg/resource + - pkg/target + - pkg/transformers + - pkg/transformers/config + - pkg/transformers/config/defaultconfig + - pkg/types - name: sigs.k8s.io/yaml version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 - name: vbom.ml/util @@ -730,8 +890,10 @@ imports: subpackages: - sortorder testImports: +- name: github.com/DATA-DOG/go-sqlmock + version: 472e287dbafe67e526a3797165b64cb14f34705a - name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + version: 792786c7400a136282c1664665ae0a8db921c6c2 subpackages: - difflib - name: github.com/stretchr/testify diff --git a/glide.yaml b/glide.yaml index bf81b22ee..565682dad 100644 --- a/glide.yaml +++ b/glide.yaml @@ -2,67 +2,79 @@ package: k8s.io/helm import: - package: golang.org/x/net subpackages: - - context + - context - package: golang.org/x/sync subpackages: - semaphore + - package: golang.org/x/sys + version: b90733256f2e882e81d52f9126de08df5615afd9 + subpackages: + - unix + - windows - package: github.com/spf13/cobra - version: fe5e611709b0c57fa4a89136deaa8e1d4004d053 + version: ^0.0.4 - package: github.com/spf13/pflag version: ~1.0.1 - package: github.com/Masterminds/vcs - # Pin version of mergo that is compatible with both sprig and Kubernetes - package: github.com/imdario/mergo version: v0.3.5 - package: github.com/Masterminds/sprig - version: ^2.16.0 + version: ^2.20.0 - package: github.com/ghodss/yaml + version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 - package: github.com/Masterminds/semver - version: ~1.3.1 + version: ^1.4.2 - package: github.com/technosophos/moniker version: ~0.2 - package: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + version: 1.2.0 subpackages: - - proto - - ptypes/any - - ptypes/timestamp + - proto + - ptypes/any + - ptypes/timestamp - package: google.golang.org/grpc - version: 1.7.2 + version: 1.18.0 - package: github.com/gosuri/uitable - package: github.com/asaskevich/govalidator version: ^4.0.0 - package: golang.org/x/crypto subpackages: - - openpgp - - ssh/terminal + - openpgp + - ssh/terminal - package: github.com/gobwas/glob version: ^0.2.1 - package: github.com/evanphx/json-patch - package: github.com/BurntSushi/toml version: ~0.3.0 - package: github.com/prometheus/client_golang - version: 0.8.0 + version: 0.9.2 - package: github.com/grpc-ecosystem/go-grpc-prometheus - package: k8s.io/kubernetes - version: release-1.13 + version: v1.15.0 - package: k8s.io/client-go - version: kubernetes-1.13.1 + version: kubernetes-1.15.0 - package: k8s.io/api - version: kubernetes-1.13.1 + version: kubernetes-1.15.0 - package: k8s.io/apimachinery - version: kubernetes-1.13.1 + version: kubernetes-1.15.0 - package: k8s.io/apiserver - version: kubernetes-1.13.1 + version: kubernetes-1.15.0 - package: k8s.io/cli-runtime - version: kubernetes-1.13.1 + version: 
kubernetes-1.15.0 - package: k8s.io/apiextensions-apiserver - version: kubernetes-1.13.1 + version: kubernetes-1.15.0 - package: github.com/cyphar/filepath-securejoin version: ^0.2.1 + - package: github.com/jmoiron/sqlx + version: ^1.2.0 + - package: github.com/rubenv/sql-migrate + - package: github.com/gofrs/flock + version: v0.7.1 testImports: - package: github.com/stretchr/testify version: ^1.1.4 subpackages: - assert + - package: github.com/DATA-DOG/go-sqlmock + version: ^1.3.2 diff --git a/pkg/chartutil/capabilities.go b/pkg/chartutil/capabilities.go index d7e660b8a..a6808c702 100644 --- a/pkg/chartutil/capabilities.go +++ b/pkg/chartutil/capabilities.go @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +//go:generate go run generator/capabilities_default_versions_generate.go package chartutil @@ -24,14 +25,15 @@ import ( ) var ( - // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). - DefaultVersionSet = NewVersionSet("v1") + // DefaultVersionSet is the default version set included in Kubernetes for workloads + // Default versions as of Kubernetes 1.14 + DefaultVersionSet = NewVersionSet(defaultVersions()...) // DefaultKubeVersion is the default kubernetes version DefaultKubeVersion = &version.Info{ Major: "1", - Minor: "9", - GitVersion: "v1.9.0", + Minor: "14", + GitVersion: "v1.14.0", GoVersion: runtime.Version(), Compiler: runtime.Compiler, Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), @@ -40,7 +42,7 @@ var ( // Capabilities describes the capabilities of the Kubernetes cluster that Tiller is attached to. type Capabilities struct { - // List of all supported API versions + // APIVersions list of all supported API versions APIVersions VersionSet // KubeVersion is the Kubernetes version KubeVersion *version.Info diff --git a/pkg/chartutil/capabilities_test.go b/pkg/chartutil/capabilities_test.go index 1f7020a39..666303e44 100644 --- a/pkg/chartutil/capabilities_test.go +++ b/pkg/chartutil/capabilities_test.go @@ -38,9 +38,6 @@ func TestDefaultVersionSet(t *testing.T) { if !DefaultVersionSet.Has("v1") { t.Error("Expected core v1 version set") } - if d := len(DefaultVersionSet); d != 1 { - t.Errorf("Expected only one version, got %d", d) - } } func TestCapabilities(t *testing.T) { @@ -51,4 +48,8 @@ func TestCapabilities(t *testing.T) { if !cap.APIVersions.Has("v1") { t.Error("APIVersions should have v1") } + + if !cap.APIVersions.Has("apps/v1/Deployment") { + t.Error("APIVersions should have apps/v1/Deployment") + } } diff --git a/pkg/chartutil/capabilities_versions_generated.go b/pkg/chartutil/capabilities_versions_generated.go new file mode 100644 index 000000000..bc33a40e8 --- /dev/null +++ b/pkg/chartutil/capabilities_versions_generated.go @@ -0,0 +1,575 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by capabilities_default_versions_generate.go; DO NOT EDIT. 
+package chartutil + +func defaultVersions() []string { + return []string{ + "__internal", + "__internal/WatchEvent", + "admissionregistration.k8s.io/__internal", + "admissionregistration.k8s.io/__internal/WatchEvent", + "admissionregistration.k8s.io/v1beta1", + "admissionregistration.k8s.io/v1beta1/CreateOptions", + "admissionregistration.k8s.io/v1beta1/DeleteOptions", + "admissionregistration.k8s.io/v1beta1/ExportOptions", + "admissionregistration.k8s.io/v1beta1/GetOptions", + "admissionregistration.k8s.io/v1beta1/ListOptions", + "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfiguration", + "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfigurationList", + "admissionregistration.k8s.io/v1beta1/PatchOptions", + "admissionregistration.k8s.io/v1beta1/UpdateOptions", + "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfiguration", + "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfigurationList", + "admissionregistration.k8s.io/v1beta1/WatchEvent", + "apps/__internal", + "apps/__internal/WatchEvent", + "apps/v1", + "apps/v1/ControllerRevision", + "apps/v1/ControllerRevisionList", + "apps/v1/CreateOptions", + "apps/v1/DaemonSet", + "apps/v1/DaemonSetList", + "apps/v1/DeleteOptions", + "apps/v1/Deployment", + "apps/v1/DeploymentList", + "apps/v1/ExportOptions", + "apps/v1/GetOptions", + "apps/v1/ListOptions", + "apps/v1/PatchOptions", + "apps/v1/ReplicaSet", + "apps/v1/ReplicaSetList", + "apps/v1/StatefulSet", + "apps/v1/StatefulSetList", + "apps/v1/UpdateOptions", + "apps/v1/WatchEvent", + "apps/v1beta1", + "apps/v1beta1/ControllerRevision", + "apps/v1beta1/ControllerRevisionList", + "apps/v1beta1/CreateOptions", + "apps/v1beta1/DeleteOptions", + "apps/v1beta1/Deployment", + "apps/v1beta1/DeploymentList", + "apps/v1beta1/DeploymentRollback", + "apps/v1beta1/ExportOptions", + "apps/v1beta1/GetOptions", + "apps/v1beta1/ListOptions", + "apps/v1beta1/PatchOptions", + "apps/v1beta1/Scale", + "apps/v1beta1/StatefulSet", + "apps/v1beta1/StatefulSetList", + "apps/v1beta1/UpdateOptions", + "apps/v1beta1/WatchEvent", + "apps/v1beta2", + "apps/v1beta2/ControllerRevision", + "apps/v1beta2/ControllerRevisionList", + "apps/v1beta2/CreateOptions", + "apps/v1beta2/DaemonSet", + "apps/v1beta2/DaemonSetList", + "apps/v1beta2/DeleteOptions", + "apps/v1beta2/Deployment", + "apps/v1beta2/DeploymentList", + "apps/v1beta2/ExportOptions", + "apps/v1beta2/GetOptions", + "apps/v1beta2/ListOptions", + "apps/v1beta2/PatchOptions", + "apps/v1beta2/ReplicaSet", + "apps/v1beta2/ReplicaSetList", + "apps/v1beta2/Scale", + "apps/v1beta2/StatefulSet", + "apps/v1beta2/StatefulSetList", + "apps/v1beta2/UpdateOptions", + "apps/v1beta2/WatchEvent", + "auditregistration.k8s.io/__internal", + "auditregistration.k8s.io/__internal/WatchEvent", + "auditregistration.k8s.io/v1alpha1", + "auditregistration.k8s.io/v1alpha1/AuditSink", + "auditregistration.k8s.io/v1alpha1/AuditSinkList", + "auditregistration.k8s.io/v1alpha1/CreateOptions", + "auditregistration.k8s.io/v1alpha1/DeleteOptions", + "auditregistration.k8s.io/v1alpha1/ExportOptions", + "auditregistration.k8s.io/v1alpha1/GetOptions", + "auditregistration.k8s.io/v1alpha1/ListOptions", + "auditregistration.k8s.io/v1alpha1/PatchOptions", + "auditregistration.k8s.io/v1alpha1/UpdateOptions", + "auditregistration.k8s.io/v1alpha1/WatchEvent", + "authentication.k8s.io/__internal", + "authentication.k8s.io/__internal/WatchEvent", + "authentication.k8s.io/v1", + "authentication.k8s.io/v1/CreateOptions", + "authentication.k8s.io/v1/DeleteOptions", + 
"authentication.k8s.io/v1/ExportOptions", + "authentication.k8s.io/v1/GetOptions", + "authentication.k8s.io/v1/ListOptions", + "authentication.k8s.io/v1/PatchOptions", + "authentication.k8s.io/v1/TokenRequest", + "authentication.k8s.io/v1/TokenReview", + "authentication.k8s.io/v1/UpdateOptions", + "authentication.k8s.io/v1/WatchEvent", + "authentication.k8s.io/v1beta1", + "authentication.k8s.io/v1beta1/CreateOptions", + "authentication.k8s.io/v1beta1/DeleteOptions", + "authentication.k8s.io/v1beta1/ExportOptions", + "authentication.k8s.io/v1beta1/GetOptions", + "authentication.k8s.io/v1beta1/ListOptions", + "authentication.k8s.io/v1beta1/PatchOptions", + "authentication.k8s.io/v1beta1/TokenReview", + "authentication.k8s.io/v1beta1/UpdateOptions", + "authentication.k8s.io/v1beta1/WatchEvent", + "authorization.k8s.io/__internal", + "authorization.k8s.io/__internal/WatchEvent", + "authorization.k8s.io/v1", + "authorization.k8s.io/v1/CreateOptions", + "authorization.k8s.io/v1/DeleteOptions", + "authorization.k8s.io/v1/ExportOptions", + "authorization.k8s.io/v1/GetOptions", + "authorization.k8s.io/v1/ListOptions", + "authorization.k8s.io/v1/LocalSubjectAccessReview", + "authorization.k8s.io/v1/PatchOptions", + "authorization.k8s.io/v1/SelfSubjectAccessReview", + "authorization.k8s.io/v1/SelfSubjectRulesReview", + "authorization.k8s.io/v1/SubjectAccessReview", + "authorization.k8s.io/v1/UpdateOptions", + "authorization.k8s.io/v1/WatchEvent", + "authorization.k8s.io/v1beta1", + "authorization.k8s.io/v1beta1/CreateOptions", + "authorization.k8s.io/v1beta1/DeleteOptions", + "authorization.k8s.io/v1beta1/ExportOptions", + "authorization.k8s.io/v1beta1/GetOptions", + "authorization.k8s.io/v1beta1/ListOptions", + "authorization.k8s.io/v1beta1/LocalSubjectAccessReview", + "authorization.k8s.io/v1beta1/PatchOptions", + "authorization.k8s.io/v1beta1/SelfSubjectAccessReview", + "authorization.k8s.io/v1beta1/SelfSubjectRulesReview", + "authorization.k8s.io/v1beta1/SubjectAccessReview", + "authorization.k8s.io/v1beta1/UpdateOptions", + "authorization.k8s.io/v1beta1/WatchEvent", + "autoscaling/__internal", + "autoscaling/__internal/WatchEvent", + "autoscaling/v1", + "autoscaling/v1/CreateOptions", + "autoscaling/v1/DeleteOptions", + "autoscaling/v1/ExportOptions", + "autoscaling/v1/GetOptions", + "autoscaling/v1/HorizontalPodAutoscaler", + "autoscaling/v1/HorizontalPodAutoscalerList", + "autoscaling/v1/ListOptions", + "autoscaling/v1/PatchOptions", + "autoscaling/v1/Scale", + "autoscaling/v1/UpdateOptions", + "autoscaling/v1/WatchEvent", + "autoscaling/v2beta1", + "autoscaling/v2beta1/CreateOptions", + "autoscaling/v2beta1/DeleteOptions", + "autoscaling/v2beta1/ExportOptions", + "autoscaling/v2beta1/GetOptions", + "autoscaling/v2beta1/HorizontalPodAutoscaler", + "autoscaling/v2beta1/HorizontalPodAutoscalerList", + "autoscaling/v2beta1/ListOptions", + "autoscaling/v2beta1/PatchOptions", + "autoscaling/v2beta1/UpdateOptions", + "autoscaling/v2beta1/WatchEvent", + "autoscaling/v2beta2", + "autoscaling/v2beta2/CreateOptions", + "autoscaling/v2beta2/DeleteOptions", + "autoscaling/v2beta2/ExportOptions", + "autoscaling/v2beta2/GetOptions", + "autoscaling/v2beta2/HorizontalPodAutoscaler", + "autoscaling/v2beta2/HorizontalPodAutoscalerList", + "autoscaling/v2beta2/ListOptions", + "autoscaling/v2beta2/PatchOptions", + "autoscaling/v2beta2/UpdateOptions", + "autoscaling/v2beta2/WatchEvent", + "batch/__internal", + "batch/__internal/WatchEvent", + "batch/v1", + "batch/v1/CreateOptions", + "batch/v1/DeleteOptions", + 
"batch/v1/ExportOptions", + "batch/v1/GetOptions", + "batch/v1/Job", + "batch/v1/JobList", + "batch/v1/ListOptions", + "batch/v1/PatchOptions", + "batch/v1/UpdateOptions", + "batch/v1/WatchEvent", + "batch/v1beta1", + "batch/v1beta1/CreateOptions", + "batch/v1beta1/CronJob", + "batch/v1beta1/CronJobList", + "batch/v1beta1/DeleteOptions", + "batch/v1beta1/ExportOptions", + "batch/v1beta1/GetOptions", + "batch/v1beta1/JobTemplate", + "batch/v1beta1/ListOptions", + "batch/v1beta1/PatchOptions", + "batch/v1beta1/UpdateOptions", + "batch/v1beta1/WatchEvent", + "batch/v2alpha1", + "batch/v2alpha1/CreateOptions", + "batch/v2alpha1/CronJob", + "batch/v2alpha1/CronJobList", + "batch/v2alpha1/DeleteOptions", + "batch/v2alpha1/ExportOptions", + "batch/v2alpha1/GetOptions", + "batch/v2alpha1/JobTemplate", + "batch/v2alpha1/ListOptions", + "batch/v2alpha1/PatchOptions", + "batch/v2alpha1/UpdateOptions", + "batch/v2alpha1/WatchEvent", + "certificates.k8s.io/__internal", + "certificates.k8s.io/__internal/WatchEvent", + "certificates.k8s.io/v1beta1", + "certificates.k8s.io/v1beta1/CertificateSigningRequest", + "certificates.k8s.io/v1beta1/CertificateSigningRequestList", + "certificates.k8s.io/v1beta1/CreateOptions", + "certificates.k8s.io/v1beta1/DeleteOptions", + "certificates.k8s.io/v1beta1/ExportOptions", + "certificates.k8s.io/v1beta1/GetOptions", + "certificates.k8s.io/v1beta1/ListOptions", + "certificates.k8s.io/v1beta1/PatchOptions", + "certificates.k8s.io/v1beta1/UpdateOptions", + "certificates.k8s.io/v1beta1/WatchEvent", + "coordination.k8s.io/__internal", + "coordination.k8s.io/__internal/WatchEvent", + "coordination.k8s.io/v1", + "coordination.k8s.io/v1/CreateOptions", + "coordination.k8s.io/v1/DeleteOptions", + "coordination.k8s.io/v1/ExportOptions", + "coordination.k8s.io/v1/GetOptions", + "coordination.k8s.io/v1/Lease", + "coordination.k8s.io/v1/LeaseList", + "coordination.k8s.io/v1/ListOptions", + "coordination.k8s.io/v1/PatchOptions", + "coordination.k8s.io/v1/UpdateOptions", + "coordination.k8s.io/v1/WatchEvent", + "coordination.k8s.io/v1beta1", + "coordination.k8s.io/v1beta1/CreateOptions", + "coordination.k8s.io/v1beta1/DeleteOptions", + "coordination.k8s.io/v1beta1/ExportOptions", + "coordination.k8s.io/v1beta1/GetOptions", + "coordination.k8s.io/v1beta1/Lease", + "coordination.k8s.io/v1beta1/LeaseList", + "coordination.k8s.io/v1beta1/ListOptions", + "coordination.k8s.io/v1beta1/PatchOptions", + "coordination.k8s.io/v1beta1/UpdateOptions", + "coordination.k8s.io/v1beta1/WatchEvent", + "events.k8s.io/__internal", + "events.k8s.io/__internal/WatchEvent", + "events.k8s.io/v1beta1", + "events.k8s.io/v1beta1/CreateOptions", + "events.k8s.io/v1beta1/DeleteOptions", + "events.k8s.io/v1beta1/Event", + "events.k8s.io/v1beta1/EventList", + "events.k8s.io/v1beta1/ExportOptions", + "events.k8s.io/v1beta1/GetOptions", + "events.k8s.io/v1beta1/ListOptions", + "events.k8s.io/v1beta1/PatchOptions", + "events.k8s.io/v1beta1/UpdateOptions", + "events.k8s.io/v1beta1/WatchEvent", + "extensions/__internal", + "extensions/__internal/WatchEvent", + "extensions/v1beta1", + "extensions/v1beta1/CreateOptions", + "extensions/v1beta1/DaemonSet", + "extensions/v1beta1/DaemonSetList", + "extensions/v1beta1/DeleteOptions", + "extensions/v1beta1/Deployment", + "extensions/v1beta1/DeploymentList", + "extensions/v1beta1/DeploymentRollback", + "extensions/v1beta1/ExportOptions", + "extensions/v1beta1/GetOptions", + "extensions/v1beta1/Ingress", + "extensions/v1beta1/IngressList", + "extensions/v1beta1/ListOptions", + 
"extensions/v1beta1/NetworkPolicy", + "extensions/v1beta1/NetworkPolicyList", + "extensions/v1beta1/PatchOptions", + "extensions/v1beta1/PodSecurityPolicy", + "extensions/v1beta1/PodSecurityPolicyList", + "extensions/v1beta1/ReplicaSet", + "extensions/v1beta1/ReplicaSetList", + "extensions/v1beta1/ReplicationControllerDummy", + "extensions/v1beta1/Scale", + "extensions/v1beta1/UpdateOptions", + "extensions/v1beta1/WatchEvent", + "networking.k8s.io/__internal", + "networking.k8s.io/__internal/WatchEvent", + "networking.k8s.io/v1", + "networking.k8s.io/v1/CreateOptions", + "networking.k8s.io/v1/DeleteOptions", + "networking.k8s.io/v1/ExportOptions", + "networking.k8s.io/v1/GetOptions", + "networking.k8s.io/v1/ListOptions", + "networking.k8s.io/v1/NetworkPolicy", + "networking.k8s.io/v1/NetworkPolicyList", + "networking.k8s.io/v1/PatchOptions", + "networking.k8s.io/v1/UpdateOptions", + "networking.k8s.io/v1/WatchEvent", + "networking.k8s.io/v1beta1", + "networking.k8s.io/v1beta1/CreateOptions", + "networking.k8s.io/v1beta1/DeleteOptions", + "networking.k8s.io/v1beta1/ExportOptions", + "networking.k8s.io/v1beta1/GetOptions", + "networking.k8s.io/v1beta1/Ingress", + "networking.k8s.io/v1beta1/IngressList", + "networking.k8s.io/v1beta1/ListOptions", + "networking.k8s.io/v1beta1/PatchOptions", + "networking.k8s.io/v1beta1/UpdateOptions", + "networking.k8s.io/v1beta1/WatchEvent", + "node.k8s.io/__internal", + "node.k8s.io/__internal/WatchEvent", + "node.k8s.io/v1alpha1", + "node.k8s.io/v1alpha1/CreateOptions", + "node.k8s.io/v1alpha1/DeleteOptions", + "node.k8s.io/v1alpha1/ExportOptions", + "node.k8s.io/v1alpha1/GetOptions", + "node.k8s.io/v1alpha1/ListOptions", + "node.k8s.io/v1alpha1/PatchOptions", + "node.k8s.io/v1alpha1/RuntimeClass", + "node.k8s.io/v1alpha1/RuntimeClassList", + "node.k8s.io/v1alpha1/UpdateOptions", + "node.k8s.io/v1alpha1/WatchEvent", + "node.k8s.io/v1beta1", + "node.k8s.io/v1beta1/CreateOptions", + "node.k8s.io/v1beta1/DeleteOptions", + "node.k8s.io/v1beta1/ExportOptions", + "node.k8s.io/v1beta1/GetOptions", + "node.k8s.io/v1beta1/ListOptions", + "node.k8s.io/v1beta1/PatchOptions", + "node.k8s.io/v1beta1/RuntimeClass", + "node.k8s.io/v1beta1/RuntimeClassList", + "node.k8s.io/v1beta1/UpdateOptions", + "node.k8s.io/v1beta1/WatchEvent", + "policy/__internal", + "policy/__internal/WatchEvent", + "policy/v1beta1", + "policy/v1beta1/CreateOptions", + "policy/v1beta1/DeleteOptions", + "policy/v1beta1/Eviction", + "policy/v1beta1/ExportOptions", + "policy/v1beta1/GetOptions", + "policy/v1beta1/ListOptions", + "policy/v1beta1/PatchOptions", + "policy/v1beta1/PodDisruptionBudget", + "policy/v1beta1/PodDisruptionBudgetList", + "policy/v1beta1/PodSecurityPolicy", + "policy/v1beta1/PodSecurityPolicyList", + "policy/v1beta1/UpdateOptions", + "policy/v1beta1/WatchEvent", + "rbac.authorization.k8s.io/__internal", + "rbac.authorization.k8s.io/__internal/WatchEvent", + "rbac.authorization.k8s.io/v1", + "rbac.authorization.k8s.io/v1/ClusterRole", + "rbac.authorization.k8s.io/v1/ClusterRoleBinding", + "rbac.authorization.k8s.io/v1/ClusterRoleBindingList", + "rbac.authorization.k8s.io/v1/ClusterRoleList", + "rbac.authorization.k8s.io/v1/CreateOptions", + "rbac.authorization.k8s.io/v1/DeleteOptions", + "rbac.authorization.k8s.io/v1/ExportOptions", + "rbac.authorization.k8s.io/v1/GetOptions", + "rbac.authorization.k8s.io/v1/ListOptions", + "rbac.authorization.k8s.io/v1/PatchOptions", + "rbac.authorization.k8s.io/v1/Role", + "rbac.authorization.k8s.io/v1/RoleBinding", + 
"rbac.authorization.k8s.io/v1/RoleBindingList", + "rbac.authorization.k8s.io/v1/RoleList", + "rbac.authorization.k8s.io/v1/UpdateOptions", + "rbac.authorization.k8s.io/v1/WatchEvent", + "rbac.authorization.k8s.io/v1alpha1", + "rbac.authorization.k8s.io/v1alpha1/ClusterRole", + "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBinding", + "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBindingList", + "rbac.authorization.k8s.io/v1alpha1/ClusterRoleList", + "rbac.authorization.k8s.io/v1alpha1/CreateOptions", + "rbac.authorization.k8s.io/v1alpha1/DeleteOptions", + "rbac.authorization.k8s.io/v1alpha1/ExportOptions", + "rbac.authorization.k8s.io/v1alpha1/GetOptions", + "rbac.authorization.k8s.io/v1alpha1/ListOptions", + "rbac.authorization.k8s.io/v1alpha1/PatchOptions", + "rbac.authorization.k8s.io/v1alpha1/Role", + "rbac.authorization.k8s.io/v1alpha1/RoleBinding", + "rbac.authorization.k8s.io/v1alpha1/RoleBindingList", + "rbac.authorization.k8s.io/v1alpha1/RoleList", + "rbac.authorization.k8s.io/v1alpha1/UpdateOptions", + "rbac.authorization.k8s.io/v1alpha1/WatchEvent", + "rbac.authorization.k8s.io/v1beta1", + "rbac.authorization.k8s.io/v1beta1/ClusterRole", + "rbac.authorization.k8s.io/v1beta1/ClusterRoleBinding", + "rbac.authorization.k8s.io/v1beta1/ClusterRoleBindingList", + "rbac.authorization.k8s.io/v1beta1/ClusterRoleList", + "rbac.authorization.k8s.io/v1beta1/CreateOptions", + "rbac.authorization.k8s.io/v1beta1/DeleteOptions", + "rbac.authorization.k8s.io/v1beta1/ExportOptions", + "rbac.authorization.k8s.io/v1beta1/GetOptions", + "rbac.authorization.k8s.io/v1beta1/ListOptions", + "rbac.authorization.k8s.io/v1beta1/PatchOptions", + "rbac.authorization.k8s.io/v1beta1/Role", + "rbac.authorization.k8s.io/v1beta1/RoleBinding", + "rbac.authorization.k8s.io/v1beta1/RoleBindingList", + "rbac.authorization.k8s.io/v1beta1/RoleList", + "rbac.authorization.k8s.io/v1beta1/UpdateOptions", + "rbac.authorization.k8s.io/v1beta1/WatchEvent", + "scheduling.k8s.io/__internal", + "scheduling.k8s.io/__internal/WatchEvent", + "scheduling.k8s.io/v1", + "scheduling.k8s.io/v1/CreateOptions", + "scheduling.k8s.io/v1/DeleteOptions", + "scheduling.k8s.io/v1/ExportOptions", + "scheduling.k8s.io/v1/GetOptions", + "scheduling.k8s.io/v1/ListOptions", + "scheduling.k8s.io/v1/PatchOptions", + "scheduling.k8s.io/v1/PriorityClass", + "scheduling.k8s.io/v1/PriorityClassList", + "scheduling.k8s.io/v1/UpdateOptions", + "scheduling.k8s.io/v1/WatchEvent", + "scheduling.k8s.io/v1alpha1", + "scheduling.k8s.io/v1alpha1/CreateOptions", + "scheduling.k8s.io/v1alpha1/DeleteOptions", + "scheduling.k8s.io/v1alpha1/ExportOptions", + "scheduling.k8s.io/v1alpha1/GetOptions", + "scheduling.k8s.io/v1alpha1/ListOptions", + "scheduling.k8s.io/v1alpha1/PatchOptions", + "scheduling.k8s.io/v1alpha1/PriorityClass", + "scheduling.k8s.io/v1alpha1/PriorityClassList", + "scheduling.k8s.io/v1alpha1/UpdateOptions", + "scheduling.k8s.io/v1alpha1/WatchEvent", + "scheduling.k8s.io/v1beta1", + "scheduling.k8s.io/v1beta1/CreateOptions", + "scheduling.k8s.io/v1beta1/DeleteOptions", + "scheduling.k8s.io/v1beta1/ExportOptions", + "scheduling.k8s.io/v1beta1/GetOptions", + "scheduling.k8s.io/v1beta1/ListOptions", + "scheduling.k8s.io/v1beta1/PatchOptions", + "scheduling.k8s.io/v1beta1/PriorityClass", + "scheduling.k8s.io/v1beta1/PriorityClassList", + "scheduling.k8s.io/v1beta1/UpdateOptions", + "scheduling.k8s.io/v1beta1/WatchEvent", + "settings.k8s.io/__internal", + "settings.k8s.io/__internal/WatchEvent", + "settings.k8s.io/v1alpha1", + 
"settings.k8s.io/v1alpha1/CreateOptions", + "settings.k8s.io/v1alpha1/DeleteOptions", + "settings.k8s.io/v1alpha1/ExportOptions", + "settings.k8s.io/v1alpha1/GetOptions", + "settings.k8s.io/v1alpha1/ListOptions", + "settings.k8s.io/v1alpha1/PatchOptions", + "settings.k8s.io/v1alpha1/PodPreset", + "settings.k8s.io/v1alpha1/PodPresetList", + "settings.k8s.io/v1alpha1/UpdateOptions", + "settings.k8s.io/v1alpha1/WatchEvent", + "storage.k8s.io/__internal", + "storage.k8s.io/__internal/WatchEvent", + "storage.k8s.io/v1", + "storage.k8s.io/v1/CreateOptions", + "storage.k8s.io/v1/DeleteOptions", + "storage.k8s.io/v1/ExportOptions", + "storage.k8s.io/v1/GetOptions", + "storage.k8s.io/v1/ListOptions", + "storage.k8s.io/v1/PatchOptions", + "storage.k8s.io/v1/StorageClass", + "storage.k8s.io/v1/StorageClassList", + "storage.k8s.io/v1/UpdateOptions", + "storage.k8s.io/v1/VolumeAttachment", + "storage.k8s.io/v1/VolumeAttachmentList", + "storage.k8s.io/v1/WatchEvent", + "storage.k8s.io/v1alpha1", + "storage.k8s.io/v1alpha1/CreateOptions", + "storage.k8s.io/v1alpha1/DeleteOptions", + "storage.k8s.io/v1alpha1/ExportOptions", + "storage.k8s.io/v1alpha1/GetOptions", + "storage.k8s.io/v1alpha1/ListOptions", + "storage.k8s.io/v1alpha1/PatchOptions", + "storage.k8s.io/v1alpha1/UpdateOptions", + "storage.k8s.io/v1alpha1/VolumeAttachment", + "storage.k8s.io/v1alpha1/VolumeAttachmentList", + "storage.k8s.io/v1alpha1/WatchEvent", + "storage.k8s.io/v1beta1", + "storage.k8s.io/v1beta1/CSIDriver", + "storage.k8s.io/v1beta1/CSIDriverList", + "storage.k8s.io/v1beta1/CSINode", + "storage.k8s.io/v1beta1/CSINodeList", + "storage.k8s.io/v1beta1/CreateOptions", + "storage.k8s.io/v1beta1/DeleteOptions", + "storage.k8s.io/v1beta1/ExportOptions", + "storage.k8s.io/v1beta1/GetOptions", + "storage.k8s.io/v1beta1/ListOptions", + "storage.k8s.io/v1beta1/PatchOptions", + "storage.k8s.io/v1beta1/StorageClass", + "storage.k8s.io/v1beta1/StorageClassList", + "storage.k8s.io/v1beta1/UpdateOptions", + "storage.k8s.io/v1beta1/VolumeAttachment", + "storage.k8s.io/v1beta1/VolumeAttachmentList", + "storage.k8s.io/v1beta1/WatchEvent", + "v1", + "v1/APIGroup", + "v1/APIGroupList", + "v1/APIResourceList", + "v1/APIVersions", + "v1/Binding", + "v1/ComponentStatus", + "v1/ComponentStatusList", + "v1/ConfigMap", + "v1/ConfigMapList", + "v1/CreateOptions", + "v1/DeleteOptions", + "v1/Endpoints", + "v1/EndpointsList", + "v1/Event", + "v1/EventList", + "v1/ExportOptions", + "v1/GetOptions", + "v1/LimitRange", + "v1/LimitRangeList", + "v1/List", + "v1/ListOptions", + "v1/Namespace", + "v1/NamespaceList", + "v1/Node", + "v1/NodeList", + "v1/NodeProxyOptions", + "v1/PatchOptions", + "v1/PersistentVolume", + "v1/PersistentVolumeClaim", + "v1/PersistentVolumeClaimList", + "v1/PersistentVolumeList", + "v1/Pod", + "v1/PodAttachOptions", + "v1/PodExecOptions", + "v1/PodList", + "v1/PodLogOptions", + "v1/PodPortForwardOptions", + "v1/PodProxyOptions", + "v1/PodStatusResult", + "v1/PodTemplate", + "v1/PodTemplateList", + "v1/RangeAllocation", + "v1/ReplicationController", + "v1/ReplicationControllerList", + "v1/ResourceQuota", + "v1/ResourceQuotaList", + "v1/Secret", + "v1/SecretList", + "v1/SerializedReference", + "v1/Service", + "v1/ServiceAccount", + "v1/ServiceAccountList", + "v1/ServiceList", + "v1/ServiceProxyOptions", + "v1/Status", + "v1/UpdateOptions", + "v1/WatchEvent", + } +} diff --git a/pkg/chartutil/create.go b/pkg/chartutil/create.go index 36b07adb5..09ac93a73 100644 --- a/pkg/chartutil/create.go +++ b/pkg/chartutil/create.go @@ -42,6 +42,8 @@ 
const ( DeploymentName = "deployment.yaml" // ServiceName is the name of the example service file. ServiceName = "service.yaml" + // ServiceAccountName is the name of the example serviceaccount file. + ServiceAccountName = "serviceaccount.yaml" // NotesName is the name of the example NOTES.txt file. NotesName = "NOTES.txt" // HelpersName is the name of the example helpers file. @@ -63,9 +65,28 @@ image: tag: stable pullPolicy: IfNotPresent +imagePullSecrets: [] nameOverride: "" fullnameOverride: "" +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + service: type: ClusterIP port: 80 @@ -75,9 +96,10 @@ ingress: annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" - paths: [] hosts: - - chart-example.local + - host: chart-example.local + paths: [] + tls: [] # - secretName: chart-example-tls # hosts: @@ -128,16 +150,13 @@ const defaultIgnore = `# Patterns to ignore when building packages. const defaultIngress = `{{- if .Values.ingress.enabled -}} {{- $fullName := include ".fullname" . -}} -{{- $ingressPaths := .Values.ingress.paths -}} +{{- $svcPort := .Values.service.port -}} apiVersion: extensions/v1beta1 kind: Ingress metadata: name: {{ $fullName }} labels: - app.kubernetes.io/name: {{ include ".name" . }} - helm.sh/chart: {{ include ".chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include ".labels" . | indent 4 }} {{- with .Values.ingress.annotations }} annotations: {{- toYaml . | nindent 4 }} @@ -155,15 +174,15 @@ spec: {{- end }} rules: {{- range .Values.ingress.hosts }} - - host: {{ . | quote }} + - host: {{ .host | quote }} http: paths: - {{- range $ingressPaths }} + {{- range .paths }} - path: {{ . }} backend: serviceName: {{ $fullName }} - servicePort: http - {{- end }} + servicePort: {{ $svcPort }} + {{- end }} {{- end }} {{- end }} ` @@ -173,10 +192,7 @@ kind: Deployment metadata: name: {{ include ".fullname" . }} labels: - app.kubernetes.io/name: {{ include ".name" . }} - helm.sh/chart: {{ include ".chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include ".labels" . | indent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: @@ -189,8 +205,17 @@ spec: app.kubernetes.io/name: {{ include ".name" . }} app.kubernetes.io/instance: {{ .Release.Name }} spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template ".serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: @@ -226,10 +251,7 @@ kind: Service metadata: name: {{ include ".fullname" . }} labels: - app.kubernetes.io/name: {{ include ".name" . }} - helm.sh/chart: {{ include ".chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include ".labels" . 
| indent 4 }} spec: type: {{ .Values.service.type }} ports: @@ -241,12 +263,21 @@ spec: app.kubernetes.io/name: {{ include ".name" . }} app.kubernetes.io/instance: {{ .Release.Name }} ` +const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template ".serviceAccountName" . }} + labels: +{{ include ".labels" . | indent 4 }} +{{- end -}} +` const defaultNotes = `1. Get the application URL by running these commands: {{- if .Values.ingress.enabled }} {{- range $host := .Values.ingress.hosts }} - {{- range $.Values.ingress.paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} {{- end }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} @@ -256,7 +287,7 @@ const defaultNotes = `1. Get the application URL by running these commands: {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") echo http://$SERVICE_IP:{{ .Values.service.port }} {{- else if contains "ClusterIP" .Values.service.type }} export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") @@ -297,6 +328,30 @@ Create chart name and version as used by the chart label. {{- define ".chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} + +{{/* +Common labels +*/}} +{{- define ".labels" -}} +app.kubernetes.io/name: {{ include ".name" . }} +helm.sh/chart: {{ include ".chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define ".serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} ` const defaultTestConnection = `apiVersion: v1 @@ -304,10 +359,7 @@ kind: Pod metadata: name: "{{ include ".fullname" . }}-test-connection" labels: - app.kubernetes.io/name: {{ include ".name" . }} - helm.sh/chart: {{ include ".chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include ".labels" . 
| indent 4 }} annotations: "helm.sh/hook": test-success spec: @@ -418,6 +470,11 @@ func Create(chartfile *chart.Metadata, dir string) (string, error) { path: filepath.Join(cdir, TemplatesDir, ServiceName), content: Transform(defaultService, "", chartfile.Name), }, + { + // serviceaccount.yaml + path: filepath.Join(cdir, TemplatesDir, ServiceAccountName), + content: Transform(defaultServiceAccount, "", chartfile.Name), + }, { // NOTES.txt path: filepath.Join(cdir, TemplatesDir, NotesName), diff --git a/pkg/chartutil/doc.go b/pkg/chartutil/doc.go index a4f6d4515..ad2224f94 100644 --- a/pkg/chartutil/doc.go +++ b/pkg/chartutil/doc.go @@ -17,7 +17,7 @@ limitations under the License. /*Package chartutil contains tools for working with charts. Charts are described in the protocol buffer definition (pkg/proto/hapi/charts). -This packe provides utilities for serializing and deserializing charts. +This package provides utilities for serializing and deserializing charts. A chart can be represented on the file system in one of two ways: diff --git a/pkg/chartutil/expand.go b/pkg/chartutil/expand.go index 1d49b159f..9ed021d9c 100644 --- a/pkg/chartutil/expand.go +++ b/pkg/chartutil/expand.go @@ -17,58 +17,60 @@ limitations under the License. package chartutil import ( - "archive/tar" - "compress/gzip" + "errors" "io" + "io/ioutil" "os" "path/filepath" + + securejoin "github.com/cyphar/filepath-securejoin" ) // Expand uncompresses and extracts a chart into the specified directory. func Expand(dir string, r io.Reader) error { - gr, err := gzip.NewReader(r) + files, err := loadArchiveFiles(r) if err != nil { return err } - defer gr.Close() - tr := tar.NewReader(gr) - for { - header, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - //split header name and create missing directories - d, _ := filepath.Split(header.Name) - fullDir := filepath.Join(dir, d) - _, err = os.Stat(fullDir) - if err != nil && d != "" { - if err := os.MkdirAll(fullDir, 0700); err != nil { + // Get the name of the chart + var chartName string + for _, file := range files { + if file.Name == "Chart.yaml" { + ch, err := UnmarshalChartfile(file.Data) + if err != nil { return err } + chartName = ch.GetName() } + } + if chartName == "" { + return errors.New("chart name not specified") + } - path := filepath.Clean(filepath.Join(dir, header.Name)) - info := header.FileInfo() - if info.IsDir() { - if err = os.MkdirAll(path, info.Mode()); err != nil { - return err - } - continue - } + // Find the base directory + chartdir, err := securejoin.SecureJoin(dir, chartName) + if err != nil { + return err + } - file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) + // Copy all files verbatim. We don't parse these files because parsing can remove + // comments. + for _, file := range files { + outpath, err := securejoin.SecureJoin(chartdir, file.Name) if err != nil { return err } - _, err = io.Copy(file, tr) - if err != nil { - file.Close() + + // Make sure the necessary subdirs get created. + basedir := filepath.Dir(outpath) + if err := os.MkdirAll(basedir, 0755); err != nil { + return err + } + + if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil { return err } - file.Close() } return nil } diff --git a/pkg/chartutil/expand_test.go b/pkg/chartutil/expand_test.go new file mode 100644 index 000000000..80fd4416b --- /dev/null +++ b/pkg/chartutil/expand_test.go @@ -0,0 +1,121 @@ +/* +Copyright The Helm Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestExpand(t *testing.T) { + dest, err := ioutil.TempDir("", "helm-testing-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + reader, err := os.Open("testdata/frobnitz-1.2.3.tgz") + if err != nil { + t.Fatal(err) + } + + if err := Expand(dest, reader); err != nil { + t.Fatal(err) + } + + expectedChartPath := filepath.Join(dest, "frobnitz") + fi, err := os.Stat(expectedChartPath) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("expected a chart directory at %s", expectedChartPath) + } + + dir, err := os.Open(expectedChartPath) + if err != nil { + t.Fatal(err) + } + + fis, err := dir.Readdir(0) + if err != nil { + t.Fatal(err) + } + + expectLen := 12 + if len(fis) != expectLen { + t.Errorf("Expected %d files, but got %d", expectLen, len(fis)) + } + + for _, fi := range fis { + expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name())) + if err != nil { + t.Fatal(err) + } + if fi.Size() != expect.Size() { + t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size()) + } + } +} + +func TestExpandFile(t *testing.T) { + dest, err := ioutil.TempDir("", "helm-testing-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + if err := ExpandFile(dest, "testdata/frobnitz-1.2.3.tgz"); err != nil { + t.Fatal(err) + } + + expectedChartPath := filepath.Join(dest, "frobnitz") + fi, err := os.Stat(expectedChartPath) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("expected a chart directory at %s", expectedChartPath) + } + + dir, err := os.Open(expectedChartPath) + if err != nil { + t.Fatal(err) + } + + fis, err := dir.Readdir(0) + if err != nil { + t.Fatal(err) + } + + expectLen := 12 + if len(fis) != expectLen { + t.Errorf("Expected %d files, but got %d", expectLen, len(fis)) + } + + for _, fi := range fis { + expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name())) + if err != nil { + t.Fatal(err) + } + if fi.Size() != expect.Size() { + t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size()) + } + } +} diff --git a/pkg/chartutil/generator/capabilities_default_versions_generate.go b/pkg/chartutil/generator/capabilities_default_versions_generate.go new file mode 100644 index 000000000..4ed312465 --- /dev/null +++ b/pkg/chartutil/generator/capabilities_default_versions_generate.go @@ -0,0 +1,106 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Generates the default versions to use with capabilities. This cannot be loaded +// dynamically as it uses enough memory to cause out of memory issues in CI. +// +// +build ignore +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path" + "sort" + + "k8s.io/client-go/kubernetes/scheme" +) + +const licenseHeader = `/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/` + +func main() { + v := getVersions() + + o := createOutput(v) + + err := ioutil.WriteFile("capabilities_versions_generated.go", o, 0644) + if err != nil { + fmt.Printf("writing output: %s", err) + os.Exit(1) + } +} + +func createOutput(v []string) []byte { + var out bytes.Buffer + + fmt.Fprintln(&out, licenseHeader) + fmt.Fprintln(&out, "// Code generated by capabilities_default_versions_generate.go; DO NOT EDIT.") + fmt.Fprint(&out, "package chartutil\n\n") + fmt.Fprintln(&out, "func defaultVersions() []string {") + fmt.Fprintln(&out, "\treturn []string{") + + for _, v := range v { + fmt.Fprintf(&out, "\t\t\"%s\",\n", v) + } + + fmt.Fprintln(&out, "\t}") + fmt.Fprintln(&out, "}") + + return out.Bytes() +} + +func getVersions() []string { + + var s []string + var gv string + var gvk string + + // Check is used so that we only add an item once to the return + check := make(map[string]struct{}) + + // Client go has a default scheme set with everything in it + // This includes over 500 group versions and group versioned kinds + for k := range scheme.Scheme.AllKnownTypes() { + gv = path.Join(k.Group, k.Version) + gvk = path.Join(k.Group, k.Version, k.Kind) + if _, ok := check[gv]; !ok { + check[gv] = struct{}{} + s = append(s, gv) + } + if _, ok := check[gvk]; !ok { + check[gvk] = struct{}{} + s = append(s, gvk) + } + } + + // Put the names in a consistent order + sort.Strings(s) + + return s +} diff --git a/pkg/chartutil/load.go b/pkg/chartutil/load.go index 9f1c80c85..95b835d5b 100644 --- a/pkg/chartutil/load.go +++ b/pkg/chartutil/load.go @@ -25,7 +25,9 @@ import ( "io" "io/ioutil" "os" + "path" "path/filepath" + "regexp" "strings" "github.com/golang/protobuf/ptypes/any" @@ -63,11 +65,13 @@ type BufferedFile struct { Data []byte } -// LoadArchive loads from a reader containing a compressed tar archive. -func LoadArchive(in io.Reader) (*chart.Chart, error) { +var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) + +// loadArchiveFiles loads files out of an archive +func loadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { unzipped, err := gzip.NewReader(in) if err != nil { - return &chart.Chart{}, err + return nil, err } defer unzipped.Close() @@ -80,7 +84,7 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) { break } if err != nil { - return &chart.Chart{}, err + return nil, err } if hd.FileInfo().IsDir() { @@ -89,6 +93,12 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) { continue } + switch hd.Typeflag { + // We don't want to process these extension header files. 
+ case tar.TypeXGlobalHeader, tar.TypeXHeader: + continue + } + // Archive could contain \ if generated on Windows delimiter := "/" if strings.ContainsRune(hd.Name, '\\') { @@ -101,12 +111,33 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) { // Normalize the path to the / delimiter n = strings.Replace(n, delimiter, "/", -1) + if path.IsAbs(n) { + return nil, errors.New("chart illegally contains absolute paths") + } + + n = path.Clean(n) + if n == "." { + // In this case, the original path was relative when it should have been absolute. + return nil, errors.New("chart illegally contains empty path") + } + if strings.HasPrefix(n, "..") { + return nil, errors.New("chart illegally references parent directory") + } + + // In some particularly arcane acts of path creativity, it is possible to intermix + // UNIX and Windows style paths in such a way that you produce a result of the form + // c:/foo even after all the built-in absolute path checks. So we explicitly check + // for this condition. + if drivePathPattern.MatchString(n) { + return nil, errors.New("chart contains illegally named files") + } + if parts[0] == "Chart.yaml" { return nil, errors.New("chart yaml not in base directory") } if _, err := io.Copy(b, tr); err != nil { - return &chart.Chart{}, err + return files, err } files = append(files, &BufferedFile{Name: n, Data: b.Bytes()}) @@ -116,7 +147,15 @@ func LoadArchive(in io.Reader) (*chart.Chart, error) { if len(files) == 0 { return nil, errors.New("no files in chart archive") } + return files, nil +} +// LoadArchive loads from a reader containing a compressed tar archive. +func LoadArchive(in io.Reader) (*chart.Chart, error) { + files, err := loadArchiveFiles(in) + if err != nil { + return nil, err + } return LoadFiles(files) } @@ -132,6 +171,10 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { return c, err } c.Metadata = m + var apiVersion = c.Metadata.ApiVersion + if apiVersion != "" && apiVersion != ApiVersionV1 { + return c, fmt.Errorf("apiVersion '%s' is not valid. The value must be \"v1\"", apiVersion) + } } else if f.Name == "values.toml" { return c, errors.New("values.toml is illegal as of 2.0.0-alpha.2") } else if f.Name == "values.yaml" { diff --git a/pkg/chartutil/load_test.go b/pkg/chartutil/load_test.go index 5cb15fbdc..f139abf5c 100644 --- a/pkg/chartutil/load_test.go +++ b/pkg/chartutil/load_test.go @@ -17,8 +17,15 @@ limitations under the License. package chartutil import ( + "archive/tar" + "compress/gzip" + "io/ioutil" + "os" "path" + "path/filepath" + "strings" "testing" + "time" "k8s.io/helm/pkg/proto/hapi/chart" ) @@ -33,6 +40,17 @@ func TestLoadDir(t *testing.T) { verifyRequirements(t, c) } +func TestLoadNonV1Chart(t *testing.T) { + _, err := Load("testdata/frobnitz.v2") + if err != nil { + if strings.Compare(err.Error(), "apiVersion 'v2' is not valid. 
The value must be \"v1\"") != 0 { + t.Errorf("Unexpected message: %s", err) + } + return + } + t.Fatalf("chart with v2 apiVersion should not load") +} + func TestLoadFile(t *testing.T) { c, err := Load("testdata/frobnitz-1.2.3.tgz") if err != nil { @@ -43,6 +61,97 @@ func TestLoadFile(t *testing.T) { verifyRequirements(t, c) } +func TestLoadArchive_InvalidArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "helm-test-") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpdir) + + writeTar := func(filename, internalPath string, body []byte) { + dest, err := os.Create(filename) + if err != nil { + t.Fatal(err) + } + zipper := gzip.NewWriter(dest) + tw := tar.NewWriter(zipper) + + h := &tar.Header{ + Name: internalPath, + Mode: 0755, + Size: int64(len(body)), + ModTime: time.Now(), + } + if err := tw.WriteHeader(h); err != nil { + t.Fatal(err) + } + if _, err := tw.Write(body); err != nil { + t.Fatal(err) + } + tw.Close() + zipper.Close() + dest.Close() + } + + for _, tt := range []struct { + chartname string + internal string + expectError string + }{ + {"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"}, + {"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"}, + {"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"}, + {"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"}, + {"illegal-name.tgz", "./.", "chart illegally contains empty path"}, + {"illegal-name2.tgz", "/./.", "chart illegally contains empty path"}, + {"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains empty path"}, + {"illegal-name4.tgz", "/missing-leading-slash", "chart metadata (Chart.yaml) missing"}, + {"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"}, + {"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"}, + {"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"}, + {"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"}, + + // Under special circumstances, this can get normalized to things that look like absolute Windows paths + {"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"}, + {"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"}, + {"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"}, + } { + illegalChart := filepath.Join(tmpdir, tt.chartname) + writeTar(illegalChart, tt.internal, []byte("hello: world")) + _, err = Load(illegalChart) + if err == nil { + t.Fatal("expected error when unpacking illegal files") + } + if err.Error() != tt.expectError { + t.Errorf("Expected %q, got %q for %s", tt.expectError, err.Error(), tt.chartname) + } + } + + // Make sure that absolute path gets interpreted as relative + illegalChart := filepath.Join(tmpdir, "abs-path.tgz") + writeTar(illegalChart, "/Chart.yaml", []byte("hello: world")) + _, err = Load(illegalChart) + if err.Error() != "invalid chart (Chart.yaml): name must not be empty" { + t.Error(err) + } + + // And just to validate that the above was not spurious + illegalChart = filepath.Join(tmpdir, "abs-path2.tgz") + writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world")) + _, err = Load(illegalChart) + if err.Error() != "chart metadata (Chart.yaml) missing" { + t.Error(err) + } + + // Finally, test that drive letter gets stripped off on 
Windows + illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz") + writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world")) + _, err = Load(illegalChart) + if err.Error() != "invalid chart (Chart.yaml): name must not be empty" { + t.Error(err) + } +} + func TestLoadFiles(t *testing.T) { goodFiles := []*BufferedFile{ { diff --git a/pkg/chartutil/requirements.go b/pkg/chartutil/requirements.go index 0f1128305..4c9713233 100644 --- a/pkg/chartutil/requirements.go +++ b/pkg/chartutil/requirements.go @@ -85,7 +85,7 @@ type Requirements struct { // // It represents the state that the dependencies should be in. type RequirementsLock struct { - // Genderated is the date the lock file was last generated. + // Generated is the date the lock file was last generated. Generated time.Time `json:"generated"` // Digest is a hash of the requirements file used to generate it. Digest string `json:"digest"` @@ -124,7 +124,7 @@ func LoadRequirementsLock(c *chart.Chart) (*RequirementsLock, error) { } // ProcessRequirementsConditions disables charts based on condition path value in values -func ProcessRequirementsConditions(reqs *Requirements, cvals Values) { +func ProcessRequirementsConditions(reqs *Requirements, cvals Values, cpath string) { var cond string var conds []string if reqs == nil || len(reqs.Dependencies) == 0 { @@ -143,7 +143,7 @@ func ProcessRequirementsConditions(reqs *Requirements, cvals Values) { for _, c := range conds { if len(c) > 0 { // retrieve value - vv, err := cvals.PathValue(c) + vv, err := cvals.PathValue(cpath + c) if err == nil { // if not bool, warn if bv, ok := vv.(bool); ok { @@ -247,6 +247,10 @@ func getAliasDependency(charts []*chart.Chart, aliasChart *Dependency) *chart.Ch // ProcessRequirementsEnabled removes disabled charts from dependencies func ProcessRequirementsEnabled(c *chart.Chart, v *chart.Config) error { + return doProcessRequirementsEnabled(c, v, "") +} + +func doProcessRequirementsEnabled(c *chart.Chart, v *chart.Config, path string) error { reqs, err := LoadRequirements(c) if err != nil { // if not just missing requirements file, return error @@ -303,7 +307,7 @@ func ProcessRequirementsEnabled(c *chart.Chart, v *chart.Config) error { cc := chart.Config{Raw: yvals} // flag dependencies as enabled/disabled ProcessRequirementsTags(reqs, cvals) - ProcessRequirementsConditions(reqs, cvals) + ProcessRequirementsConditions(reqs, cvals, path) // make a map of charts to remove rm := map[string]bool{} for _, r := range reqs.Dependencies { @@ -323,7 +327,8 @@ func ProcessRequirementsEnabled(c *chart.Chart, v *chart.Config) error { } // recursively call self to process sub dependencies for _, t := range cd { - err := ProcessRequirementsEnabled(t, &cc) + subpath := path + t.Metadata.Name + "." 
+ err := doProcessRequirementsEnabled(t, &cc, subpath) // if its not just missing requirements file, return error if nerr, ok := err.(ErrNoRequirementsFile); !ok && err != nil { return nerr @@ -391,7 +396,7 @@ func processImportValues(c *chart.Chart) error { if err != nil { return err } - b := make(map[string]interface{}, 0) + b := cvals.AsMap() // import values from each dependency if specified in import-values for _, r := range reqs.Dependencies { // only process raw requirement that is found in chart's dependencies (enabled) @@ -428,7 +433,7 @@ func processImportValues(c *chart.Chart) error { } // create value map from child to be merged into parent vm := pathToMap(nm["parent"], vv.AsMap()) - b = coalesceTables(cvals, vm, c.Metadata.Name) + b = coalesceTables(b, vm, c.Metadata.Name) case string: nm := map[string]string{ "child": "exports." + iv, @@ -448,7 +453,6 @@ func processImportValues(c *chart.Chart) error { r.ImportValues = outiv } } - b = coalesceTables(b, cvals, c.Metadata.Name) y, err := yaml.Marshal(b) if err != nil { return err diff --git a/pkg/chartutil/requirements_test.go b/pkg/chartutil/requirements_test.go index e433f92ea..17a3b4780 100644 --- a/pkg/chartutil/requirements_test.go +++ b/pkg/chartutil/requirements_test.go @@ -15,6 +15,7 @@ limitations under the License. package chartutil import ( + "encoding/json" "os" "path/filepath" "sort" @@ -157,7 +158,7 @@ func TestRequirementsCombinedDisabledL2(t *testing.T) { t.Fatalf("Failed to load testdata: %s", err) } // tags enabling a parent/child group with condition disabling one child - v := &chart.Config{Raw: "subchartc:\n enabled: false\ntags:\n back-end: true\n"} + v := &chart.Config{Raw: "subchart2:\n subchartc:\n enabled: false\ntags:\n back-end: true\n"} // expected charts including duplicates in alphanumeric order e := []string{"parentchart", "subchart1", "subchart2", "subcharta", "subchartb", "subchartb"} @@ -175,6 +176,15 @@ func TestRequirementsCombinedDisabledL1(t *testing.T) { verifyRequirementsEnabled(t, c, v, e) } +func TestRequirementsAliasCondition(t *testing.T) { + c, err := Load("testdata/subpop") + if err != nil { + t.Fatalf("Failed to load testdata: %s", err) + } + v := &chart.Config{Raw: "subchart1:\n enabled: false\nsubchart2alias:\n enabled: true\n subchartb:\n enabled: true\n"} + e := []string{"parentchart", "subchart2alias", "subchartb"} + verifyRequirementsEnabled(t, c, v, e) +} func verifyRequirementsEnabled(t *testing.T, c *chart.Chart, v *chart.Config, e []string) { out := []*chart.Chart{} @@ -302,6 +312,10 @@ func verifyRequirementsImportValues(t *testing.T, c *chart.Chart, v *chart.Confi } switch pv.(type) { + case json.Number: + if s := pv.(json.Number).String(); s != vv { + t.Errorf("Failed to match imported number value %v with expected %v", s, vv) + } case float64: s := strconv.FormatFloat(pv.(float64), 'f', -1, 64) if s != vv { diff --git a/pkg/chartutil/save.go b/pkg/chartutil/save.go index 400b85e91..0482b1eb9 100644 --- a/pkg/chartutil/save.go +++ b/pkg/chartutil/save.go @@ -63,6 +63,12 @@ func SaveDir(c *chart.Chart, dest string) error { // Save templates for _, f := range c.Templates { n := filepath.Join(outdir, f.Name) + + d := filepath.Dir(n) + if err := os.MkdirAll(d, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(n, f.Data, 0644); err != nil { return err } diff --git a/pkg/chartutil/save_test.go b/pkg/chartutil/save_test.go index 0ec305e78..9952fbbb0 100644 --- a/pkg/chartutil/save_test.go +++ b/pkg/chartutil/save_test.go @@ -48,6 +48,9 @@ func TestSave(t 
*testing.T) { Files: []*any.Any{ {TypeUrl: "scheherazade/shahryar.txt", Value: []byte("1,001 Nights")}, }, + Templates: []*chart.Template{ + {Name: "templates/scheherazade/shahryar.txt.tmpl", Data: []byte("{{ \"1,001 Nights\" }}")}, + }, } where, err := Save(c, tmp) @@ -75,6 +78,9 @@ func TestSave(t *testing.T) { if len(c2.Files) != 1 || c2.Files[0].TypeUrl != "scheherazade/shahryar.txt" { t.Fatal("Files data did not match") } + if len(c2.Templates) != 1 || c2.Templates[0].Name != "templates/scheherazade/shahryar.txt.tmpl" { + t.Fatal("Templates data did not match") + } } func TestSavePreservesTimestamps(t *testing.T) { @@ -100,6 +106,9 @@ func TestSavePreservesTimestamps(t *testing.T) { Files: []*any.Any{ {TypeUrl: "scheherazade/shahryar.txt", Value: []byte("1,001 Nights")}, }, + Templates: []*chart.Template{ + {Name: "templates/scheherazade/shahryar.txt.tmpl", Data: []byte("{{ \"1,001 Nights\" }}")}, + }, } where, err := Save(c, tmp) @@ -171,6 +180,9 @@ func TestSaveDir(t *testing.T) { Files: []*any.Any{ {TypeUrl: "scheherazade/shahryar.txt", Value: []byte("1,001 Nights")}, }, + Templates: []*chart.Template{ + {Name: "templates/scheherazade/shahryar.txt.tmpl", Data: []byte("{{ \"1,001 Nights\" }}")}, + }, } if err := SaveDir(c, tmp); err != nil { @@ -191,4 +203,7 @@ func TestSaveDir(t *testing.T) { if len(c2.Files) != 1 || c2.Files[0].TypeUrl != "scheherazade/shahryar.txt" { t.Fatal("Files data did not match") } + if len(c2.Templates) != 1 || c2.Templates[0].Name != "templates/scheherazade/shahryar.txt.tmpl" { + t.Fatal("Templates data did not match") + } } diff --git a/pkg/chartutil/testdata/coleridge.yaml b/pkg/chartutil/testdata/coleridge.yaml index b6579628b..15535988b 100644 --- a/pkg/chartutil/testdata/coleridge.yaml +++ b/pkg/chartutil/testdata/coleridge.yaml @@ -10,3 +10,4 @@ water: water: where: "everywhere" nor: "any drop to drink" + temperature: 1234567890 diff --git a/pkg/chartutil/testdata/frobnitz.v2/.helmignore b/pkg/chartutil/testdata/frobnitz.v2/.helmignore new file mode 100644 index 000000000..9973a57b8 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/.helmignore @@ -0,0 +1 @@ +ignore/ diff --git a/pkg/chartutil/testdata/frobnitz.v2/Chart.yaml b/pkg/chartutil/testdata/frobnitz.v2/Chart.yaml new file mode 100644 index 000000000..157af54c1 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +name: frobnitz +description: This is a frobnitz as a v2 chart +version: "1.2.3" +keywords: + - frobnitz + - sprocket + - dodad +maintainers: + - name: The Helm Team + email: helm@example.com + - name: Someone Else + email: nobody@example.com +sources: + - https://example.com/foo/bar +home: http://example.com +icon: https://example.com/64x64.png +annotations: + extrakey: extravalue + anotherkey: anothervalue diff --git a/pkg/chartutil/testdata/frobnitz.v2/INSTALL.txt b/pkg/chartutil/testdata/frobnitz.v2/INSTALL.txt new file mode 100644 index 000000000..2010438c2 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/INSTALL.txt @@ -0,0 +1 @@ +This is an install document. The client may display this. diff --git a/pkg/chartutil/testdata/frobnitz.v2/LICENSE b/pkg/chartutil/testdata/frobnitz.v2/LICENSE new file mode 100644 index 000000000..6121943b1 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/LICENSE @@ -0,0 +1 @@ +LICENSE placeholder. 
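The frobnitz.v2 fixture being added in these testdata files exists to exercise the apiVersion guard introduced in LoadFiles earlier in this patch (see TestLoadNonV1Chart). As a rough standalone sketch of that guard, using a stand-in Metadata type rather than the real hapi/chart one:

```go
// Sketch only: standalone illustration of the apiVersion check added to
// LoadFiles above. Metadata here is a stand-in, not the real chart proto.
package main

import (
	"errors"
	"fmt"
)

const apiVersionV1 = "v1"

type Metadata struct {
	Name       string
	ApiVersion string
}

func checkAPIVersion(m *Metadata) error {
	if m == nil {
		return errors.New("chart metadata is missing")
	}
	// An empty apiVersion stays accepted so existing charts keep loading;
	// anything else must be exactly "v1" for a Helm 2 chart.
	if m.ApiVersion != "" && m.ApiVersion != apiVersionV1 {
		return fmt.Errorf("apiVersion '%s' is not valid. The value must be \"v1\"", m.ApiVersion)
	}
	return nil
}

func main() {
	fmt.Println(checkAPIVersion(&Metadata{Name: "frobnitz", ApiVersion: "v2"}))
}
```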
diff --git a/pkg/chartutil/testdata/frobnitz.v2/README.md b/pkg/chartutil/testdata/frobnitz.v2/README.md new file mode 100644 index 000000000..8cf4cc3d7 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/README.md @@ -0,0 +1,11 @@ +# Frobnitz + +This is an example chart. + +## Usage + +This is an example. It has no usage. + +## Development + +For developer info, see the top-level repository. diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/_ignore_me b/pkg/chartutil/testdata/frobnitz.v2/charts/_ignore_me new file mode 100644 index 000000000..2cecca682 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/_ignore_me @@ -0,0 +1 @@ +This should be ignored by the loader, but may be included in a chart. diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/Chart.yaml b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/Chart.yaml new file mode 100644 index 000000000..38a4aaa54 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/Chart.yaml @@ -0,0 +1,4 @@ +name: alpine +description: Deploy a basic Alpine Linux pod +version: 0.1.0 +home: https://k8s.io/helm diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/README.md b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/README.md new file mode 100644 index 000000000..a7c84fc41 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/README.md @@ -0,0 +1,9 @@ +This example was generated using the command `helm create alpine`. + +The `templates/` directory contains a very simple pod resource with a +couple of parameters. + +The `values.toml` file contains the default values for the +`alpine-pod.yaml` template. + +You can install this example using `helm install docs/examples/alpine`. diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/Chart.yaml b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/Chart.yaml new file mode 100644 index 000000000..171e36156 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/Chart.yaml @@ -0,0 +1,4 @@ +name: mast1 +description: A Helm chart for Kubernetes +version: 0.1.0 +home: "" diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/values.yaml b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/values.yaml new file mode 100644 index 000000000..42c39c262 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast1/values.yaml @@ -0,0 +1,4 @@ +# Default values for mast1. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. 
+# name = "value" diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast2-0.1.0.tgz b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast2-0.1.0.tgz new file mode 100644 index 000000000..ced5a4a6a Binary files /dev/null and b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/charts/mast2-0.1.0.tgz differ diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/templates/alpine-pod.yaml b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/templates/alpine-pod.yaml new file mode 100644 index 000000000..c34fa8c47 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/templates/alpine-pod.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{.Release.Name}}-{{.Chart.Name}} + labels: + app.kubernetes.io/managed-by: {{.Release.Service}} + chartName: {{.Chart.Name}} + chartVersion: {{.Chart.Version | quote}} + annotations: + "helm.sh/created": "{{.Release.Time.Seconds}}" +spec: + restartPolicy: {{default "Never" .restart_policy}} + containers: + - name: waiter + image: "alpine:3.3" + command: ["/bin/sleep","9000"] diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/values.yaml b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/values.yaml new file mode 100644 index 000000000..6c2aab7ba --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/charts/alpine/values.yaml @@ -0,0 +1,2 @@ +# The pod name +name: "my-alpine" diff --git a/pkg/chartutil/testdata/frobnitz.v2/charts/mariner-4.3.2.tgz b/pkg/chartutil/testdata/frobnitz.v2/charts/mariner-4.3.2.tgz new file mode 100644 index 000000000..78fabe241 Binary files /dev/null and b/pkg/chartutil/testdata/frobnitz.v2/charts/mariner-4.3.2.tgz differ diff --git a/pkg/chartutil/testdata/frobnitz.v2/docs/README.md b/pkg/chartutil/testdata/frobnitz.v2/docs/README.md new file mode 100644 index 000000000..d40747caf --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/docs/README.md @@ -0,0 +1 @@ +This is a placeholder for documentation. 
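The loadArchiveFiles changes and the TestLoadArchive_InvalidArchive table earlier in this patch boil down to a per-entry name check: normalise separators, then refuse anything absolute, parent-escaping, empty, or drive-letter-shaped. A simplified standalone sketch of that check follows; the real loader also strips the chart's top-level directory from each entry first (omitted here), and validateEntryName plus the exact drivePathPattern below are illustrative stand-ins:

```go
// Sketch only: the spirit of the tar entry-name validation added to
// loadArchiveFiles above.
package main

import (
	"errors"
	"fmt"
	"path"
	"regexp"
	"strings"
)

// Assumed to be close to the drivePathPattern the loader checks against.
var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)

func validateEntryName(name string) (string, error) {
	// Archives generated on Windows may use backslashes as separators.
	n := strings.Replace(name, "\\", "/", -1)

	if path.IsAbs(n) {
		return "", errors.New("chart illegally contains absolute paths")
	}
	n = path.Clean(n)
	if n == "." {
		return "", errors.New("chart illegally contains empty path")
	}
	if strings.HasPrefix(n, "..") {
		return "", errors.New("chart illegally references parent directory")
	}
	// Mixed UNIX/Windows input can still normalise to something like "c:/foo".
	if drivePathPattern.MatchString(n) {
		return "", errors.New("chart contains illegally named files")
	}
	return n, nil
}

func main() {
	for _, name := range []string{"templates/deploy.yaml", "../../outside", "/etc/passwd", "c:/foo"} {
		n, err := validateEntryName(name)
		fmt.Printf("%-24q -> %q, %v\n", name, n, err)
	}
}
```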
diff --git a/pkg/chartutil/testdata/frobnitz.v2/icon.svg b/pkg/chartutil/testdata/frobnitz.v2/icon.svg new file mode 100644 index 000000000..892130606 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/icon.svg @@ -0,0 +1,8 @@ + + + Example icon + + + diff --git a/pkg/chartutil/testdata/frobnitz.v2/ignore/me.txt b/pkg/chartutil/testdata/frobnitz.v2/ignore/me.txt new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/chartutil/testdata/frobnitz.v2/requirements.lock b/pkg/chartutil/testdata/frobnitz.v2/requirements.lock new file mode 100644 index 000000000..6fcc2ed9f --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/requirements.lock @@ -0,0 +1,8 @@ +dependencies: + - name: alpine + version: "0.1.0" + repository: https://example.com/charts + - name: mariner + version: "4.3.2" + repository: https://example.com/charts +digest: invalid diff --git a/pkg/chartutil/testdata/frobnitz.v2/requirements.yaml b/pkg/chartutil/testdata/frobnitz.v2/requirements.yaml new file mode 100644 index 000000000..5eb0bc98b --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: alpine + version: "0.1.0" + repository: https://example.com/charts + - name: mariner + version: "4.3.2" + repository: https://example.com/charts diff --git a/pkg/chartutil/testdata/frobnitz.v2/templates/template.tpl b/pkg/chartutil/testdata/frobnitz.v2/templates/template.tpl new file mode 100644 index 000000000..c651ee6a0 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/templates/template.tpl @@ -0,0 +1 @@ +Hello {{.Name | default "world"}} diff --git a/pkg/chartutil/testdata/frobnitz.v2/values.yaml b/pkg/chartutil/testdata/frobnitz.v2/values.yaml new file mode 100644 index 000000000..61f501258 --- /dev/null +++ b/pkg/chartutil/testdata/frobnitz.v2/values.yaml @@ -0,0 +1,6 @@ +# A values file contains configuration. 
+ +name: "Some Name" + +section: + name: "Name in a section" diff --git a/pkg/chartutil/testdata/moby/charts/spouter/values.yaml b/pkg/chartutil/testdata/moby/charts/spouter/values.yaml index f71d92a9f..36cdff290 100644 --- a/pkg/chartutil/testdata/moby/charts/spouter/values.yaml +++ b/pkg/chartutil/testdata/moby/charts/spouter/values.yaml @@ -1 +1,2 @@ scope: spouter +foo: bar \ No newline at end of file diff --git a/pkg/chartutil/testdata/moby/values.yaml b/pkg/chartutil/testdata/moby/values.yaml index 54e1ce463..8cea245ce 100644 --- a/pkg/chartutil/testdata/moby/values.yaml +++ b/pkg/chartutil/testdata/moby/values.yaml @@ -7,3 +7,22 @@ right: exists left: exists front: exists back: exists + +# nested tables for null coalesce testing +web: + livenessProbe: + failureThreshold: 5 + httpGet: + path: /api/v1/info + port: atc + initialDelaySeconds: 10 + periodSeconds: 15 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /api/v1/info + port: atc + +# for testing deleting default values in sub charts +spouter: + foo: null \ No newline at end of file diff --git a/pkg/chartutil/testdata/subpop/charts/subchart1/requirements.yaml b/pkg/chartutil/testdata/subpop/charts/subchart1/requirements.yaml index abfe85e76..d9383dc4f 100644 --- a/pkg/chartutil/testdata/subpop/charts/subchart1/requirements.yaml +++ b/pkg/chartutil/testdata/subpop/charts/subchart1/requirements.yaml @@ -2,7 +2,7 @@ dependencies: - name: subcharta repository: http://localhost:10191 version: 0.1.0 - condition: subcharta.enabled,subchart1.subcharta.enabled + condition: subcharta.enabled tags: - front-end - subcharta diff --git a/pkg/chartutil/testdata/subpop/charts/subchart2/requirements.yaml b/pkg/chartutil/testdata/subpop/charts/subchart2/requirements.yaml index 1f0023a08..d65d73dcd 100644 --- a/pkg/chartutil/testdata/subpop/charts/subchart2/requirements.yaml +++ b/pkg/chartutil/testdata/subpop/charts/subchart2/requirements.yaml @@ -2,7 +2,7 @@ dependencies: - name: subchartb repository: http://localhost:10191 version: 0.1.0 - condition: subchartb.enabled,subchart2.subchartb.enabled + condition: subchartb.enabled tags: - back-end - subchartb diff --git a/pkg/chartutil/testdata/subpop/requirements.yaml b/pkg/chartutil/testdata/subpop/requirements.yaml index a8eb0aace..a6ee20f07 100644 --- a/pkg/chartutil/testdata/subpop/requirements.yaml +++ b/pkg/chartutil/testdata/subpop/requirements.yaml @@ -29,3 +29,9 @@ dependencies: tags: - back-end - subchart2 + + - name: subchart2 + alias: subchart2alias + repository: http://localhost:10191 + version: 0.1.0 + condition: subchart2alias.enabled diff --git a/pkg/chartutil/testdata/subpop/values.yaml b/pkg/chartutil/testdata/subpop/values.yaml index 55e872d41..68eb1323c 100644 --- a/pkg/chartutil/testdata/subpop/values.yaml +++ b/pkg/chartutil/testdata/subpop/values.yaml @@ -39,3 +39,5 @@ tags: front-end: true back-end: false +subchart2alias: + enabled: false \ No newline at end of file diff --git a/pkg/chartutil/values.go b/pkg/chartutil/values.go index 352524c13..f82b95950 100644 --- a/pkg/chartutil/values.go +++ b/pkg/chartutil/values.go @@ -17,6 +17,7 @@ limitations under the License. package chartutil import ( + "encoding/json" "errors" "fmt" "io" @@ -132,7 +133,10 @@ func tableLookup(v Values, simple string) (Values, error) { // ReadValues will parse YAML byte data into a Values. 
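The ReadValues change just below opts the YAML-to-JSON decoder into json.Number, so large integers such as the temperature value added to coleridge.yaml above stop degrading to float64. A small stdlib-only illustration of the difference (this is not the ghodss/yaml option hook the patch actually uses):

```go
// Sketch only: why the loader now opts in to json.Number. With the default
// decoder a large integer becomes a float64 and prints in scientific
// notation; with UseNumber it round-trips as the original literal.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"temperature": 1234567890}`)

	var plain map[string]interface{}
	json.Unmarshal(data, &plain)
	fmt.Printf("%v\n", plain["temperature"]) // 1.23456789e+09

	var preserved map[string]interface{}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	dec.Decode(&preserved)
	fmt.Printf("%v\n", preserved["temperature"]) // 1234567890
}
```

The json.Number case added to verifyRequirementsImportValues in requirements_test.go is the other half of this: imported values may now arrive as json.Number rather than float64.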
func ReadValues(data []byte) (vals Values, err error) { - err = yaml.Unmarshal(data, &vals) + err = yaml.Unmarshal(data, &vals, func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + }) if len(vals) == 0 { vals = Values{} } @@ -166,15 +170,10 @@ func CoalesceValues(chrt *chart.Chart, vals *chart.Config) (Values, error) { if err != nil { return cvals, err } - cvals, err = coalesce(chrt, evals) - if err != nil { - return cvals, err - } + return coalesce(chrt, evals) } - var err error - cvals, err = coalesceDeps(chrt, cvals) - return cvals, err + return coalesceDeps(chrt, cvals) } // coalesce coalesces the dest values and the chart values, giving priority to the dest values. @@ -186,8 +185,7 @@ func coalesce(ch *chart.Chart, dest map[string]interface{}) (map[string]interfac if err != nil { return dest, err } - coalesceDeps(ch, dest) - return dest, nil + return coalesceDeps(ch, dest) } // coalesceDeps coalesces the dependencies of the given chart. @@ -203,7 +201,7 @@ func coalesceDeps(chrt *chart.Chart, dest map[string]interface{}) (map[string]in dvmap := dv.(map[string]interface{}) // Get globals out of dest and merge them into dvmap. - coalesceGlobals(dvmap, dest, chrt.Metadata.Name) + dvmap = coalesceGlobals(dvmap, dest, chrt.Metadata.Name) var err error // Now coalesce the rest of the values. @@ -236,45 +234,20 @@ func coalesceGlobals(dest, src map[string]interface{}, chartName string) map[str return dg } + rv := make(map[string]interface{}) + for k, v := range dest { + rv[k] = v + } + // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This // reverses that decision. It may somehow be possible to introduce a loop // here, but I haven't found a way. So for the time being, let's allow // tables in globals. - for key, val := range sg { - if istable(val) { - vv := copyMap(val.(map[string]interface{})) - if destv, ok := dg[key]; ok { - if destvmap, ok := destv.(map[string]interface{}); ok { - // Basically, we reverse order of coalesce here to merge - // top-down. - coalesceTables(vv, destvmap, chartName) - dg[key] = vv - continue - } else { - log.Printf("Warning: For chart '%s', cannot merge map onto non-map for key '%q'. Skipping.", chartName, key) - } - } else { - // Here there is no merge. We're just adding. - dg[key] = vv - } - } else if dv, ok := dg[key]; ok && istable(dv) { - // It's not clear if this condition can actually ever trigger. - log.Printf("Warning: For chart '%s', key '%s' is a table. Skipping.", chartName, key) - continue - } - // TODO: Do we need to do any additional checking on the value? - dg[key] = val - } - dest[GlobalKey] = dg - return dest -} -func copyMap(src map[string]interface{}) map[string]interface{} { - dest := make(map[string]interface{}, len(src)) - for k, v := range src { - dest[k] = v - } - return dest + // Basically, we reverse order of coalesce here to merge + // top-down. + rv[GlobalKey] = coalesceTables(sg, dg, chartName) + return rv } // coalesceValues builds up a values map for a particular chart. @@ -294,30 +267,7 @@ func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interf return v, fmt.Errorf("Error: Reading chart '%s' default values (%s): %s", c.Metadata.Name, c.Values.Raw, err) } - for key, val := range nv { - if value, ok := v[key]; ok { - if value == nil { - // When the YAML value is null, we remove the value's key. - // This allows Helm's various sources of values (value files or --set) to - // remove incompatible keys from any previous chart, file, or set values. 
- delete(v, key) - } else if dest, ok := value.(map[string]interface{}); ok { - // if v[key] is a table, merge nv's val table into v[key]. - src, ok := val.(map[string]interface{}) - if !ok { - log.Printf("Warning: Building values map for chart '%s'. Skipped value (%+v) for '%s', as it is not a table.", c.Metadata.Name, src, key) - continue - } - // Because v has higher precedence than nv, dest values override src - // values. - coalesceTables(dest, src, c.Metadata.Name) - } - } else { - // If the key is not in v, copy it from nv. - v[key] = val - } - } - return v, nil + return coalesceTables(v, nv.AsMap(), c.Metadata.Name), nil } // coalesceTables merges a source map into a destination map. @@ -326,25 +276,49 @@ func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interf func coalesceTables(dst, src map[string]interface{}, chartName string) map[string]interface{} { // Because dest has higher precedence than src, dest values override src // values. + + rv := make(map[string]interface{}) for key, val := range src { - if istable(val) { - if innerdst, ok := dst[key]; !ok { - dst[key] = val - } else if istable(innerdst) { - coalesceTables(innerdst.(map[string]interface{}), val.(map[string]interface{}), chartName) - } else { - log.Printf("Warning: Merging destination map for chart '%s'. Cannot overwrite table item '%s', with non table value: %v", chartName, key, val) - } + dv, ok := dst[key] + if !ok { // if not in dst, then copy from src + rv[key] = val continue - } else if dv, ok := dst[key]; ok && istable(dv) { - log.Printf("Warning: Merging destination map for chart '%s'. The destination item '%s' is a table and ignoring the source '%s' as it has a non-table value of: %v", chartName, key, key, val) + } + if dv == nil { // if set to nil in dst, then ignore + // When the YAML value is null, we skip the value's key. + // This allows Helm's various sources of values (value files or --set) to + // remove incompatible keys from any previous chart, file, or set values. continue - } else if !ok { // <- ok is still in scope from preceding conditional. - dst[key] = val + } + + srcTable, srcIsTable := val.(map[string]interface{}) + dstTable, dstIsTable := dv.(map[string]interface{}) + switch { + case srcIsTable && dstIsTable: // both tables, we coalesce + rv[key] = coalesceTables(dstTable, srcTable, chartName) + case srcIsTable && !dstIsTable: + log.Printf("Warning: Merging destination map for chart '%s'. Overwriting table item '%s', with non table value: %v", chartName, key, dv) + rv[key] = dv + case !srcIsTable && dstIsTable: + log.Printf("Warning: Merging destination map for chart '%s'. The destination item '%s' is a table and ignoring the source '%s' as it has a non-table value of: %v", chartName, key, key, val) + rv[key] = dv + default: // neither are tables, simply take the dst value + rv[key] = dv + } + } + + // do we have anything in dst that wasn't processed already that we need to copy across? 
+ for key, val := range dst { + if val == nil { continue } + _, ok := rv[key] + if !ok { + rv[key] = val + } } - return dst + + return rv } // ReleaseOptions represents the additional release options needed diff --git a/pkg/chartutil/values_test.go b/pkg/chartutil/values_test.go index 3fea14c3a..04b507fbc 100644 --- a/pkg/chartutil/values_test.go +++ b/pkg/chartutil/values_test.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "testing" "text/template" @@ -52,6 +53,7 @@ water: water: where: "everywhere" nor: "any drop to drink" + temperature: 1234567890 ` data, err := ReadValues([]byte(doc)) @@ -265,6 +267,12 @@ func matchValues(t *testing.T, data map[string]interface{}) { } else if o != "everywhere" { t.Errorf("Expected water water everywhere") } + + if o, err := ttpl("{{.water.water.temperature}}", data); err != nil { + t.Errorf(".water.water.temperature: %s", err) + } else if o != "1234567890" { + t.Errorf("Expected water water temperature: 1234567890, got: %s", o) + } } func ttpl(tpl string, v map[string]interface{}) (string, error) { @@ -300,6 +308,25 @@ pequod: sail: true ahab: scope: whale + +# test coalesce with nested null values +web: + livenessProbe: + httpGet: null + exec: + command: + - curl + - -f + - http://localhost:8080/api/v1/info + timeoutSeconds: null + readinessProbe: + httpGet: null + exec: + command: + - curl + - -f + - http://localhost:8080/api/v1/info + timeoutSeconds: null # catches the case where this wasn't defined in the original source... ` func TestCoalesceValues(t *testing.T) { @@ -344,6 +371,13 @@ func TestCoalesceValues(t *testing.T) { {"{{.spouter.global.nested.boat}}", "true"}, {"{{.pequod.global.nested.sail}}", "true"}, {"{{.spouter.global.nested.sail}}", ""}, + + {"{{.web.livenessProbe.failureThreshold}}", "5"}, + {"{{.web.livenessProbe.initialDelaySeconds}}", "10"}, + {"{{.web.livenessProbe.periodSeconds}}", "15"}, + {"{{.web.livenessProbe.exec}}", "map[command:[curl -f http://localhost:8080/api/v1/info]]"}, + + {"{{.web.readinessProbe.exec}}", "map[command:[curl -f http://localhost:8080/api/v1/info]]"}, } for _, tt := range tests { @@ -352,10 +386,29 @@ func TestCoalesceValues(t *testing.T) { } } - nullKeys := []string{"bottom", "right", "left", "front"} + nullKeys := []string{"bottom", "right", "left", "front", + "web.livenessProbe.httpGet", "web.readinessProbe.httpGet", "web.livenessProbe.timeoutSeconds", "web.readinessProbe.timeoutSeconds"} for _, nullKey := range nullKeys { - if _, ok := v[nullKey]; ok { - t.Errorf("Expected key %q to be removed, still present", nullKey) + parts := strings.Split(nullKey, ".") + curMap := v + for partIdx, part := range parts { + nextVal, ok := curMap[part] + if partIdx == len(parts)-1 { // are we the last? + if ok { + t.Errorf("Expected key %q to be removed, still present", nullKey) + break + } + } else { // we are not the last + if !ok { + t.Errorf("Expected key %q to be removed, but partial parent path was not found", nullKey) + break + } + curMap, ok = nextVal.(map[string]interface{}) + if !ok { + t.Errorf("Expected key %q to be removed, but partial parent path did not result in a map", nullKey) + break + } + } } } } @@ -386,7 +439,7 @@ func TestCoalesceTables(t *testing.T) { // What we expect is that anything in dst overrides anything in src, but that // otherwise the values are coalesced. 
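That test comment states the contract the rewritten coalesceTables above now implements non-destructively: dst wins over src, nested tables merge recursively, and an explicit null in dst removes the key instead of being copied through. A compact sketch of those rules, independent of the chartutil types and not the package's exact implementation:

```go
// Sketch only: the merge contract the rewritten coalesceTables aims for.
package main

import "fmt"

func merge(dst, src map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, sv := range src {
		dv, inDst := dst[k]
		if !inDst {
			out[k] = sv // only in src: copy it
			continue
		}
		if dv == nil {
			continue // nil in dst deletes the key
		}
		st, sok := sv.(map[string]interface{})
		dt, dok := dv.(map[string]interface{})
		if sok && dok {
			out[k] = merge(dt, st) // both tables: recurse
		} else {
			out[k] = dv // otherwise dst wins
		}
	}
	for k, dv := range dst {
		if _, done := out[k]; !done && dv != nil {
			out[k] = dv // keys only in dst (and not nil) carry over
		}
	}
	return out
}

func main() {
	src := map[string]interface{}{"livenessProbe": map[string]interface{}{"httpGet": map[string]interface{}{"path": "/"}, "timeoutSeconds": 3}}
	dst := map[string]interface{}{"livenessProbe": map[string]interface{}{"httpGet": nil, "exec": "/bin/true"}}
	fmt.Println(merge(dst, src)) // map[livenessProbe:map[exec:/bin/true timeoutSeconds:3]]
}
```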
- coalesceTables(dst, src, "") + dst = coalesceTables(dst, src, "") if dst["name"] != "Ishmael" { t.Errorf("Unexpected name: %s", dst["name"]) @@ -422,6 +475,33 @@ func TestCoalesceTables(t *testing.T) { t.Errorf("Expected boat string, got %v", dst["boat"]) } } + +func TestCoalesceSubchart(t *testing.T) { + tchart := "testdata/moby" + c, err := LoadDir(tchart) + if err != nil { + t.Fatal(err) + } + + tvals := &chart.Config{} + + v, err := CoalesceValues(c, tvals) + if err != nil { + t.Fatal(err) + } + j, _ := json.MarshalIndent(v, "", " ") + t.Logf("Coalesced Values: %s", string(j)) + + subchartValues, ok := v["spouter"].(map[string]interface{}) + if !ok { + t.Errorf("Subchart values not found") + } + + if _, ok := subchartValues["foo"]; ok { + t.Errorf("Expected key foo to be removed, still present") + } +} + func TestPathValue(t *testing.T) { doc := ` title: "Moby Dick" @@ -539,3 +619,59 @@ anotherNewKey: } } } + +func TestOverriteTableItemWithNonTableValue(t *testing.T) { + // src has a table value for "foo" + src := map[string]interface{}{ + "foo": map[string]interface{}{ + "baz": "boz", + }, + } + + // dst has a non-table value for "foo" + dst := map[string]interface{}{ + "foo": "bar", + } + + // result - this may print a warning, but we has always "worked" + result := coalesceTables(dst, src, "") + expected := map[string]interface{}{ + "foo": "bar", + } + + if !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %v, but got %v", expected, result) + } +} + +func TestSubchartCoaleseWithNullValue(t *testing.T) { + v, err := CoalesceValues(&chart.Chart{ + Metadata: &chart.Metadata{Name: "demo"}, + Dependencies: []*chart.Chart{ + { + Metadata: &chart.Metadata{Name: "logstash"}, + Values: &chart.Config{ + Raw: `livenessProbe: {httpGet: {path: "/", port: monitor}}`, + }, + }, + }, + Values: &chart.Config{ + Raw: `logstash: {livenessProbe: {httpGet: null, exec: "/bin/true"}}`, + }, + }, &chart.Config{}) + if err != nil { + t.Errorf("Failed with %s", err) + } + result := v.AsMap() + expected := map[string]interface{}{ + "logstash": map[string]interface{}{ + "global": map[string]interface{}{}, + "livenessProbe": map[string]interface{}{ + "exec": "/bin/true", + }, + }, + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("got %+v, expected %+v", result, expected) + } +} diff --git a/pkg/downloader/chart_downloader.go b/pkg/downloader/chart_downloader.go index 5e6287299..c2e9f6dc1 100644 --- a/pkg/downloader/chart_downloader.go +++ b/pkg/downloader/chart_downloader.go @@ -65,11 +65,11 @@ type ChartDownloader struct { Keyring string // HelmHome is the $HELM_HOME. HelmHome helmpath.Home - // Getter collection for the operation + // Getters collection for the operation Getters getter.Providers - // Chart repository username + // Username chart repository username Username string - // Chart repository password + // Password chart repository password Password string } @@ -205,6 +205,11 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, ge } c.setCredentials(r) + // Skip if dependency not contain name + if len(r.Config.Name) == 0 { + return u, r.Client, nil + } + // Next, we need to load the index, and actually look up the chart. i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name)) if err != nil { @@ -213,7 +218,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, ge cv, err := i.Get(chartName, version) if err != nil { - return u, r.Client, fmt.Errorf("chart %q matching %s not found in %s index. 
(try 'helm repo update'). %s", chartName, version, r.Config.Name, err) + return u, r.Client, fmt.Errorf("chart %q matching version %q not found in %s index. (try 'helm repo update'). %s", chartName, version, r.Config.Name, err) } if len(cv.URLs) == 0 { @@ -243,14 +248,14 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, ge return u, r.Client, nil } -// If HttpGetter is used, this method sets the configured repository credentials on the HttpGetter. +// setCredentials if HttpGetter is used, this method sets the configured repository credentials on the HttpGetter. func (c *ChartDownloader) setCredentials(r *repo.ChartRepository) { if t, ok := r.Client.(*getter.HttpGetter); ok { t.SetCredentials(c.getRepoCredentials(r)) } } -// If this ChartDownloader is not configured to use credentials, and the chart repository sent as an argument is, +// getRepoCredentials if this ChartDownloader is not configured to use credentials, and the chart repository sent as an argument is, // then the repository's configured credentials are returned. // Else, this ChartDownloader's credentials are returned. func (c *ChartDownloader) getRepoCredentials(r *repo.ChartRepository) (username, password string) { diff --git a/pkg/downloader/manager.go b/pkg/downloader/manager.go index 67f9dc7bf..23a5b587e 100644 --- a/pkg/downloader/manager.go +++ b/pkg/downloader/manager.go @@ -233,7 +233,7 @@ func (m *Manager) downloadAll(deps []*chartutil.Dependency) error { // Any failure to resolve/download a chart should fail: // https://github.com/kubernetes/helm/issues/1439 - churl, username, password, err := findChartURL(dep.Name, dep.Version, dep.Repository, repos) + churl, username, password, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) if err != nil { saveError = fmt.Errorf("could not find %s: %s", churl, err) break @@ -357,7 +357,7 @@ func (m *Manager) hasAllRepos(deps []*chartutil.Dependency) error { return nil } -// getRepoNames returns the repo names of the referenced deps which can be used to fetch the cahced index file. +// getRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file. func (m *Manager) getRepoNames(deps []*chartutil.Dependency) (map[string]string, error) { rf, err := repo.LoadRepositoriesFile(m.HelmHome.RepositoryFile()) if err != nil { @@ -371,6 +371,9 @@ func (m *Manager) getRepoNames(deps []*chartutil.Dependency) (map[string]string, // by Helm. missing := []string{} for _, dd := range deps { + if dd.Repository == "" { + return nil, fmt.Errorf("no 'repository' field specified for dependency: %q", dd.Name) + } // if dep chart is from local path, verify the path is valid if strings.HasPrefix(dd.Repository, "file://") { if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil { @@ -400,29 +403,36 @@ func (m *Manager) getRepoNames(deps []*chartutil.Dependency) (map[string]string, } } if !found { - missing = append(missing, dd.Repository) + repository := dd.Repository + // Add if URL + _, err := url.ParseRequestURI(repository) + if err == nil { + reposMap[repository] = repository + continue + } + missing = append(missing, repository) } } + if len(missing) > 0 { - if len(missing) > 0 { - errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) - // It is common for people to try to enter "stable" as a repository instead of the actual URL. - // For this case, let's give them a suggestion. 
- containsNonURL := false - for _, repo := range missing { - if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { - containsNonURL = true - } + errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) + // It is common for people to try to enter "stable" as a repository instead of the actual URL. + // For this case, let's give them a suggestion. + containsNonURL := false + for _, repo := range missing { + if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { + containsNonURL = true } - if containsNonURL { - errorMessage += ` + } + if containsNonURL { + errorMessage += ` Note that repositories must be URLs or aliases. For example, to refer to the stable repository, use "https://kubernetes-charts.storage.googleapis.com/" or "@stable" instead of "stable". Don't forget to add the repo, too ('helm repo add').` - } - return nil, errors.New(errorMessage) } + return nil, errors.New(errorMessage) } + return reposMap, nil } @@ -432,8 +442,7 @@ func (m *Manager) UpdateRepositories() error { if err != nil { return err } - repos := rf.Repositories - if len(repos) > 0 { + if repos := rf.Repositories; len(repos) > 0 { // This prints warnings straight to out. if err := m.parallelRepoUpdate(repos); err != nil { return err @@ -462,7 +471,7 @@ func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { }(r) } wg.Wait() - fmt.Fprintln(out, "Update Complete. ⎈Happy Helming!⎈") + fmt.Fprintln(out, "Update Complete.") return nil } @@ -474,7 +483,7 @@ func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { // repoURL is the repository to search // // If it finds a URL that is "relative", it will prepend the repoURL. 
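For context on the surrounding manager.go hunks: getRepoNames now fails fast when a dependency omits the repository field and passes bare URLs through untouched, while everything else still has to resolve to a configured repo, an @alias, or an alias: reference. A rough sketch of that classification using only the standard library; classifyRepository is an illustrative helper, not part of the package:

```go
// Sketch only: the dependency-repository rules enforced in getRepoNames above.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func classifyRepository(name, repository string) (string, error) {
	if repository == "" {
		return "", fmt.Errorf("no 'repository' field specified for dependency: %q", name)
	}
	if _, err := url.ParseRequestURI(repository); err == nil {
		return "url (used verbatim)", nil
	}
	if strings.HasPrefix(repository, "@") || strings.HasPrefix(repository, "alias:") {
		return "alias", nil
	}
	return "named repo (must exist in repositories.yaml)", nil
}

func main() {
	deps := [][2]string{{"dep1", ""}, {"dep2", "http://example.com/test"}, {"dep3", "@stable"}, {"dep4", "stable"}}
	for _, d := range deps {
		kind, err := classifyRepository(d[0], d[1])
		fmt.Println(d[0], kind, err)
	}
}
```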
-func findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, err error) { +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, err error) { for _, cr := range repos { if urlutil.Equal(repoURL, cr.Config.URL) { var entry repo.ChartVersions @@ -496,6 +505,10 @@ func findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRep return } } + url, err = repo.FindChartInRepoURL(repoURL, name, version, "", "", "", m.Getters) + if err == nil { + return + } err = fmt.Errorf("chart %s not found in %s", name, repoURL) return } @@ -601,8 +614,8 @@ func writeLock(chartpath string, lock *chartutil.RequirementsLock) error { return ioutil.WriteFile(dest, data, 0644) } -// archive a dep chart from local directory and save it into charts/ -func tarFromLocalDir(chartpath string, name string, repo string, version string) (string, error) { +// tarFromLocalDir archive a dep chart from local directory and save it into charts/ +func tarFromLocalDir(chartpath, name, repo, version string) (string, error) { destPath := filepath.Join(chartpath, "charts") if !strings.HasPrefix(repo, "file://") { diff --git a/pkg/downloader/manager_test.go b/pkg/downloader/manager_test.go index 8c2377e47..ef8b95071 100644 --- a/pkg/downloader/manager_test.go +++ b/pkg/downloader/manager_test.go @@ -18,6 +18,7 @@ package downloader import ( "bytes" "reflect" + "strings" "testing" "k8s.io/helm/pkg/chartutil" @@ -77,7 +78,7 @@ func TestFindChartURL(t *testing.T) { version := "0.1.0" repoURL := "http://example.com/charts" - churl, username, password, err := findChartURL(name, version, repoURL, repos) + churl, username, password, err := m.findChartURL(name, version, repoURL, repos) if err != nil { t.Fatal(err) } @@ -99,24 +100,33 @@ func TestGetRepoNames(t *testing.T) { HelmHome: helmpath.Home("testdata/helmhome"), } tests := []struct { - name string - req []*chartutil.Dependency - expect map[string]string - err bool + name string + req []*chartutil.Dependency + expect map[string]string + err bool + expectedErr string }{ { - name: "no repo definition failure", + name: "no repo definition failure -- stable repo", req: []*chartutil.Dependency{ - {Name: "oedipus-rex", Repository: "http://example.com/test"}, + {Name: "oedipus-rex", Repository: "stable"}, }, err: true, }, { - name: "no repo definition failure -- stable repo", + name: "dependency entry missing 'repository' field -- e.g. 
spelled 'repo'", req: []*chartutil.Dependency{ - {Name: "oedipus-rex", Repository: "stable"}, + {Name: "dependency-missing-repository-field"}, }, - err: true, + err: true, + expectedErr: "no 'repository' field specified for dependency: \"dependency-missing-repository-field\"", + }, + { + name: "dependency repository is url but not exist in repos", + req: []*chartutil.Dependency{ + {Name: "oedipus-rex", Repository: "http://example.com/test"}, + }, + expect: map[string]string{"http://example.com/test": "http://example.com/test"}, }, { name: "no repo definition failure", @@ -152,6 +162,9 @@ func TestGetRepoNames(t *testing.T) { l, err := m.getRepoNames(tt.req) if err != nil { if tt.err { + if !strings.Contains(err.Error(), tt.expectedErr) { + t.Fatalf("%s: expected error: %s, got: %s", tt.name, tt.expectedErr, err.Error()) + } continue } t.Fatal(err) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index f3dd869c9..b4b6475c9 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -130,7 +130,7 @@ type renderable struct { tpl string // vals are the values to be supplied to the template. vals chartutil.Values - // namespace prefix to the templates of the current chart + // basePath namespace prefix to the templates of the current chart basePath string } @@ -267,7 +267,7 @@ func (e *Engine) renderWithReferences(tpls map[string]renderable, referenceTpls rendered = make(map[string]string, len(files)) var buf bytes.Buffer for _, file := range files { - // Don't render partials. We don't care out the direct output of partials. + // Don't render partials. We don't care about the direct output of partials. // They are only included from other templates. if strings.HasPrefix(path.Base(file), "_") { continue diff --git a/pkg/getter/getter.go b/pkg/getter/getter.go index c595fec69..062c7269e 100644 --- a/pkg/getter/getter.go +++ b/pkg/getter/getter.go @@ -68,7 +68,7 @@ func (p Providers) ByScheme(scheme string) (Constructor, error) { } // All finds all of the registered getters as a list of Provider instances. -// Currently the build-in http/https getter and the discovered +// Currently the built-in http/https getter and the discovered // plugins with downloader notations are collected. func All(settings environment.EnvSettings) Providers { result := Providers{ diff --git a/pkg/getter/httpgetter.go b/pkg/getter/httpgetter.go index 66ea82863..bf99b1cfa 100644 --- a/pkg/getter/httpgetter.go +++ b/pkg/getter/httpgetter.go @@ -26,7 +26,7 @@ import ( "k8s.io/helm/pkg/version" ) -//HttpGetter is the efault HTTP(/S) backend handler +//HttpGetter is the default HTTP(/S) backend handler // TODO: change the name to HTTPGetter in Helm 3 type HttpGetter struct { //nolint client *http.Client diff --git a/pkg/getter/plugingetter.go b/pkg/getter/plugingetter.go index 8f2099de0..c918aa744 100644 --- a/pkg/getter/plugingetter.go +++ b/pkg/getter/plugingetter.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "k8s.io/helm/pkg/helm/environment" "k8s.io/helm/pkg/plugin" @@ -62,8 +63,9 @@ type pluginGetter struct { // Get runs downloader plugin command func (p *pluginGetter) Get(href string) (*bytes.Buffer, error) { - argv := []string{p.certFile, p.keyFile, p.cAFile, href} - prog := exec.Command(filepath.Join(p.base, p.command), argv...) + commands := strings.Split(p.command, " ") + argv := append(commands[1:], p.certFile, p.keyFile, p.cAFile, href) + prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) 
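The three added lines above are what allow a downloader plugin command to carry arguments (TestPluginSubCommands drives this with "echo -n"): the configured command is split on spaces, the first token is resolved inside the plugin directory, and the remaining tokens are prepended to the usual certFile/keyFile/caFile/href argv. A standalone sketch of that argv construction:

```go
// Sketch only: how the plugin getter above builds its command line once the
// configured command may contain subcommands or flags, e.g. "echo -n".
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func buildCommand(base, command, certFile, keyFile, caFile, href string) (string, []string) {
	tokens := strings.Split(command, " ")
	argv := append(tokens[1:], certFile, keyFile, caFile, href)
	return filepath.Join(base, tokens[0]), argv
}

func main() {
	prog, argv := buildCommand(".", "echo -n", "", "", "", "test://foo/bar")
	fmt.Println(prog, argv) // echo [-n    test://foo/bar]
}
```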
plugin.SetupPluginEnv(p.settings, p.name, p.base) prog.Env = os.Environ() buf := bytes.NewBuffer(nil) diff --git a/pkg/getter/plugingetter_test.go b/pkg/getter/plugingetter_test.go index 7c0bd6c1e..c44729478 100644 --- a/pkg/getter/plugingetter_test.go +++ b/pkg/getter/plugingetter_test.go @@ -94,3 +94,30 @@ func TestPluginGetter(t *testing.T) { t.Errorf("Expected %q, got %q", expect, got) } } +func TestPluginSubCommands(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TODO: refactor this test to work on windows") + } + + oldhh := os.Getenv("HELM_HOME") + defer os.Setenv("HELM_HOME", oldhh) + os.Setenv("HELM_HOME", "") + + env := hh(false) + pg := newPluginGetter("echo -n", env, "test", ".") + g, err := pg("test://foo/bar", "", "", "") + if err != nil { + t.Fatal(err) + } + + data, err := g.Get("test://foo/bar") + if err != nil { + t.Fatal(err) + } + + expect := " test://foo/bar" + got := data.String() + if got != expect { + t.Errorf("Expected %q, got %q", expect, got) + } +} diff --git a/pkg/getter/testdata/repository/cache/stable-index.yaml b/pkg/getter/testdata/repository/cache/stable-index.yaml index d4f883a25..40d2d04b5 100644 --- a/pkg/getter/testdata/repository/cache/stable-index.yaml +++ b/pkg/getter/testdata/repository/cache/stable-index.yaml @@ -412,7 +412,7 @@ entries: companies. All data is stored in plain text files, so no database is required. digest: 5cfff9542341a391abf9029dd9b42e7c44813c520ef0301ce62e9c08586ceca2 engine: gotpl - home: http://www.dokuwiki.org/ + home: https://www.dokuwiki.org/ icon: https://bitnami.com/assets/stacks/dokuwiki/img/dokuwiki-stack-110x117.png keywords: - dokuwiki @@ -433,7 +433,7 @@ entries: companies. All data is stored in plain text files, so no database is required. digest: 3c46f9d9196bbf975711b2bb7c889fd3df1976cc57c3c120c7374d721da0e240 engine: gotpl - home: http://www.dokuwiki.org/ + home: https://www.dokuwiki.org/ icon: https://bitnami.com/assets/stacks/dokuwiki/img/dokuwiki-stack-110x117.png keywords: - dokuwiki @@ -454,7 +454,7 @@ entries: companies. All data is stored in plain text files, so no database is required. digest: f533bc20e08179a49cca77b175f897087dc3f2c57e6c89ecbd7264ab5975d66a engine: gotpl - home: http://www.dokuwiki.org/ + home: https://www.dokuwiki.org/ icon: https://bitnami.com/assets/stacks/dokuwiki/img/dokuwiki-stack-110x117.png keywords: - dokuwiki @@ -475,7 +475,7 @@ entries: companies. All data is stored in plain text files, so no database is required. digest: 34a926398cfafbf426ff468167ef49577252e260ebce5df33380e6e67b79fe59 engine: gotpl - home: http://www.dokuwiki.org/ + home: https://www.dokuwiki.org/ icon: https://bitnami.com/assets/stacks/dokuwiki/img/dokuwiki-stack-110x117.png keywords: - dokuwiki @@ -496,7 +496,7 @@ entries: companies. All data is stored in plain text files, so no database is required. digest: 6825fbacb709cf05901985561be10ba9473a379488d99b71d1590d33f5a81374 engine: gotpl - home: http://www.dokuwiki.org/ + home: https://www.dokuwiki.org/ icon: https://bitnami.com/assets/stacks/dokuwiki/img/dokuwiki-stack-110x117.png keywords: - dokuwiki @@ -516,7 +516,7 @@ entries: description: One of the most versatile open source content management systems. 
digest: db95c255b19164c5051eb75a6860f3775a1011399a62b27e474cd9ebee0cb578 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -539,7 +539,7 @@ entries: description: One of the most versatile open source content management systems. digest: 84c13154a9aeb7215dc0d98e9825207207e69ca870f3d54b273bfc2d34699f61 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -562,7 +562,7 @@ entries: description: One of the most versatile open source content management systems. digest: 17d0bfdcdf5a1a650941343c76b6b928d76d3332fece127c502e91f9597f419e engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -585,7 +585,7 @@ entries: description: One of the most versatile open source content management systems. digest: 317674c89762e0b54156b734203ee93638dd7a25df35120c5cab45546814d89b engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -608,7 +608,7 @@ entries: description: One of the most versatile open source content management systems. digest: 24c4f187b50c0e961cc2cacf6e6b2ce6d6b225c73637c578e001bebd9b3f5d48 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -631,7 +631,7 @@ entries: description: One of the most versatile open source content management systems. digest: 7fcea4684a3d520454aeaa10beb9f9b1789c09c097680fc484954084f283feb3 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -654,7 +654,7 @@ entries: description: One of the most versatile open source content management systems. digest: adb23bc71125b9691b407a47dadf4298de3516805218813b56067967e39db7d8 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ icon: https://bitnami.com/assets/stacks/drupal/img/drupal-stack-220x234.png keywords: - drupal @@ -677,7 +677,7 @@ entries: description: One of the most versatile open source content management systems. digest: 5de529e25767e8a37b8d6f413daa0fe99f5c304e48ddcfa8adb4d8c7a0496aa7 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -699,7 +699,7 @@ entries: description: One of the most versatile open source content management systems. digest: a35dbf9d470972cc2461de3e0a8fcf2fec8d0adc04f5a0f1e924505f22c714d7 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -721,7 +721,7 @@ entries: description: One of the most versatile open source content management systems. digest: a62d686d6bd47643dfa489e395dda89286954f785123a43a88db7ef34f3ea48d engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -743,7 +743,7 @@ entries: description: One of the most versatile open source content management systems. 
digest: 2c189424bda94eeebb7e6370e96884f09bdfa81498cb93ac4723d24c92a3938e engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -765,7 +765,7 @@ entries: description: One of the most versatile open source content management systems. digest: 3596f47c5dcaa7a975d1c4cb7bf7ef6790c9ad8dda41a5a329e30c1ea8a40d11 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -787,7 +787,7 @@ entries: description: One of the most versatile open source content management systems. digest: 78b2bb3717be63dccb02ea06b711ca7cf7869848b296b880099c6264e86d86d3 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -809,7 +809,7 @@ entries: description: One of the most versatile open source content management systems. digest: 5508b29e20a5d609f76319869774f008dcc4bed13bbbc7ed40546bc9af8c7cd7 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -831,7 +831,7 @@ entries: description: One of the most versatile open source content management systems. digest: 023a282c93f8411fb81bb4fff7820c1337aad0586ccf7dae55bdbed515ad8b05 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -853,7 +853,7 @@ entries: description: One of the most versatile open source content management systems. digest: 9bdaa53f7a9e82c9b32c7ac9b34b84fd142671732a54423a2dcdc893c4162801 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -875,7 +875,7 @@ entries: description: One of the most versatile open source content management systems. digest: 25650526abc1036398dbb314d77a0062cbb644b2c5791a258fb863fdaad5093d engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -897,7 +897,7 @@ entries: description: One of the most versatile open source content management systems. digest: 13d5d32d316c08359221d230004dd2adc0152362e87abcc0d61ea191241fa69f engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -919,7 +919,7 @@ entries: description: One of the most versatile open source content management systems. digest: b3f09ecd191f8c06275c96d9af4d77a97c94355525864201e9baf151b17bd5a7 engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -941,7 +941,7 @@ entries: description: One of the most versatile open source content management systems. 
digest: c56fc55b93b0dead65af7b81bbd54befd5115860698ca475baa5acb178a12e5a engine: gotpl - home: http://www.drupal.org/ + home: https://www.drupal.org/ keywords: - drupal - cms @@ -1154,7 +1154,7 @@ entries: stories with the world digest: 91d195c99e00b2801eafef5c23fcf9ced218bb29c7097f08139e2bdc80e38a0f engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ icon: https://bitnami.com/assets/stacks/ghost/img/ghost-stack-220x234.png keywords: - ghost @@ -1178,7 +1178,7 @@ entries: stories with the world digest: 6342a95aeef40690430c2e80b167fbb116a632746cdca49cfac4cbd535eadb22 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ icon: https://bitnami.com/assets/stacks/ghost/img/ghost-stack-220x234.png keywords: - ghost @@ -1202,7 +1202,7 @@ entries: stories with the world digest: 8998a9a4e75e777edb6f06c05b45d461daebba09021161af3bef523efd193b15 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ icon: https://bitnami.com/assets/stacks/ghost/img/ghost-stack-220x234.png keywords: - ghost @@ -1226,7 +1226,7 @@ entries: stories with the world digest: e44c9a53355086ded1832f769dca515b863337ad118ba618ef97f37b3ef84030 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ icon: https://bitnami.com/assets/stacks/ghost/img/ghost-stack-220x234.png keywords: - ghost @@ -1250,7 +1250,7 @@ entries: stories with the world digest: b0c94a93c88fde68bb4fc78e92691d46cd2eb4d32cbac011e034565fbd35d46b engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ keywords: - ghost - blog @@ -1273,7 +1273,7 @@ entries: stories with the world digest: 791ccb42b62d56d50c72b37db3282eb3f2af75d667a25542d76c7991004eb822 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ keywords: - ghost - blog @@ -1296,7 +1296,7 @@ entries: stories with the world digest: 331a2b145bfdb39b626313cda7dc539f32dbda5149893957589c5406317fca53 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ keywords: - ghost - blog @@ -1319,7 +1319,7 @@ entries: stories with the world digest: 804227af037082a0f5c3c579feb9e24eb3682449e78876971c93a109bc716f40 engine: gotpl - home: http://www.ghost.org/ + home: https://www.ghost.org/ keywords: - ghost - blog @@ -1758,7 +1758,7 @@ entries: visualization, and a dashboard feature for compiling multiple custom views digest: 8a649026f55b2fa1e743c93fd331e127e66b49c4d7f20116a2bb06e5937f4828 engine: gotpl - home: http://community.jaspersoft.com/project/jasperreports-server + home: https://community.jaspersoft.com/project/jasperreports-server icon: https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png keywords: - business intelligence @@ -1782,7 +1782,7 @@ entries: visualization, and a dashboard feature for compiling multiple custom views digest: d4a62f7ace55256852e5c650a56ccf671633c4f223180d304cfb03b9cd7993aa engine: gotpl - home: http://community.jaspersoft.com/project/jasperreports-server + home: https://community.jaspersoft.com/project/jasperreports-server icon: https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png keywords: - business intelligence @@ -1806,7 +1806,7 @@ entries: visualization, and a dashboard feature for compiling multiple custom views digest: 99af0fca7ef1c475b239f2c8fc2dee6b040ea76b3c30bba1431f358df873aa49 engine: gotpl - home: http://community.jaspersoft.com/project/jasperreports-server + home: https://community.jaspersoft.com/project/jasperreports-server icon: 
https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png keywords: - business intelligence @@ -1830,7 +1830,7 @@ entries: visualization, and a dashboard feature for compiling multiple custom views digest: f01e53d1b89c4fb1fcd9702cd5f4e48d77607aed65f249e1f94b8b21f7eef3f4 engine: gotpl - home: http://community.jaspersoft.com/project/jasperreports-server + home: https://community.jaspersoft.com/project/jasperreports-server icon: https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png keywords: - business intelligence @@ -1854,7 +1854,7 @@ entries: visualization, and a dashboard feature for compiling multiple custom views digest: 5cc4af8c88691d7030602c97be2ccbc125ef11129b361da0aa236a127c31b965 engine: gotpl - home: http://community.jaspersoft.com/project/jasperreports-server + home: https://community.jaspersoft.com/project/jasperreports-server icon: https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png keywords: - business intelligence @@ -2265,7 +2265,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 6f9934487533f325515f4877b3af1306c87d64bf3ece9d4bd875289cd73b340d engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ icon: https://bitnami.com/assets/stacks/joomla/img/joomla-stack-220x234.png keywords: - joomla @@ -2286,7 +2286,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: f9dedab2fc2dbf170cf45b2c230baa6d20aad9a6f8ccfcb09c459602fc5213dc engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ icon: https://bitnami.com/assets/stacks/joomla/img/joomla-stack-220x234.png keywords: - joomla @@ -2307,7 +2307,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 1e067e459873ae832d54ff516a3420f7f0e16ecd8f72f4c4f02be22e47702077 engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ icon: https://bitnami.com/assets/stacks/joomla/img/joomla-stack-220x234.png keywords: - joomla @@ -2328,7 +2328,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 9a99b15e83e18955eb364985cd545659f1176ef203ac730876dfe39499edfb18 engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ icon: https://bitnami.com/assets/stacks/joomla/img/joomla-stack-220x234.png keywords: - joomla @@ -2349,7 +2349,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 07c3a16eb674ffc74fe5b2b16191b8bb24c63bdae9bce9710bda1999920c46fc engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ keywords: - joomla - cms @@ -2369,7 +2369,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 95fbe272015941544609eee90b3bffd5172bfdec10be13636510caa8478a879e engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ keywords: - joomla - cms @@ -2389,7 +2389,7 @@ entries: description: PHP content management system (CMS) for publishing web content digest: 0a01ea051ec15274932c8d82076c1a9fd62584b0fb916a81372319bef223c20e engine: gotpl - home: http://www.joomla.org/ + home: https://www.joomla.org/ keywords: - joomla - cms @@ -2771,7 +2771,7 @@ entries: - created: 2017-04-28T00:18:30.087097677Z description: A modern load testing framework digest: eb91b0e3c0b618cf5ad0f24d2685fe4086bc6f497685e58ad8a64032c4e82b7a - home: http://locust.io + home: https://locust.io icon: 
https://pbs.twimg.com/profile_images/1867636195/locust-logo-orignal.png maintainers: - email: vincent.drl@gmail.com @@ -3351,7 +3351,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 0e51822c5547895109a5b41ce426c77f62d0434b40f3021afee8471ab976a6f5 engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ icon: https://bitnami.com/assets/stacks/mediawiki/img/mediawiki-stack-220x234.png keywords: - mediawiki @@ -3372,7 +3372,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 0e419c2c5d87997f94a32da6597af3f3b52120dc1ec682dcbb6b238fb4825e06 engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ icon: https://bitnami.com/assets/stacks/mediawiki/img/mediawiki-stack-220x234.png keywords: - mediawiki @@ -3393,7 +3393,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 6f4dde26737f7f1aa63ffda6c259ce388e3a3509225f90f334bfc3f0f7617bc1 engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ icon: https://bitnami.com/assets/stacks/mediawiki/img/mediawiki-stack-220x234.png keywords: - mediawiki @@ -3414,7 +3414,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 0ba52b8c4c9e0bee3eb76fe625d2dc88729a1cdf41ace9d13cd4abc5b477cfb8 engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ keywords: - mediawiki - wiki @@ -3434,7 +3434,7 @@ entries: that uses PHP to process and display data stored in a database. digest: f49df3e17f97b238743aad0376eb9db7e4a9bca3829a3a65d7bbb349344a73be engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ keywords: - mediawiki - wiki @@ -3454,7 +3454,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 339a90050d5cf4216140409349a356aa7cd8dc95e2cbdca06e4fdd11e87aa963 engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ keywords: - mediawiki - wiki @@ -3474,7 +3474,7 @@ entries: that uses PHP to process and display data stored in a database. digest: 9617f13f51f5bb016a072f2a026c627420721a1c5b7cd22f32d6cd0c90f34eda engine: gotpl - home: http://www.mediawiki.org/ + home: https://www.mediawiki.org/ keywords: - mediawiki - wiki @@ -3495,7 +3495,7 @@ entries: system. 
digest: 36ceb2767094598171b2851ecda54bd43d862b9b81aa4b294f3d8c8d59ddd79c engine: gotpl - home: http://memcached.org/ + home: https://memcached.org/ icon: https://upload.wikimedia.org/wikipedia/en/thumb/2/27/Memcached.svg/1024px-Memcached.svg.png keywords: - memcached @@ -3513,7 +3513,7 @@ entries: description: Chart for Memcached digest: 2b918dd8129a9d706e58b3de459004e3367c05a162d3e3cdb031cb6818d5f820 engine: gotpl - home: http://memcached.org/ + home: https://memcached.org/ keywords: - memcached - cache @@ -3911,7 +3911,7 @@ entries: learning environments digest: 386bff8ce61cf61961daf8ed6d68a76cd3a360560a08c1fca80bcbd897f11270 engine: gotpl - home: http://www.moodle.org/ + home: https://www.moodle.org/ icon: https://bitnami.com/assets/stacks/moodle/img/moodle-stack-110x117.png keywords: - moodle @@ -3932,7 +3932,7 @@ entries: learning environments digest: bd85420a7cefd82e9d96088591601f832ecc60016d6389dbcde51a2050327a66 engine: gotpl - home: http://www.moodle.org/ + home: https://www.moodle.org/ icon: https://bitnami.com/assets/stacks/moodle/img/moodle-stack-110x117.png keywords: - moodle @@ -3953,7 +3953,7 @@ entries: learning environments digest: 8656c544a71fa8cc4ac23380e999e072740ec8e481a22aff86517d8362e70121 engine: gotpl - home: http://www.moodle.org/ + home: https://www.moodle.org/ icon: https://bitnami.com/assets/stacks/moodle/img/moodle-stack-110x117.png keywords: - moodle diff --git a/pkg/helm/client.go b/pkg/helm/client.go index 771c7f3d1..fa867c2d3 100644 --- a/pkg/helm/client.go +++ b/pkg/helm/client.go @@ -302,7 +302,7 @@ func (h *Client) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-ch return h.test(ctx, req) } -// PingTiller pings the Tiller pod and ensure's that it is up and running +// PingTiller pings the Tiller pod and ensures that it is up and running func (h *Client) PingTiller() error { ctx := NewContext() return h.ping(ctx) @@ -334,7 +334,7 @@ func (h *Client) connect(ctx context.Context) (conn *grpc.ClientConn, err error) return conn, nil } -// Executes tiller.ListReleases RPC. +// list executes tiller.ListReleases RPC. func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.ListReleasesResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -365,7 +365,7 @@ func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.L return resp, nil } -// Executes tiller.InstallRelease RPC. +// install executes tiller.InstallRelease RPC. func (h *Client) install(ctx context.Context, req *rls.InstallReleaseRequest) (*rls.InstallReleaseResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -377,7 +377,7 @@ func (h *Client) install(ctx context.Context, req *rls.InstallReleaseRequest) (* return rlc.InstallRelease(ctx, req) } -// Executes tiller.UninstallRelease RPC. +// delete executes tiller.UninstallRelease RPC. func (h *Client) delete(ctx context.Context, req *rls.UninstallReleaseRequest) (*rls.UninstallReleaseResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -389,7 +389,7 @@ func (h *Client) delete(ctx context.Context, req *rls.UninstallReleaseRequest) ( return rlc.UninstallRelease(ctx, req) } -// Executes tiller.UpdateRelease RPC. +// update executes tiller.UpdateRelease RPC. 
func (h *Client) update(ctx context.Context, req *rls.UpdateReleaseRequest) (*rls.UpdateReleaseResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -401,7 +401,7 @@ func (h *Client) update(ctx context.Context, req *rls.UpdateReleaseRequest) (*rl return rlc.UpdateRelease(ctx, req) } -// Executes tiller.RollbackRelease RPC. +// rollback executes tiller.RollbackRelease RPC. func (h *Client) rollback(ctx context.Context, req *rls.RollbackReleaseRequest) (*rls.RollbackReleaseResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -413,7 +413,7 @@ func (h *Client) rollback(ctx context.Context, req *rls.RollbackReleaseRequest) return rlc.RollbackRelease(ctx, req) } -// Executes tiller.GetReleaseStatus RPC. +// status executes tiller.GetReleaseStatus RPC. func (h *Client) status(ctx context.Context, req *rls.GetReleaseStatusRequest) (*rls.GetReleaseStatusResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -425,7 +425,7 @@ func (h *Client) status(ctx context.Context, req *rls.GetReleaseStatusRequest) ( return rlc.GetReleaseStatus(ctx, req) } -// Executes tiller.GetReleaseContent RPC. +// content executes tiller.GetReleaseContent RPC. func (h *Client) content(ctx context.Context, req *rls.GetReleaseContentRequest) (*rls.GetReleaseContentResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -437,7 +437,7 @@ func (h *Client) content(ctx context.Context, req *rls.GetReleaseContentRequest) return rlc.GetReleaseContent(ctx, req) } -// Executes tiller.GetVersion RPC. +// version executes tiller.GetVersion RPC. func (h *Client) version(ctx context.Context, req *rls.GetVersionRequest) (*rls.GetVersionResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -449,7 +449,7 @@ func (h *Client) version(ctx context.Context, req *rls.GetVersionRequest) (*rls. return rlc.GetVersion(ctx, req) } -// Executes tiller.GetHistory RPC. +// history executes tiller.GetHistory RPC. func (h *Client) history(ctx context.Context, req *rls.GetHistoryRequest) (*rls.GetHistoryResponse, error) { c, err := h.connect(ctx) if err != nil { @@ -461,7 +461,7 @@ func (h *Client) history(ctx context.Context, req *rls.GetHistoryRequest) (*rls. return rlc.GetHistory(ctx, req) } -// Executes tiller.TestRelease RPC. +// test executes tiller.TestRelease RPC. func (h *Client) test(ctx context.Context, req *rls.TestReleaseRequest) (<-chan *rls.TestReleaseResponse, <-chan error) { errc := make(chan error, 1) c, err := h.connect(ctx) @@ -499,7 +499,7 @@ func (h *Client) test(ctx context.Context, req *rls.TestReleaseRequest) (<-chan return ch, errc } -// Executes tiller.Ping RPC. +// ping executes tiller.Ping RPC. func (h *Client) ping(ctx context.Context) error { c, err := h.connect(ctx) if err != nil { diff --git a/pkg/helm/environment/environment.go b/pkg/helm/environment/environment.go index 6d40fb846..9cfe80a1d 100644 --- a/pkg/helm/environment/environment.go +++ b/pkg/helm/environment/environment.go @@ -80,23 +80,23 @@ type EnvSettings struct { // AddFlags binds flags to the given flagset. func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { - fs.StringVar((*string)(&s.Home), "home", DefaultHelmHome, "location of your Helm config. Overrides $HELM_HOME") - fs.StringVar(&s.TillerHost, "host", "", "address of Tiller. 
Overrides $HELM_HOST") - fs.StringVar(&s.KubeContext, "kube-context", "", "name of the kubeconfig context to use") - fs.StringVar(&s.KubeConfig, "kubeconfig", "", "absolute path to the kubeconfig file to use") - fs.BoolVar(&s.Debug, "debug", false, "enable verbose output") - fs.StringVar(&s.TillerNamespace, "tiller-namespace", "kube-system", "namespace of Tiller") - fs.Int64Var(&s.TillerConnectionTimeout, "tiller-connection-timeout", int64(300), "the duration (in seconds) Helm will wait to establish a connection to tiller") + fs.StringVar((*string)(&s.Home), "home", DefaultHelmHome, "Location of your Helm config. Overrides $HELM_HOME") + fs.StringVar(&s.TillerHost, "host", "", "Address of Tiller. Overrides $HELM_HOST") + fs.StringVar(&s.KubeContext, "kube-context", "", "Name of the kubeconfig context to use") + fs.StringVar(&s.KubeConfig, "kubeconfig", "", "Absolute path of the kubeconfig file to be used") + fs.BoolVar(&s.Debug, "debug", false, "Enable verbose output") + fs.StringVar(&s.TillerNamespace, "tiller-namespace", "kube-system", "Namespace of Tiller") + fs.Int64Var(&s.TillerConnectionTimeout, "tiller-connection-timeout", int64(300), "The duration (in seconds) Helm will wait to establish a connection to Tiller") } // AddFlagsTLS adds the flags for supporting client side TLS to the given flagset. func (s *EnvSettings) AddFlagsTLS(fs *pflag.FlagSet) { - fs.StringVar(&s.TLSServerName, "tls-hostname", s.TillerHost, "the server name used to verify the hostname on the returned certificates from the server") - fs.StringVar(&s.TLSCaCertFile, "tls-ca-cert", DefaultTLSCaCert, "path to TLS CA certificate file") - fs.StringVar(&s.TLSCertFile, "tls-cert", DefaultTLSCert, "path to TLS certificate file") - fs.StringVar(&s.TLSKeyFile, "tls-key", DefaultTLSKeyFile, "path to TLS key file") - fs.BoolVar(&s.TLSVerify, "tls-verify", DefaultTLSVerify, "enable TLS for request and verify remote") - fs.BoolVar(&s.TLSEnable, "tls", DefaultTLSEnable, "enable TLS for request") + fs.StringVar(&s.TLSServerName, "tls-hostname", s.TillerHost, "The server name used to verify the hostname on the returned certificates from the server") + fs.StringVar(&s.TLSCaCertFile, "tls-ca-cert", DefaultTLSCaCert, "Path to TLS CA certificate file") + fs.StringVar(&s.TLSCertFile, "tls-cert", DefaultTLSCert, "Path to TLS certificate file") + fs.StringVar(&s.TLSKeyFile, "tls-key", DefaultTLSKeyFile, "Path to TLS key file") + fs.BoolVar(&s.TLSVerify, "tls-verify", DefaultTLSVerify, "Enable TLS for request and verify remote") + fs.BoolVar(&s.TLSEnable, "tls", DefaultTLSEnable, "Enable TLS for request") } // Init sets values from the environment. 
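For reference, a minimal sketch of how the flag definitions above are consumed: `EnvSettings.AddFlags` and `AddFlagsTLS` just bind to a `pflag.FlagSet`, so the reworded help strings are exactly what the CLI usage text prints. The package paths match this repository's layout; the standalone `main` wrapper and the printing are only illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/helm/pkg/helm/environment"
)

func main() {
	// Bind the Helm environment and TLS flags to a scratch flag set, the same
	// way the helm root command does, then print the usage text so the
	// capitalized descriptions above are visible.
	settings := &environment.EnvSettings{}
	fs := pflag.NewFlagSet("helm", pflag.ContinueOnError)
	settings.AddFlags(fs)
	settings.AddFlagsTLS(fs)

	// Parsing an empty argument list leaves every flag at its default
	// (e.g. --tiller-namespace=kube-system, --tiller-connection-timeout=300).
	_ = fs.Parse([]string{})
	fmt.Print(fs.FlagUsages())
}
```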
diff --git a/pkg/helm/fake.go b/pkg/helm/fake.go index 6da0ac70d..b904bc565 100644 --- a/pkg/helm/fake.go +++ b/pkg/helm/fake.go @@ -272,7 +272,7 @@ func (c *FakeClient) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) ( return results, errc } -// PingTiller pings the Tiller pod and ensure's that it is up and running +// PingTiller pings the Tiller pod and ensures that it is up and running func (c *FakeClient) PingTiller() error { return nil } diff --git a/pkg/helm/fake_test.go b/pkg/helm/fake_test.go index cff10051f..261065b59 100644 --- a/pkg/helm/fake_test.go +++ b/pkg/helm/fake_test.go @@ -26,7 +26,8 @@ import ( rls "k8s.io/helm/pkg/proto/hapi/services" ) -const cmInputTemplate = `kind: ConfigMap +const ( + cmInputTemplate = `kind: ConfigMap apiVersion: v1 metadata: name: example @@ -34,7 +35,7 @@ data: Release: {{.Release | toYaml | indent 4}} ` -const cmOutputTemplate = ` + cmOutputTemplate = ` --- # Source: installChart/templates/cm.yaml kind: ConfigMap @@ -53,6 +54,7 @@ data: seconds: 242085845 ` +) var installChart *chart.Chart diff --git a/pkg/helm/option.go b/pkg/helm/option.go index f41d9c6ae..930434178 100644 --- a/pkg/helm/option.go +++ b/pkg/helm/option.go @@ -297,6 +297,20 @@ func DeleteDescription(description string) DeleteOption { } } +// UpgradeCleanupOnFail allows deletion of new resources created in this upgrade when upgrade failed +func UpgradeCleanupOnFail(cleanupOnFail bool) UpdateOption { + return func(opts *options) { + opts.updateReq.CleanupOnFail = cleanupOnFail + } +} + +// RollbackCleanupOnFail allows deletion of new resources created in this rollback when rollback failed +func RollbackCleanupOnFail(cleanupOnFail bool) RollbackOption { + return func(opts *options) { + opts.rollbackReq.CleanupOnFail = cleanupOnFail + } +} + // DeleteDisableHooks will disable hooks for a deletion operation. func DeleteDisableHooks(disable bool) DeleteOption { return func(opts *options) { @@ -346,6 +360,20 @@ func InstallReuseName(reuse bool) InstallOption { } } +// InstallSubNotes will (if true) instruct Tiller to render SubChart Notes +func InstallSubNotes(enable bool) InstallOption { + return func(opts *options) { + opts.instReq.SubNotes = enable + } +} + +// UpgradeSubNotes will (if true) instruct Tiller to render SubChart Notes +func UpgradeSubNotes(enable bool) UpdateOption { + return func(opts *options) { + opts.updateReq.SubNotes = enable + } +} + // RollbackDisableHooks will disable hooks for a rollback operation func RollbackDisableHooks(disable bool) RollbackOption { return func(opts *options) { @@ -460,7 +488,7 @@ type VersionOption func(*options) // the defaults used when running the `helm upgrade` command. type UpdateOption func(*options) -// RollbackOption allows specififying various settings configurable +// RollbackOption allows specifying various settings configurable // by the helm client user for overriding the defaults used when // running the `helm rollback` command. 
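A hedged sketch of how a caller might opt into the new update options added above (`UpgradeCleanupOnFail`, `UpgradeSubNotes`, and their install/rollback counterparts). The release name, chart path, and Tiller address are placeholders, and it assumes the existing Helm 2 client constructors `helm.NewClient`, `helm.Host`, and `helm.Client.UpdateRelease`, which are not shown in this diff.

```go
package main

import (
	"fmt"

	"k8s.io/helm/pkg/helm"
)

func main() {
	// Placeholder Tiller address; normally this comes from --host or a
	// port-forward tunnel such as the one in pkg/helm/portforwarder.
	client := helm.NewClient(helm.Host("127.0.0.1:44134"))

	resp, err := client.UpdateRelease(
		"my-release", // placeholder release name
		"./mychart",  // placeholder chart path
		helm.UpgradeSubNotes(true),      // also render sub-chart notes
		helm.UpgradeCleanupOnFail(true), // delete resources this upgrade created if it fails
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("upgraded release %q\n", resp.Release.Name)
}
```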
type RollbackOption func(*options) diff --git a/pkg/helm/portforwarder/portforwarder.go b/pkg/helm/portforwarder/portforwarder.go index e962eef7f..0b04478f5 100644 --- a/pkg/helm/portforwarder/portforwarder.go +++ b/pkg/helm/portforwarder/portforwarder.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/helm/pkg/kube" + "k8s.io/helm/pkg/tiller/environment" ) var ( @@ -39,8 +40,7 @@ func New(namespace string, client kubernetes.Interface, config *rest.Config) (*k if err != nil { return nil, err } - const tillerPort = 44134 - t := kube.NewTunnel(client.CoreV1().RESTClient(), config, namespace, podName, tillerPort) + t := kube.NewTunnel(client.CoreV1().RESTClient(), config, namespace, podName, environment.DefaultTillerPort) return t, t.ForwardPort() } diff --git a/pkg/hooks/hooks.go b/pkg/hooks/hooks.go index 5083672cd..6d60fad51 100644 --- a/pkg/hooks/hooks.go +++ b/pkg/hooks/hooks.go @@ -20,14 +20,16 @@ import ( "k8s.io/helm/pkg/proto/hapi/release" ) -// HookAnno is the label name for a hook -const HookAnno = "helm.sh/hook" - -// HookWeightAnno is the label name for a hook weight -const HookWeightAnno = "helm.sh/hook-weight" - -// HookDeleteAnno is the label name for the delete policy for a hook -const HookDeleteAnno = "helm.sh/hook-delete-policy" +const ( + // HookAnno is the label name for a hook + HookAnno = "helm.sh/hook" + // HookWeightAnno is the label name for a hook weight + HookWeightAnno = "helm.sh/hook-weight" + // HookDeleteAnno is the label name for the delete policy for a hook + HookDeleteAnno = "helm.sh/hook-delete-policy" + // HookDeleteTimeoutAnno is the label name for the timeout value for delete policies + HookDeleteTimeoutAnno = "helm.sh/hook-delete-timeout" +) // Types of hooks const ( diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 4a387d524..e78c10f2d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -24,28 +24,35 @@ import ( "fmt" "io" "log" + "sort" "strings" "time" - jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/meta" + + "github.com/evanphx/json-patch" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" batch "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" extv1beta1 "k8s.io/api/extensions/v1beta1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/genericclioptions/resource" + "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes/scheme" + cachetools "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/core" @@ -60,6 +67,8 @@ const MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n" // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. var ErrNoObjectsVisited = goerrors.New("no objects visited") +var metadataAccessor = meta.NewAccessor() + // Client represents a client capable of communicating with the Kubernetes API. 
type Client struct { cmdutil.Factory @@ -69,8 +78,14 @@ type Client struct { // New creates a new Client. func New(getter genericclioptions.RESTClientGetter) *Client { if getter == nil { - getter = genericclioptions.NewConfigFlags() + getter = genericclioptions.NewConfigFlags(true) + } + + err := apiextv1beta1.AddToScheme(scheme.Scheme) + if err != nil { + panic(err) } + return &Client{ Factory: cmdutil.NewFactory(getter), Log: nopLogger, @@ -128,7 +143,7 @@ func (c *Client) validator() validation.Schema { return schema } -// BuildUnstructured validates for Kubernetes objects and returns unstructured infos. +// BuildUnstructured reads Kubernetes objects and returns unstructured infos. func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result, error) { var result Result @@ -138,12 +153,28 @@ func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result, NamespaceParam(namespace). DefaultNamespace(). Stream(reader, ""). - Schema(c.validator()). Flatten(). Do().Infos() return result, scrubValidationError(err) } +// Validate reads Kubernetes manifests and validates the content. +// +// This function does not actually do schema validation of manifests. Adding +// validation now breaks existing clients of helm: https://github.com/helm/helm/issues/5750 +func (c *Client) Validate(namespace string, reader io.Reader) error { + _, err := c.NewBuilder(). + Unstructured(). + ContinueOnError(). + NamespaceParam(namespace). + DefaultNamespace(). + // Schema(c.validator()). // No schema validation + Stream(reader, ""). + Flatten(). + Do().Infos() + return scrubValidationError(err) +} + // Build validates for Kubernetes objects and returns resource Infos from a io.Reader. func (c *Client) Build(namespace string, reader io.Reader) (Result, error) { var result Result @@ -151,13 +182,41 @@ func (c *Client) Build(namespace string, reader io.Reader) (Result, error) { return result, scrubValidationError(err) } +// Return the resource info as internal +func resourceInfoToObject(info *resource.Info, c *Client) runtime.Object { + internalObj, err := asInternal(info) + if err != nil { + // If the problem is just that the resource is not registered, don't print any + // error. This is normal for custom resources. + if !runtime.IsNotRegisteredError(err) { + c.Log("Warning: conversion to internal type failed: %v", err) + } + // Add the unstructured object in this situation. It will still get listed, just + // with less information. + return info.Object + } + + return internalObj +} + +func sortByKey(objs map[string](map[string]runtime.Object)) []string { + var keys []string + // Create a simple slice, so we can sort it + for key := range objs { + keys = append(keys, key) + } + // Sort alphabetically by version/kind keys + sort.Strings(keys) + return keys +} + // Get gets Kubernetes resources as pretty-printed string. // // Namespace will set the namespace. func (c *Client) Get(namespace string, reader io.Reader) (string, error) { - // Since we don't know what order the objects come in, let's group them by the types, so + // Since we don't know what order the objects come in, let's group them by the types and then sort them, so // that when we print them, they come out looking good (headers apply to subgroups, etc.). 
- objs := make(map[string][]runtime.Object) + objs := make(map[string](map[string]runtime.Object)) infos, err := c.BuildUnstructured(namespace, reader) if err != nil { return "", err @@ -178,19 +237,15 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) { // versions per cluster, but this certainly won't hurt anything, so let's be safe. gvk := info.ResourceMapping().GroupVersionKind vk := gvk.Version + "/" + gvk.Kind - internalObj, err := asInternal(info) - if err != nil { - // If the problem is just that the resource is not registered, don't print any - // error. This is normal for custom resources. - if !runtime.IsNotRegisteredError(err) { - c.Log("Warning: conversion to internal type failed: %v", err) - } - // Add the unstructured object in this situation. It will still get listed, just - // with less information. - objs[vk] = append(objs[vk], info.Object) - } else { - objs[vk] = append(objs[vk], internalObj) + + // Initialize map. The main map groups resources based on version/kind + // The second level is a simple 'Name' to 'Object', that will help sort + // the individual resource later + if objs[vk] == nil { + objs[vk] = make(map[string]runtime.Object) } + // Map between the resource name to the underlying info object + objs[vk][info.Name] = resourceInfoToObject(info, c) //Get the relation pods objPods, err = c.getSelectRelationPod(info, objPods) @@ -208,8 +263,12 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) { for key, podItems := range objPods { for i := range podItems { pod := &core.Pod{} + legacyscheme.Scheme.Convert(&podItems[i], pod, nil) - objs[key+"(related)"] = append(objs[key+"(related)"], pod) + if objs[key+"(related)"] == nil { + objs[key+"(related)"] = make(map[string]runtime.Object) + } + objs[key+"(related)"][pod.ObjectMeta.Name] = runtime.Object(pod) } } @@ -219,14 +278,28 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) { // track of tab widths. 
buf := new(bytes.Buffer) printFlags := get.NewHumanPrintFlags() - for t, ot := range objs { + + // Sort alphabetically by version/kind keys + vkKeys := sortByKey(objs) + // Iterate on sorted version/kind types + for _, t := range vkKeys { if _, err = fmt.Fprintf(buf, "==> %s\n", t); err != nil { return "", err } typePrinter, _ := printFlags.ToPrinter("") - for _, o := range ot { - if err := typePrinter.PrintObj(o, buf); err != nil { - c.Log("failed to print object type %s, object: %q :\n %v", t, o, err) + + var sortedResources []string + for resource := range objs[t] { + sortedResources = append(sortedResources, resource) + } + sort.Strings(sortedResources) + + // Now that each individual resource within the specific version/kind + // is sorted, we print each resource using the k8s printer + vk := objs[t] + for _, resourceName := range sortedResources { + if err := typePrinter.PrintObj(vk[resourceName], buf); err != nil { + c.Log("failed to print object type %s, object: %q :\n %v", t, resourceName, err) return "", err } } @@ -243,13 +316,41 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) { return buf.String(), nil } -// Update reads in the current configuration and a target configuration from io.reader -// and creates resources that don't already exists, updates resources that have been modified +// Update reads the current configuration and a target configuration from io.reader +// and creates resources that don't already exist, updates resources that have been modified // in the target configuration and deletes resources from the current configuration that are // not present in the target configuration. // // Namespace will set the namespaces. +// +// Deprecated: use UpdateWithOptions instead. func (c *Client) Update(namespace string, originalReader, targetReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error { + return c.UpdateWithOptions(namespace, originalReader, targetReader, UpdateOptions{ + Force: force, + Recreate: recreate, + Timeout: timeout, + ShouldWait: shouldWait, + }) +} + +// UpdateOptions provides options to control update behavior +type UpdateOptions struct { + Force bool + Recreate bool + Timeout int64 + ShouldWait bool + // Allow deletion of new resources created in this update when update failed + CleanupOnFail bool +} + +// UpdateWithOptions reads the current configuration and a target configuration from io.reader +// and creates resources that don't already exist, updates resources that have been modified +// in the target configuration and deletes resources from the current configuration that are +// not present in the target configuration. +// +// Namespace will set the namespaces. UpdateOptions provides additional parameters to control +// update behavior. 
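A minimal sketch of calling the new options-based entry point described in the comment above. The namespace and the ConfigMap manifests are stand-ins; `kube.New`, `UpdateWithOptions`, and `UpdateOptions` are the APIs introduced in this diff.

```go
package main

import (
	"strings"

	"k8s.io/helm/pkg/kube"
)

// Stand-in manifests; a real caller passes the rendered manifests of the
// previous and the target release revision.
const previousManifests = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: example
data:
  key: "old"
`

const targetManifests = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: example
data:
  key: "new"
`

func main() {
	// kube.New(nil) falls back to the default kubeconfig-based client getter.
	c := kube.New(nil)

	err := c.UpdateWithOptions("default",
		strings.NewReader(previousManifests),
		strings.NewReader(targetManifests),
		kube.UpdateOptions{
			Timeout:       300, // seconds, only consulted when ShouldWait is true
			ShouldWait:    true,
			CleanupOnFail: true, // delete resources created by this update if it fails
		})
	if err != nil {
		panic(err)
	}
}
```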
+func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReader io.Reader, opts UpdateOptions) error { original, err := c.BuildUnstructured(namespace, originalReader) if err != nil { return fmt.Errorf("failed decoding reader into objects: %s", err) @@ -261,6 +362,7 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader return fmt.Errorf("failed decoding reader into objects: %s", err) } + newlyCreatedResources := []*resource.Info{} updateErrors := []string{} c.Log("checking %d resources for changes", len(target)) @@ -279,6 +381,7 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader if err := createResource(info); err != nil { return fmt.Errorf("failed to create resource: %s", err) } + newlyCreatedResources = append(newlyCreatedResources, info) kind := info.Mapping.GroupVersionKind.Kind c.Log("Created a new %s called %q\n", kind, info.Name) @@ -286,12 +389,21 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader } originalInfo := original.Get(info) + + // The resource already exists in the cluster, but it wasn't defined in the previous release. + // In this case, we consider it to be a resource that was previously un-managed by the release and error out, + // asking for the user to intervene. + // + // See https://github.com/helm/helm/issues/1193 for more info. if originalInfo == nil { - kind := info.Mapping.GroupVersionKind.Kind - return fmt.Errorf("no %s with the name %q found", kind, info.Name) + return fmt.Errorf( + "kind %s with the name %q already exists in the cluster and wasn't defined in the previous release. Before upgrading, please either delete the resource from the cluster or remove it from the chart", + info.Mapping.GroupVersionKind.Kind, + info.Name, + ) } - if err := updateResource(c, info, originalInfo.Object, force, recreate); err != nil { + if err := updateResource(c, info, originalInfo.Object, opts.Force, opts.Recreate); err != nil { c.Log("error updating the resource %q:\n\t %v", info.Name, err) updateErrors = append(updateErrors, err.Error()) } @@ -299,38 +411,98 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader return nil }) + cleanupErrors := []string{} + + if opts.CleanupOnFail && (err != nil || len(updateErrors) != 0) { + c.Log("Cleanup on fail enabled: cleaning up newly created resources due to update manifests failures") + cleanupErrors = c.cleanup(newlyCreatedResources) + } + switch { case err != nil: - return err + return fmt.Errorf(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && ")) case len(updateErrors) != 0: - return fmt.Errorf(strings.Join(updateErrors, " && ")) + return fmt.Errorf(strings.Join(append(updateErrors, cleanupErrors...), " && ")) } for _, info := range original.Difference(target) { c.Log("Deleting %q in %s...", info.Name, info.Namespace) + + if err := info.Get(); err != nil { + c.Log("Unable to get obj %q, err: %s", info.Name, err) + } + annotations, err := metadataAccessor.Annotations(info.Object) + if err != nil { + c.Log("Unable to get annotations on %q, err: %s", info.Name, err) + } + if ResourcePolicyIsKeep(annotations) { + policy := annotations[ResourcePolicyAnno] + c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, policy) + continue + } + if err := deleteResource(info); err != nil { c.Log("Failed to delete %q, err: %s", info.Name, err) } } - if shouldWait { - return c.waitForResources(time.Duration(timeout)*time.Second, target) + if 
opts.ShouldWait { + err := c.waitForResources(time.Duration(opts.Timeout)*time.Second, target) + + if opts.CleanupOnFail && err != nil { + c.Log("Cleanup on fail enabled: cleaning up newly created resources due to wait failure during update") + cleanupErrors = c.cleanup(newlyCreatedResources) + return fmt.Errorf(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && ")) + } + + return err } return nil } +func (c *Client) cleanup(newlyCreatedResources []*resource.Info) (cleanupErrors []string) { + for _, info := range newlyCreatedResources { + kind := info.Mapping.GroupVersionKind.Kind + c.Log("Deleting newly created %s with the name %q in %s...", kind, info.Name, info.Namespace) + if err := deleteResource(info); err != nil { + c.Log("Error deleting newly created %s with the name %q in %s: %s", kind, info.Name, info.Namespace, err) + cleanupErrors = append(cleanupErrors, err.Error()) + } + } + return +} + // Delete deletes Kubernetes resources from an io.reader. // // Namespace will set the namespace. func (c *Client) Delete(namespace string, reader io.Reader) error { + return c.DeleteWithTimeout(namespace, reader, 0, false) +} + +// DeleteWithTimeout deletes Kubernetes resources from an io.reader. If shouldWait is true, the function +// will wait for all resources to be deleted from etcd before returning, or when the timeout +// has expired. +// +// Namespace will set the namespace. +func (c *Client) DeleteWithTimeout(namespace string, reader io.Reader, timeout int64, shouldWait bool) error { infos, err := c.BuildUnstructured(namespace, reader) if err != nil { return err } - return perform(infos, func(info *resource.Info) error { + err = perform(infos, func(info *resource.Info) error { c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind) err := deleteResource(info) return c.skipIfNotFound(err) }) + if err != nil { + return err + } + + if shouldWait { + c.Log("Waiting for %d seconds for delete to be completed", timeout) + return waitUntilAllResourceDeleted(infos, time.Duration(timeout)*time.Second) + } + + return nil } func (c *Client) skipIfNotFound(err error) error { @@ -341,6 +513,27 @@ func (c *Client) skipIfNotFound(err error) error { return err } +func waitUntilAllResourceDeleted(infos Result, timeout time.Duration) error { + return wait.Poll(2*time.Second, timeout, func() (bool, error) { + allDeleted := true + err := perform(infos, func(info *resource.Info) error { + innerErr := info.Get() + if errors.IsNotFound(innerErr) { + return nil + } + if innerErr != nil { + return innerErr + } + allDeleted = false + return nil + }) + if err != nil { + return false, err + } + return allDeleted, nil + }) +} + func (c *Client) watchTimeout(t time.Duration) ResourceActorFunc { return func(info *resource.Info) error { return c.watchUntilReady(t, info) @@ -369,6 +562,55 @@ func (c *Client) WatchUntilReady(namespace string, reader io.Reader, timeout int return perform(infos, c.watchTimeout(time.Duration(timeout)*time.Second)) } +// WaitUntilCRDEstablished polls the given CRD until it reaches the established +// state. A CRD needs to reach the established state before CRs can be created. +// +// If a naming conflict condition is found, this function will return an error. 
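As the tests further down also exercise, a caller can gate custom-resource creation on the CRD reaching the established state. A small sketch follows; the CRD manifest and the timeout are placeholders, while `WaitUntilCRDEstablished` is the function added here.

```go
package main

import (
	"strings"
	"time"

	"k8s.io/helm/pkg/kube"
)

// Placeholder CRD manifest; any valid v1beta1 CustomResourceDefinition works.
const crdManifest = `
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: applications.app.k8s.io
spec:
  group: app.k8s.io
  names:
    kind: Application
    plural: applications
  scope: Namespaced
  version: v1beta1
`

func main() {
	c := kube.New(nil)

	// Poll until the CRD reports the Established condition, a naming conflict
	// is detected, or the timeout expires.
	if err := c.WaitUntilCRDEstablished(strings.NewReader(crdManifest), 60*time.Second); err != nil {
		panic(err)
	}
}
```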
+func (c *Client) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { + infos, err := c.BuildUnstructured(metav1.NamespaceAll, reader) + if err != nil { + return err + } + + return perform(infos, c.pollCRDEstablished(timeout)) +} + +func (c *Client) pollCRDEstablished(t time.Duration) ResourceActorFunc { + return func(info *resource.Info) error { + return c.pollCRDUntilEstablished(t, info) + } +} + +func (c *Client) pollCRDUntilEstablished(timeout time.Duration, info *resource.Info) error { + return wait.PollImmediate(time.Second, timeout, func() (bool, error) { + err := info.Get() + if err != nil { + return false, fmt.Errorf("unable to get CRD: %v", err) + } + + crd := &apiextv1beta1.CustomResourceDefinition{} + err = scheme.Scheme.Convert(info.Object, crd, nil) + if err != nil { + return false, fmt.Errorf("unable to convert to CRD type: %v", err) + } + + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1beta1.Established: + if cond.Status == apiextv1beta1.ConditionTrue { + return true, nil + } + case apiextv1beta1.NamesAccepted: + if cond.Status == apiextv1beta1.ConditionFalse { + return false, fmt.Errorf("naming conflict detected for CRD %s", crd.GetName()) + } + } + } + + return false, nil + }) +} + func perform(infos Result, fn ResourceActorFunc) error { if len(infos) == 0 { return ErrNoObjectsVisited @@ -416,24 +658,33 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P } // Get a versioned object - versionedObject := asVersioned(target) + versionedObject, err := asVersioned(target) - // Unstructured objects, such as CRDs, may not have an not registered error + // Unstructured objects, such as CRDs, may not have a not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. 
_, isUnstructured := versionedObject.(runtime.Unstructured) + // On newer K8s versions, CRDs aren't unstructured but has this dedicated type + _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) + switch { - case runtime.IsNotRegisteredError(err), isUnstructured: + case runtime.IsNotRegisteredError(err), isUnstructured, isCRD: // fall back to generic JSON merge patch patch, err := jsonpatch.CreateMergePatch(oldData, newData) - return patch, types.MergePatchType, err + if err != nil { + return nil, types.MergePatchType, fmt.Errorf("failed to create merge patch: %v", err) + } + return patch, types.MergePatchType, nil case err != nil: return nil, types.StrategicMergePatchType, fmt.Errorf("failed to get versionedObject: %s", err) default: patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, versionedObject) - return patch, types.StrategicMergePatchType, err + if err != nil { + return nil, types.StrategicMergePatchType, fmt.Errorf("failed to create two-way merge patch: %v", err) + } + return patch, types.StrategicMergePatchType, nil } } @@ -488,7 +739,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, return nil } - versioned := asVersioned(target) + versioned := asVersionedOrUnstructured(target) selector, ok := getSelectorFromObject(versioned) if !ok { return nil @@ -561,10 +812,7 @@ func getSelectorFromObject(obj runtime.Object) (map[string]string, bool) { } func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { - w, err := resource.NewHelper(info.Client, info.Mapping).WatchSingle(info.Namespace, info.Name, info.ResourceVersion) - if err != nil { - return err - } + lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything()) kind := info.Mapping.GroupVersionKind.Kind c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) @@ -577,7 +825,7 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) err ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) defer cancel() - _, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) { + _, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) { switch e.Type { case watch.Added, watch.Modified: // For things like a secret or a config map, this is the best indicator @@ -665,15 +913,12 @@ func (c *Client) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, } func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Info) error { - w, err := resource.NewHelper(info.Client, info.Mapping).WatchSingle(info.Namespace, info.Name, info.ResourceVersion) - if err != nil { - return err - } + lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything()) c.Log("Watching pod %s for completion with timeout of %v", info.Name, timeout) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) defer cancel() - _, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) { + _, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) { return isPodComplete(e) }) @@ -704,7 +949,7 @@ func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][] c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name) - versioned := asVersioned(info) + versioned := 
asVersionedOrUnstructured(info) selector, ok := getSelectorFromObject(versioned) if !ok { return objPods, nil @@ -737,17 +982,23 @@ func isFoundPod(podItem []v1.Pod, pod v1.Pod) bool { return false } -func asVersioned(info *resource.Info) runtime.Object { +func asVersionedOrUnstructured(info *resource.Info) runtime.Object { + obj, _ := asVersioned(info) + return obj +} + +func asVersioned(info *resource.Info) (runtime.Object, error) { converter := runtime.ObjectConvertor(scheme.Scheme) groupVersioner := runtime.GroupVersioner(schema.GroupVersions(scheme.Scheme.PrioritizedVersionsAllGroups())) if info.Mapping != nil { groupVersioner = info.Mapping.GroupVersionKind.GroupVersion() } - if obj, err := converter.ConvertToVersion(info.Object, groupVersioner); err == nil { - return obj + obj, err := converter.ConvertToVersion(info.Object, groupVersioner) + if err != nil { + return info.Object, err } - return info.Object + return obj, nil } func asInternal(info *resource.Info) (runtime.Object, error) { diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index de33881c8..d33b4b9d9 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -21,26 +21,49 @@ import ( "io" "io/ioutil" "net/http" + "sort" "strings" "testing" + "time" "k8s.io/api/core/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/cli-runtime/pkg/genericclioptions/resource" + "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" + kubectlscheme "k8s.io/kubernetes/pkg/kubectl/scheme" ) +func init() { + err := apiextv1beta1.AddToScheme(scheme.Scheme) + if err != nil { + panic(err) + } + + // Tiller use the scheme from go-client, but the cmdtesting + // package used here is hardcoded to use the scheme from + // kubectl. So for testing, we need to add the CustomResourceDefinition + // type to both schemes. + err = apiextv1beta1.AddToScheme(kubectlscheme.Scheme) + if err != nil { + panic(err) + } +} + var ( - codec = scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer ) +func getCodec() runtime.Codec { + return scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
+} + func objBody(obj runtime.Object) io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) + return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(getCodec(), obj)))) } func newPod(name string) v1.Pod { @@ -77,6 +100,18 @@ func newPodList(names ...string) v1.PodList { return list } +func newService(name string) v1.Service { + ns := v1.NamespaceDefault + return v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + SelfLink: "/api/v1/namespaces/default/services/" + name, + }, + Spec: v1.ServiceSpec{}, + } +} + func notFoundBody() *metav1.Status { return &metav1.Status{ Code: http.StatusNotFound, @@ -90,7 +125,7 @@ func notFoundBody() *metav1.Status { func newResponse(code int, obj runtime.Object) (*http.Response, error) { header := http.Header{} header.Set("Content-Type", runtime.ContentTypeJSON) - body := ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) + body := ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(getCodec(), obj)))) return &http.Response{StatusCode: code, Header: header, Body: body}, nil } @@ -151,6 +186,8 @@ func TestUpdate(t *testing.T) { return newResponse(200, &listB.Items[1]) case p == "/namespaces/default/pods/squid" && m == "DELETE": return newResponse(200, &listB.Items[1]) + case p == "/namespaces/default/pods/squid" && m == "GET": + return newResponse(200, &listA.Items[2]) default: t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) return nil, nil @@ -183,6 +220,7 @@ func TestUpdate(t *testing.T) { "/namespaces/default/pods/otter:GET", "/namespaces/default/pods/dolphin:GET", "/namespaces/default/pods:POST", + "/namespaces/default/pods/squid:GET", "/namespaces/default/pods/squid:DELETE", } if len(expectedActions) != len(actions) { @@ -194,6 +232,103 @@ func TestUpdate(t *testing.T) { t.Errorf("expected %s request got %s", v, actions[k]) } } + + // Test resource policy is respected + actions = nil + listA.Items[2].ObjectMeta.Annotations = map[string]string{ResourcePolicyAnno: "keep"} + if err := c.Update(v1.NamespaceDefault, objBody(&listA), objBody(&listB), false, false, 0, false); err != nil { + t.Fatal(err) + } + for _, v := range actions { + if v == "/namespaces/default/pods/squid:DELETE" { + t.Errorf("should not have deleted squid - it has helm.sh/resource-policy=keep") + } + } +} + +func TestUpdateNonManagedResourceError(t *testing.T) { + actual := newPodList("starfish") + current := newPodList() + target := newPodList("starfish") + + tf := cmdtesting.NewTestFactory() + defer tf.Cleanup() + + tf.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + p, m := req.URL.Path, req.Method + t.Logf("got request %s %s", p, m) + switch { + case p == "/namespaces/default/pods/starfish" && m == "GET": + return newResponse(200, &actual.Items[0]) + default: + t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) + return nil, nil + } + }), + } + + c := &Client{ + Factory: tf, + Log: nopLogger, + } + + if err := c.Update(v1.NamespaceDefault, objBody(¤t), objBody(&target), false, false, 0, false); err != nil { + if err.Error() != "kind Pod with the name \"starfish\" already exists in the cluster and wasn't defined in the previous release. 
Before upgrading, please either delete the resource from the cluster or remove it from the chart" { + t.Fatal(err) + } + } else { + t.Fatalf("error expected") + } +} + +func TestDeleteWithTimeout(t *testing.T) { + testCases := map[string]struct { + deleteTimeout int64 + deleteAfter time.Duration + success bool + }{ + "resource is deleted within timeout period": { + int64((2 * time.Minute).Seconds()), + 10 * time.Second, + true, + }, + "resource is not deleted within the timeout period": { + int64((10 * time.Second).Seconds()), + 20 * time.Second, + false, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + c := newTestClient() + defer c.Cleanup() + + service := newService("my-service") + startTime := time.Now() + c.TestFactory.UnstructuredClient = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Version: "v1"}, + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + currentTime := time.Now() + if startTime.Add(tc.deleteAfter).Before(currentTime) { + return newResponse(404, notFoundBody()) + } + return newResponse(200, &service) + }), + } + + err := c.DeleteWithTimeout(metav1.NamespaceDefault, strings.NewReader(testServiceManifest), tc.deleteTimeout, true) + if err != nil && tc.success { + t.Errorf("expected no error, but got %v", err) + } + if err == nil && !tc.success { + t.Errorf("expected error, but didn't get one") + } + }) + } } func TestBuild(t *testing.T) { @@ -280,6 +415,177 @@ func TestGet(t *testing.T) { } } +func TestResourceTypeSortOrder(t *testing.T) { + pod := newPod("my-pod") + service := newService("my-service") + c := newTestClient() + defer c.Cleanup() + c.TestFactory.UnstructuredClient = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Version: "v1"}, + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + p, m := req.URL.Path, req.Method + t.Logf("got request %s %s", p, m) + switch { + case p == "/namespaces/default/pods/my-pod" && m == "GET": + return newResponse(200, &pod) + case p == "/namespaces/default/services/my-service" && m == "GET": + return newResponse(200, &service) + default: + t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) + return nil, nil + } + }), + } + + // Test sorting order + data := strings.NewReader(testResourceTypeSortOrder) + o, err := c.Get("default", data) + if err != nil { + t.Errorf("Expected missing results, got %q", err) + } + podIndex := strings.Index(o, "my-pod") + serviceIndex := strings.Index(o, "my-service") + if podIndex == -1 { + t.Errorf("Expected v1/Pod my-pod, got %s", o) + } + if serviceIndex == -1 { + t.Errorf("Expected v1/Service my-service, got %s", o) + } + if !sort.IntsAreSorted([]int{podIndex, serviceIndex}) { + t.Errorf("Expected order: [v1/Pod v1/Service], got %s", o) + } +} + +func TestResourceSortOrder(t *testing.T) { + list := newPodList("albacore", "coral", "beluga") + c := newTestClient() + defer c.Cleanup() + c.TestFactory.UnstructuredClient = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Version: "v1"}, + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + p, m := req.URL.Path, req.Method + t.Logf("got request %s %s", p, m) + switch { + case p == "/namespaces/default/pods/albacore" && m == "GET": + return newResponse(200, &list.Items[0]) + case p == "/namespaces/default/pods/coral" && m == "GET": + return newResponse(200, 
&list.Items[1]) + case p == "/namespaces/default/pods/beluga" && m == "GET": + return newResponse(200, &list.Items[2]) + default: + t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path) + return nil, nil + } + }), + } + + // Test sorting order + data := strings.NewReader(testResourceSortOrder) + o, err := c.Get("default", data) + if err != nil { + t.Errorf("Expected missing results, got %q", err) + } + albacoreIndex := strings.Index(o, "albacore") + belugaIndex := strings.Index(o, "beluga") + coralIndex := strings.Index(o, "coral") + if albacoreIndex == -1 { + t.Errorf("Expected v1/Pod albacore, got %s", o) + } + if belugaIndex == -1 { + t.Errorf("Expected v1/Pod beluga, got %s", o) + } + if coralIndex == -1 { + t.Errorf("Expected v1/Pod coral, got %s", o) + } + if !sort.IntsAreSorted([]int{albacoreIndex, belugaIndex, coralIndex}) { + t.Errorf("Expected order: [albacore beluga coral], got %s", o) + } +} + +func TestWaitUntilCRDEstablished(t *testing.T) { + testCases := map[string]struct { + conditions []apiextv1beta1.CustomResourceDefinitionCondition + returnConditionsAfter int + success bool + }{ + "crd reaches established state after 2 requests": { + conditions: []apiextv1beta1.CustomResourceDefinitionCondition{ + { + Type: apiextv1beta1.Established, + Status: apiextv1beta1.ConditionTrue, + }, + }, + returnConditionsAfter: 2, + success: true, + }, + "crd does not reach established state before timeout": { + conditions: []apiextv1beta1.CustomResourceDefinitionCondition{}, + returnConditionsAfter: 100, + success: false, + }, + "crd name is not accepted": { + conditions: []apiextv1beta1.CustomResourceDefinitionCondition{ + { + Type: apiextv1beta1.NamesAccepted, + Status: apiextv1beta1.ConditionFalse, + }, + }, + returnConditionsAfter: 1, + success: false, + }, + } + + for tn, tc := range testCases { + func(name string) { + c := newTestClient() + defer c.Cleanup() + + crdWithoutConditions := newCrdWithStatus("name", apiextv1beta1.CustomResourceDefinitionStatus{}) + crdWithConditions := newCrdWithStatus("name", apiextv1beta1.CustomResourceDefinitionStatus{ + Conditions: tc.conditions, + }) + + requestCount := 0 + c.TestFactory.UnstructuredClient = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Version: "v1"}, + NegotiatedSerializer: unstructuredSerializer, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + var crd apiextv1beta1.CustomResourceDefinition + if requestCount < tc.returnConditionsAfter { + crd = crdWithoutConditions + } else { + crd = crdWithConditions + } + requestCount++ + return newResponse(200, &crd) + }), + } + + err := c.WaitUntilCRDEstablished(strings.NewReader(crdManifest), 5*time.Second) + if err != nil && tc.success { + t.Errorf("%s: expected no error, but got %v", name, err) + } + if err == nil && !tc.success { + t.Errorf("%s: expected error, but didn't get one", name) + } + }(tn) + } +} + +func newCrdWithStatus(name string, status apiextv1beta1.CustomResourceDefinitionStatus) apiextv1beta1.CustomResourceDefinition { + crd := apiextv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: apiextv1beta1.CustomResourceDefinitionSpec{}, + Status: status, + } + return crd +} + func TestPerform(t *testing.T) { tests := []struct { name string @@ -361,6 +667,35 @@ func TestReal(t *testing.T) { } } +const testResourceTypeSortOrder = ` +kind: Service +apiVersion: v1 +metadata: + name: my-service +--- +kind: Pod +apiVersion: v1 +metadata: + name: my-pod +` + 
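For completeness, a hedged sketch of the delete-and-wait behavior that TestDeleteWithTimeout above covers; the Service manifest, namespace, and timeout values are placeholders, and `DeleteWithTimeout` is the new method from this diff.

```go
package main

import (
	"strings"

	"k8s.io/helm/pkg/kube"
)

// Placeholder manifest for a single Service.
const serviceManifest = `
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  ports:
  - port: 80
`

func main() {
	c := kube.New(nil)

	// Delete the objects in the manifest and, because shouldWait is true,
	// poll every two seconds until they are gone or 120 seconds have elapsed.
	if err := c.DeleteWithTimeout("default", strings.NewReader(serviceManifest), 120, true); err != nil {
		panic(err)
	}
}
```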
+const testResourceSortOrder = ` +kind: Pod +apiVersion: v1 +metadata: + name: albacore +--- +kind: Pod +apiVersion: v1 +metadata: + name: coral +--- +kind: Pod +apiVersion: v1 +metadata: + name: beluga +` + const testServiceManifest = ` kind: Service apiVersion: v1 @@ -413,7 +748,7 @@ spec: tier: backend role: master --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-master @@ -453,7 +788,7 @@ spec: tier: backend role: slave --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-slave @@ -493,7 +828,7 @@ spec: app: guestbook tier: frontend --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: frontend @@ -518,3 +853,41 @@ spec: ports: - containerPort: 80 ` + +const crdManifest = ` +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: applications.app.k8s.io +spec: + group: app.k8s.io + names: + kind: Application + plural: applications + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'Description' + type: string + kind: + description: 'Kind' + type: string + metadata: + type: object + spec: + type: object + status: + type: object + version: v1beta1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +` diff --git a/pkg/kube/resource_policy.go b/pkg/kube/resource_policy.go new file mode 100644 index 000000000..3029a22a5 --- /dev/null +++ b/pkg/kube/resource_policy.go @@ -0,0 +1,43 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +const ( + // ResourcePolicyAnno is the annotation name for a resource policy + ResourcePolicyAnno = "helm.sh/resource-policy" + + // deletePolicy is the resource policy type for delete + // + // This resource policy type allows explicitly opting in to the default + // resource deletion behavior, for example when overriding a chart's + // default annotations. Any other value allows resources to skip being + // deleted during an uninstallRelease action. + deletePolicy = "delete" +) + +// ResourcePolicyIsKeep accepts a map of Kubernetes resource annotations and +// returns true if the resource should be kept, otherwise false if it is safe +// for Helm to delete. +func ResourcePolicyIsKeep(annotations map[string]string) bool { + if annotations != nil { + resourcePolicyType, ok := annotations[ResourcePolicyAnno] + if ok && resourcePolicyType != deletePolicy { + return true + } + } + return false +} diff --git a/pkg/kube/resource_policy_test.go b/pkg/kube/resource_policy_test.go new file mode 100644 index 000000000..de6061b48 --- /dev/null +++ b/pkg/kube/resource_policy_test.go @@ -0,0 +1,72 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import "testing" + +func TestResourcePolicyIsKeep(t *testing.T) { + type annotations map[string]string + type testcase struct { + annotations + keep bool + } + cases := []testcase{ + {nil, false}, + { + annotations{ + "foo": "bar", + }, + false, + }, + { + annotations{ + ResourcePolicyAnno: "keep", + }, + true, + }, + { + annotations{ + ResourcePolicyAnno: "KEEP ", + }, + true, + }, + { + annotations{ + ResourcePolicyAnno: "", + }, + true, + }, + { + annotations{ + ResourcePolicyAnno: "delete", + }, + false, + }, + { + annotations{ + ResourcePolicyAnno: "DELETE", + }, + true, + }, + } + + for _, tc := range cases { + if tc.keep != ResourcePolicyIsKeep(tc.annotations) { + t.Errorf("Expected function to return %t for annotations %v", tc.keep, tc.annotations) + } + } +} diff --git a/pkg/kube/result.go b/pkg/kube/result.go index cc222a66f..cf4a4195e 100644 --- a/pkg/kube/result.go +++ b/pkg/kube/result.go @@ -16,7 +16,7 @@ limitations under the License. package kube // import "k8s.io/helm/pkg/kube" -import "k8s.io/cli-runtime/pkg/genericclioptions/resource" +import "k8s.io/cli-runtime/pkg/resource" // Result provides convenience methods for comparing collections of Infos. type Result []*resource.Info diff --git a/pkg/kube/result_test.go b/pkg/kube/result_test.go index c4cf989b8..d4c18ee6a 100644 --- a/pkg/kube/result_test.go +++ b/pkg/kube/result_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/cli-runtime/pkg/genericclioptions/resource" + "k8s.io/cli-runtime/pkg/resource" ) func TestResult(t *testing.T) { diff --git a/pkg/kube/wait.go b/pkg/kube/wait.go index 105d79b93..c7df8ff2d 100644 --- a/pkg/kube/wait.go +++ b/pkg/kube/wait.go @@ -52,8 +52,9 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { services := []v1.Service{} pvc := []v1.PersistentVolumeClaim{} deployments := []deployment{} + ingresses := []extensions.Ingress{} for _, v := range created { - switch value := asVersioned(v).(type) { + switch value := asVersionedOrUnstructured(v).(type) { case *v1.ReplicationController: list, err := getPods(kcs, value.Namespace, value.Spec.Selector) if err != nil { @@ -71,6 +72,10 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { if err != nil { return false, err } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + continue + } // Find RS associated with deployment newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, kcs.AppsV1()) if err != nil || newReplicaSet == nil { @@ -86,6 +91,10 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { if err != nil { return false, err } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + continue + } // Find RS associated with deployment newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, kcs.AppsV1()) if err != nil || newReplicaSet == nil { @@ -101,6 +110,10 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { if err != nil { return false, err 
} + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + continue + } // Find RS associated with deployment newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, kcs.AppsV1()) if err != nil || newReplicaSet == nil { @@ -116,6 +129,10 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { if err != nil { return false, err } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + continue + } // Find RS associated with deployment newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, kcs.AppsV1()) if err != nil || newReplicaSet == nil { @@ -192,9 +209,15 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { return false, err } services = append(services, *svc) + case *extensions.Ingress: + ingress, err := kcs.ExtensionsV1beta1().Ingresses(value.Namespace).Get(value.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + ingresses = append(ingresses, *ingress) } } - isReady := c.podsReady(pods) && c.servicesReady(services) && c.volumesReady(pvc) && c.deploymentsReady(deployments) + isReady := c.podsReady(pods) && c.servicesReady(services) && c.volumesReady(pvc) && c.deploymentsReady(deployments) && c.ingressesReady(ingresses) return isReady, nil }) } @@ -269,3 +292,13 @@ func isPodReady(pod *v1.Pod) bool { } return false } + +func (c *Client) ingressesReady(ingresses []extensions.Ingress) bool { + for _, ingress := range ingresses { + if &ingress.Status == nil || len(ingress.Status.LoadBalancer.Ingress) == 0 { + c.Log("Ingress is not ready: %s/%s", ingress.GetNamespace(), ingress.GetName()) + return false + } + } + return true +} diff --git a/pkg/lint/lint_test.go b/pkg/lint/lint_test.go index 84dfbf508..0514f7f6d 100644 --- a/pkg/lint/lint_test.go +++ b/pkg/lint/lint_test.go @@ -26,22 +26,23 @@ import ( var values = []byte{} -const namespace = "testNamespace" -const strict = false - -const badChartDir = "rules/testdata/badchartfile" -const badValuesFileDir = "rules/testdata/badvaluesfile" -const badYamlFileDir = "rules/testdata/albatross" -const goodChartDir = "rules/testdata/goodone" +const ( + namespace = "testNamespace" + strict = false + badChartDir = "rules/testdata/badchartfile" + badValuesFileDir = "rules/testdata/badvaluesfile" + badYamlFileDir = "rules/testdata/albatross" + goodChartDir = "rules/testdata/goodone" +) func TestBadChart(t *testing.T) { m := All(badChartDir, values, namespace, strict).Messages - if len(m) != 5 { + if len(m) != 6 { t.Errorf("Number of errors %v", len(m)) t.Errorf("All didn't fail with expected errors, got %#v", m) } // There should be one INFO, 2 WARNINGs and one ERROR messages, check for them - var i, w, e, e2, e3 bool + var i, w, e, e2, e3, e4 bool for _, msg := range m { if msg.Severity == support.InfoSev { if strings.Contains(msg.Err.Error(), "icon is recommended") { @@ -54,7 +55,7 @@ func TestBadChart(t *testing.T) { } } if msg.Severity == support.ErrorSev { - if strings.Contains(msg.Err.Error(), "version 0.0.0 is less than or equal to 0") { + if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVer") { e = true } if strings.Contains(msg.Err.Error(), "name is required") { @@ -63,9 +64,13 @@ func TestBadChart(t *testing.T) { if strings.Contains(msg.Err.Error(), "directory name (badchartfile) and chart name () must be the same") { e3 = true } + + if strings.Contains(msg.Err.Error(), "apiVersion is required") { + e4 = true + } } } - if !e || !e2 || !e3 || !w || !i 
{ + if !e || !e2 || !e3 || !e4 || !w || !i { t.Errorf("Didn't find all the expected errors, got %#v", m) } } diff --git a/pkg/lint/rules/chartfile.go b/pkg/lint/rules/chartfile.go index 12f028af1..d851e73ab 100644 --- a/pkg/lint/rules/chartfile.go +++ b/pkg/lint/rules/chartfile.go @@ -46,10 +46,12 @@ func Chartfile(linter *support.Linter) { return } - linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartNamePresence(chartFile)) + linter.RunLinterRule(support.WarningSev, chartFileName, validateChartNameFormat(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartNameDirMatch(linter.ChartDir, chartFile)) // Chart metadata + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartEngine(chartFile)) linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile)) @@ -74,13 +76,20 @@ func validateChartYamlFormat(chartFileError error) error { return nil } -func validateChartName(cf *chart.Metadata) error { +func validateChartNamePresence(cf *chart.Metadata) error { if cf.Name == "" { return errors.New("name is required") } return nil } +func validateChartNameFormat(cf *chart.Metadata) error { + if strings.Contains(cf.Name, ".") { + return errors.New("name should be lower case letters and numbers. Words may be separated with dashes") + } + return nil +} + func validateChartNameDirMatch(chartDir string, cf *chart.Metadata) error { if cf.Name != filepath.Base(chartDir) { return fmt.Errorf("directory name (%s) and chart name (%s) must be the same", filepath.Base(chartDir), cf.Name) @@ -88,6 +97,18 @@ func validateChartNameDirMatch(chartDir string, cf *chart.Metadata) error { return nil } +func validateChartAPIVersion(cf *chart.Metadata) error { + if cf.ApiVersion == "" { + return errors.New("apiVersion is required") + } + + if cf.ApiVersion != "v1" { + return fmt.Errorf("apiVersion '%s' is not valid. 
The value must be \"v1\"", cf.ApiVersion) + } + + return nil +} + func validateChartVersion(cf *chart.Metadata) error { if cf.Version == "" { return errors.New("version is required") @@ -99,7 +120,7 @@ func validateChartVersion(cf *chart.Metadata) error { return fmt.Errorf("version '%s' is not a valid SemVer", cf.Version) } - c, err := semver.NewConstraint("> 0") + c, err := semver.NewConstraint(">0.0.0-0") if err != nil { return err } diff --git a/pkg/lint/rules/chartfile_test.go b/pkg/lint/rules/chartfile_test.go index 235e5fc4c..4ec091fa7 100644 --- a/pkg/lint/rules/chartfile_test.go +++ b/pkg/lint/rules/chartfile_test.go @@ -29,17 +29,20 @@ import ( ) const ( - badChartDir = "testdata/badchartfile" - goodChartDir = "testdata/goodone" + badChartDir = "testdata/badchartfile" + badNameChartDir = "testdata/badnamechart" + goodChartDir = "testdata/goodone" ) var ( badChartFilePath = filepath.Join(badChartDir, "Chart.yaml") + badNameChartFilePath = filepath.Join(badNameChartDir, "Chart.yaml") goodChartFilePath = filepath.Join(goodChartDir, "Chart.yaml") nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml") ) var badChart, chatLoadRrr = chartutil.LoadChartfile(badChartFilePath) +var badNameChart, _ = chartutil.LoadChartfile(badNameChartFilePath) var goodChart, _ = chartutil.LoadChartfile(goodChartFilePath) // Validation functions Test @@ -66,12 +69,19 @@ func TestValidateChartYamlFormat(t *testing.T) { } func TestValidateChartName(t *testing.T) { - err := validateChartName(badChart) + err := validateChartNamePresence(badChart) if err == nil { t.Errorf("validateChartName to return a linter error, got no error") } } +func TestValidateChartNameFormat(t *testing.T) { + err := validateChartNameFormat(badNameChart) + if err == nil { + t.Errorf("validateChartNameFormat to return a linter error, got no error") + } +} + func TestValidateChartNameDirMatch(t *testing.T) { err := validateChartNameDirMatch(goodChartDir, goodChart) if err != nil { @@ -96,7 +106,7 @@ func TestValidateChartVersion(t *testing.T) { ErrorMsg string }{ {"", "version is required"}, - {"0", "0 is less than or equal to 0"}, + {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"}, {"waps", "'waps' is not a valid SemVer"}, {"-3", "'-3' is not a valid SemVer"}, } @@ -226,8 +236,8 @@ func TestChartfile(t *testing.T) { Chartfile(&linter) msgs := linter.Messages - if len(msgs) != 4 { - t.Errorf("Expected 3 errors, got %d", len(msgs)) + if len(msgs) != 5 { + t.Errorf("Expected 4 errors, got %d", len(msgs)) } if !strings.Contains(msgs[0].Err.Error(), "name is required") { @@ -238,12 +248,16 @@ func TestChartfile(t *testing.T) { t.Errorf("Unexpected message 1: %s", msgs[1].Err) } - if !strings.Contains(msgs[2].Err.Error(), "version 0.0.0 is less than or equal to 0") { + if !strings.Contains(msgs[2].Err.Error(), "apiVersion is required") { t.Errorf("Unexpected message 2: %s", msgs[2].Err) } - if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") { - t.Errorf("Unexpected message 3: %s", msgs[3].Err) + if !strings.Contains(msgs[3].Err.Error(), "version '0.0.0.0' is not a valid SemVer") { + t.Errorf("Unexpected message 3: %s", msgs[2].Err) + } + + if !strings.Contains(msgs[4].Err.Error(), "icon is recommended") { + t.Errorf("Unexpected message 4: %s", msgs[3].Err) } } diff --git a/pkg/lint/rules/template.go b/pkg/lint/rules/template.go index 192150737..26c548bac 100644 --- a/pkg/lint/rules/template.go +++ b/pkg/lint/rules/template.go @@ -99,20 +99,13 @@ func Templates(linter *support.Linter, values []byte, namespace 
string, strict b fileName, _ := template.Name, template.Data path = fileName - linter.RunLinterRule(support.ErrorSev, path, validateAllowedExtension(fileName)) + linter.RunLinterRule(support.WarningSev, path, validateAllowedExtension(fileName)) // We only apply the following lint rules to yaml files if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" { continue } - // NOTE: disabled for now, Refs https://github.com/kubernetes/helm/issues/1463 - // Check that all the templates have a matching value - //linter.RunLinterRule(support.WarningSev, path, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate)) - - // NOTE: disabled for now, Refs https://github.com/kubernetes/helm/issues/1037 - // linter.RunLinterRule(support.WarningSev, path, validateQuotes(string(preExecutedTemplate))) - renderedContent := renderedContentMap[filepath.Join(chart.GetMetadata().Name, fileName)] var yamlStruct K8sYamlStruct // Even though K8sYamlStruct only defines Metadata namespace, an error in any other diff --git a/pkg/lint/rules/template_test.go b/pkg/lint/rules/template_test.go index 41a7384e7..a294d3c57 100644 --- a/pkg/lint/rules/template_test.go +++ b/pkg/lint/rules/template_test.go @@ -25,7 +25,11 @@ import ( "k8s.io/helm/pkg/lint/support" ) -const templateTestBasedir = "./testdata/albatross" +const ( + strict = false + namespace = "testNamespace" + templateTestBasedir = "./testdata/albatross" +) func TestValidateAllowedExtension(t *testing.T) { var failTest = []string{"/foo", "/test.toml"} @@ -46,9 +50,6 @@ func TestValidateAllowedExtension(t *testing.T) { var values = []byte("nameOverride: ''\nhttpPort: 80") -const namespace = "testNamespace" -const strict = false - func TestTemplateParsing(t *testing.T) { linter := support.Linter{ChartDir: templateTestBasedir} Templates(&linter, values, namespace, strict) diff --git a/pkg/lint/rules/testdata/albatross/Chart.yaml b/pkg/lint/rules/testdata/albatross/Chart.yaml index c108fa5e5..21124acfc 100644 --- a/pkg/lint/rules/testdata/albatross/Chart.yaml +++ b/pkg/lint/rules/testdata/albatross/Chart.yaml @@ -1,3 +1,4 @@ +apiVersion: v1 name: albatross description: testing chart version: 199.44.12345-Alpha.1+cafe009 diff --git a/pkg/lint/rules/testdata/badchartfile/Chart.yaml b/pkg/lint/rules/testdata/badchartfile/Chart.yaml index dbb4a1501..c14ed7763 100644 --- a/pkg/lint/rules/testdata/badchartfile/Chart.yaml +++ b/pkg/lint/rules/testdata/badchartfile/Chart.yaml @@ -1,3 +1,3 @@ description: A Helm chart for Kubernetes -version: 0.0.0 +version: 0.0.0.0 home: "" diff --git a/pkg/lint/rules/testdata/badnamechart/Chart.yaml b/pkg/lint/rules/testdata/badnamechart/Chart.yaml new file mode 100644 index 000000000..6ac6cfa42 --- /dev/null +++ b/pkg/lint/rules/testdata/badnamechart/Chart.yaml @@ -0,0 +1,4 @@ +name: bad.chart.name +description: A Helm chart for Kubernetes +version: 0.1.0 +icon: http://riverrun.io diff --git a/pkg/lint/rules/testdata/badnamechart/values.yaml b/pkg/lint/rules/testdata/badnamechart/values.yaml new file mode 100644 index 000000000..54deecf74 --- /dev/null +++ b/pkg/lint/rules/testdata/badnamechart/values.yaml @@ -0,0 +1 @@ +# Default values for badchartname. 
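The new `badnamechart` fixture above exercises the `validateChartNameFormat` rule added in `pkg/lint/rules/chartfile.go`, which flags chart names containing dots. Below is a minimal, self-contained sketch of that check for illustration only; the standalone `main` and the sample names are assumptions for the example, not part of the patch:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateChartNameFormat mirrors the lint rule added in chartfile.go:
// a chart name containing '.' is rejected with a warning-level message.
func validateChartNameFormat(name string) error {
	if strings.Contains(name, ".") {
		return errors.New("name should be lower case letters and numbers. Words may be separated with dashes")
	}
	return nil
}

func main() {
	// "bad.chart.name" matches the badnamechart fixture; "good-chart" is an illustrative valid name.
	for _, name := range []string{"bad.chart.name", "good-chart"} {
		if err := validateChartNameFormat(name); err != nil {
			fmt.Printf("%s: %v\n", name, err)
			continue
		}
		fmt.Printf("%s: ok\n", name)
	}
}
```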
diff --git a/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml b/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml index bed845249..632919d03 100644 --- a/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml +++ b/pkg/lint/rules/testdata/badvaluesfile/Chart.yaml @@ -1,3 +1,4 @@ +apiVersion: v1 name: badvaluesfile description: A Helm chart for Kubernetes version: 0.0.1 diff --git a/pkg/lint/rules/testdata/goodone/Chart.yaml b/pkg/lint/rules/testdata/goodone/Chart.yaml index de05463ca..cb7a4bf20 100644 --- a/pkg/lint/rules/testdata/goodone/Chart.yaml +++ b/pkg/lint/rules/testdata/goodone/Chart.yaml @@ -1,3 +1,4 @@ +apiVersion: v1 name: goodone description: good testing chart version: 199.44.12345-Alpha.1+cafe009 diff --git a/pkg/plugin/installer/vcs_installer.go b/pkg/plugin/installer/vcs_installer.go index 4b502dae4..9ec61ccfe 100644 --- a/pkg/plugin/installer/vcs_installer.go +++ b/pkg/plugin/installer/vcs_installer.go @@ -136,7 +136,7 @@ func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) { sort.Sort(sort.Reverse(semver.Collection(semvers))) for _, v := range semvers { if constraint.Check(v) { - // If the constrint passes get the original reference + // If the constraint passes get the original reference ver := v.Original() debug("setting to %s", ver) return ver, nil diff --git a/pkg/proto/hapi/chart/chart.pb.go b/pkg/proto/hapi/chart/chart.pb.go index a884ed552..f54c717fb 100644 --- a/pkg/proto/hapi/chart/chart.pb.go +++ b/pkg/proto/hapi/chart/chart.pb.go @@ -1,29 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: hapi/chart/chart.proto -/* -Package chart is a generated protocol buffer package. - -It is generated from these files: - hapi/chart/chart.proto - hapi/chart/config.proto - hapi/chart/metadata.proto - hapi/chart/template.proto - -It has these top-level messages: - Chart - Config - Value - Maintainer - Metadata - Template -*/ package chart import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" +import any "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -40,22 +23,44 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // optionally parameterizable templates, and zero or more charts (dependencies). type Chart struct { // Contents of the Chartfile. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // Templates for this chart. - Templates []*Template `protobuf:"bytes,2,rep,name=templates" json:"templates,omitempty"` + Templates []*Template `protobuf:"bytes,2,rep,name=templates,proto3" json:"templates,omitempty"` // Charts that this chart depends on. - Dependencies []*Chart `protobuf:"bytes,3,rep,name=dependencies" json:"dependencies,omitempty"` + Dependencies []*Chart `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"` // Default config for this template. - Values *Config `protobuf:"bytes,4,opt,name=values" json:"values,omitempty"` + Values *Config `protobuf:"bytes,4,opt,name=values,proto3" json:"values,omitempty"` // Miscellaneous files in a chart archive, // e.g. README, LICENSE, etc. 
- Files []*google_protobuf.Any `protobuf:"bytes,5,rep,name=files" json:"files,omitempty"` + Files []*any.Any `protobuf:"bytes,5,rep,name=files,proto3" json:"files,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chart) Reset() { *m = Chart{} } +func (m *Chart) String() string { return proto.CompactTextString(m) } +func (*Chart) ProtoMessage() {} +func (*Chart) Descriptor() ([]byte, []int) { + return fileDescriptor_chart_829b474cf208a7f0, []int{0} +} +func (m *Chart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Chart.Unmarshal(m, b) +} +func (m *Chart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Chart.Marshal(b, m, deterministic) +} +func (dst *Chart) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chart.Merge(dst, src) +} +func (m *Chart) XXX_Size() int { + return xxx_messageInfo_Chart.Size(m) +} +func (m *Chart) XXX_DiscardUnknown() { + xxx_messageInfo_Chart.DiscardUnknown(m) } -func (m *Chart) Reset() { *m = Chart{} } -func (m *Chart) String() string { return proto.CompactTextString(m) } -func (*Chart) ProtoMessage() {} -func (*Chart) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Chart proto.InternalMessageInfo func (m *Chart) GetMetadata() *Metadata { if m != nil { @@ -85,7 +90,7 @@ func (m *Chart) GetValues() *Config { return nil } -func (m *Chart) GetFiles() []*google_protobuf.Any { +func (m *Chart) GetFiles() []*any.Any { if m != nil { return m.Files } @@ -96,9 +101,9 @@ func init() { proto.RegisterType((*Chart)(nil), "hapi.chart.Chart") } -func init() { proto.RegisterFile("hapi/chart/chart.proto", fileDescriptor0) } +func init() { proto.RegisterFile("hapi/chart/chart.proto", fileDescriptor_chart_829b474cf208a7f0) } -var fileDescriptor0 = []byte{ +var fileDescriptor_chart_829b474cf208a7f0 = []byte{ // 242 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x86, 0x15, 0x4a, 0x0a, 0x1c, 0x2c, 0x58, 0x08, 0x4c, 0xa7, 0x8a, 0x09, 0x75, 0x70, 0x50, diff --git a/pkg/proto/hapi/chart/config.pb.go b/pkg/proto/hapi/chart/config.pb.go index 30c652700..fce589b14 100644 --- a/pkg/proto/hapi/chart/config.pb.go +++ b/pkg/proto/hapi/chart/config.pb.go @@ -12,16 +12,44 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Config supplies values to the parametrizable templates of a chart. 
type Config struct { - Raw string `protobuf:"bytes,1,opt,name=raw" json:"raw,omitempty"` - Values map[string]*Value `protobuf:"bytes,2,rep,name=values" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Raw string `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` + Values map[string]*Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_config_332ead17c4feed84, []int{0} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (dst *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(dst, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) } -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} -func (*Config) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +var xxx_messageInfo_Config proto.InternalMessageInfo func (m *Config) GetRaw() string { if m != nil { @@ -39,13 +67,35 @@ func (m *Config) GetValues() map[string]*Value { // Value describes a configuration value as a string. 
type Value struct { - Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_config_332ead17c4feed84, []int{1} +} +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) } -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } +var xxx_messageInfo_Value proto.InternalMessageInfo func (m *Value) GetValue() string { if m != nil { @@ -56,12 +106,13 @@ func (m *Value) GetValue() string { func init() { proto.RegisterType((*Config)(nil), "hapi.chart.Config") + proto.RegisterMapType((map[string]*Value)(nil), "hapi.chart.Config.ValuesEntry") proto.RegisterType((*Value)(nil), "hapi.chart.Value") } -func init() { proto.RegisterFile("hapi/chart/config.proto", fileDescriptor1) } +func init() { proto.RegisterFile("hapi/chart/config.proto", fileDescriptor_config_332ead17c4feed84) } -var fileDescriptor1 = []byte{ +var fileDescriptor_config_332ead17c4feed84 = []byte{ // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0x48, 0x2c, 0xc8, 0xd4, 0x4f, 0xce, 0x48, 0x2c, 0x2a, 0xd1, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, diff --git a/pkg/proto/hapi/chart/metadata.pb.go b/pkg/proto/hapi/chart/metadata.pb.go index 9daeaa9e5..ebf59fd9f 100644 --- a/pkg/proto/hapi/chart/metadata.pb.go +++ b/pkg/proto/hapi/chart/metadata.pb.go @@ -12,6 +12,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Metadata_Engine int32 const ( @@ -31,22 +37,46 @@ var Metadata_Engine_value = map[string]int32{ func (x Metadata_Engine) String() string { return proto.EnumName(Metadata_Engine_name, int32(x)) } -func (Metadata_Engine) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1, 0} } +func (Metadata_Engine) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_d6c714c73a051dcb, []int{1, 0} +} // Maintainer describes a Chart maintainer. 
type Maintainer struct { // Name is a user name or organization name - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Email is an optional email address to contact the named maintainer - Email string `protobuf:"bytes,2,opt,name=email" json:"email,omitempty"` + Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` // Url is an optional URL to an address for the named maintainer - Url string `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Maintainer) Reset() { *m = Maintainer{} } +func (m *Maintainer) String() string { return proto.CompactTextString(m) } +func (*Maintainer) ProtoMessage() {} +func (*Maintainer) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_d6c714c73a051dcb, []int{0} +} +func (m *Maintainer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Maintainer.Unmarshal(m, b) +} +func (m *Maintainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Maintainer.Marshal(b, m, deterministic) +} +func (dst *Maintainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Maintainer.Merge(dst, src) +} +func (m *Maintainer) XXX_Size() int { + return xxx_messageInfo_Maintainer.Size(m) +} +func (m *Maintainer) XXX_DiscardUnknown() { + xxx_messageInfo_Maintainer.DiscardUnknown(m) } -func (m *Maintainer) Reset() { *m = Maintainer{} } -func (m *Maintainer) String() string { return proto.CompactTextString(m) } -func (*Maintainer) ProtoMessage() {} -func (*Maintainer) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } +var xxx_messageInfo_Maintainer proto.InternalMessageInfo func (m *Maintainer) GetName() string { if m != nil { @@ -74,47 +104,69 @@ func (m *Maintainer) GetUrl() string { // Spec: https://k8s.io/helm/blob/master/docs/design/chart_format.md#the-chart-file type Metadata struct { // The name of the chart - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL to a relevant project page, git repo, or contact person - Home string `protobuf:"bytes,2,opt,name=home" json:"home,omitempty"` + Home string `protobuf:"bytes,2,opt,name=home,proto3" json:"home,omitempty"` // Source is the URL to the source code of this chart - Sources []string `protobuf:"bytes,3,rep,name=sources" json:"sources,omitempty"` + Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` // A SemVer 2 conformant version string of the chart - Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` // A one-sentence description of the chart - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` // A list of string keywords - Keywords []string `protobuf:"bytes,6,rep,name=keywords" json:"keywords,omitempty"` + Keywords []string `protobuf:"bytes,6,rep,name=keywords,proto3" json:"keywords,omitempty"` // A list of name and URL/email address combinations for the maintainer(s) - Maintainers []*Maintainer `protobuf:"bytes,7,rep,name=maintainers" 
json:"maintainers,omitempty"` + Maintainers []*Maintainer `protobuf:"bytes,7,rep,name=maintainers,proto3" json:"maintainers,omitempty"` // The name of the template engine to use. Defaults to 'gotpl'. - Engine string `protobuf:"bytes,8,opt,name=engine" json:"engine,omitempty"` + Engine string `protobuf:"bytes,8,opt,name=engine,proto3" json:"engine,omitempty"` // The URL to an icon file. - Icon string `protobuf:"bytes,9,opt,name=icon" json:"icon,omitempty"` + Icon string `protobuf:"bytes,9,opt,name=icon,proto3" json:"icon,omitempty"` // The API Version of this chart. - ApiVersion string `protobuf:"bytes,10,opt,name=apiVersion" json:"apiVersion,omitempty"` + ApiVersion string `protobuf:"bytes,10,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` // The condition to check to enable chart - Condition string `protobuf:"bytes,11,opt,name=condition" json:"condition,omitempty"` + Condition string `protobuf:"bytes,11,opt,name=condition,proto3" json:"condition,omitempty"` // The tags to check to enable chart - Tags string `protobuf:"bytes,12,opt,name=tags" json:"tags,omitempty"` + Tags string `protobuf:"bytes,12,opt,name=tags,proto3" json:"tags,omitempty"` // The version of the application enclosed inside of this chart. - AppVersion string `protobuf:"bytes,13,opt,name=appVersion" json:"appVersion,omitempty"` + AppVersion string `protobuf:"bytes,13,opt,name=appVersion,proto3" json:"appVersion,omitempty"` // Whether or not this chart is deprecated - Deprecated bool `protobuf:"varint,14,opt,name=deprecated" json:"deprecated,omitempty"` + Deprecated bool `protobuf:"varint,14,opt,name=deprecated,proto3" json:"deprecated,omitempty"` // TillerVersion is a SemVer constraints on what version of Tiller is required. // See SemVer ranges here: https://github.com/Masterminds/semver#basic-comparisons - TillerVersion string `protobuf:"bytes,15,opt,name=tillerVersion" json:"tillerVersion,omitempty"` + TillerVersion string `protobuf:"bytes,15,opt,name=tillerVersion,proto3" json:"tillerVersion,omitempty"` // Annotations are additional mappings uninterpreted by Tiller, // made available for inspection by other applications. - Annotations map[string]string `protobuf:"bytes,16,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Annotations map[string]string `protobuf:"bytes,16,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // KubeVersion is a SemVer constraint specifying the version of Kubernetes required. 
- KubeVersion string `protobuf:"bytes,17,opt,name=kubeVersion" json:"kubeVersion,omitempty"` + KubeVersion string `protobuf:"bytes,17,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_d6c714c73a051dcb, []int{1} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) } -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } +var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *Metadata) GetName() string { if m != nil { @@ -238,12 +290,13 @@ func (m *Metadata) GetKubeVersion() string { func init() { proto.RegisterType((*Maintainer)(nil), "hapi.chart.Maintainer") proto.RegisterType((*Metadata)(nil), "hapi.chart.Metadata") + proto.RegisterMapType((map[string]string)(nil), "hapi.chart.Metadata.AnnotationsEntry") proto.RegisterEnum("hapi.chart.Metadata_Engine", Metadata_Engine_name, Metadata_Engine_value) } -func init() { proto.RegisterFile("hapi/chart/metadata.proto", fileDescriptor2) } +func init() { proto.RegisterFile("hapi/chart/metadata.proto", fileDescriptor_metadata_d6c714c73a051dcb) } -var fileDescriptor2 = []byte{ +var fileDescriptor_metadata_d6c714c73a051dcb = []byte{ // 435 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x5d, 0x6b, 0xd4, 0x40, 0x14, 0x35, 0xcd, 0x66, 0x77, 0x73, 0x63, 0x35, 0x0e, 0x52, 0xc6, 0x22, 0x12, 0x16, 0x85, 0x7d, diff --git a/pkg/proto/hapi/chart/template.pb.go b/pkg/proto/hapi/chart/template.pb.go index 439aec5a8..4b77dddd0 100644 --- a/pkg/proto/hapi/chart/template.pb.go +++ b/pkg/proto/hapi/chart/template.pb.go @@ -12,21 +12,49 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Template represents a template as a name/value pair. // // By convention, name is a relative path within the scope of the chart's // base directory. type Template struct { // Name is the path-like name of the template. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Data is the template as byte data. 
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Template) Reset() { *m = Template{} } +func (m *Template) String() string { return proto.CompactTextString(m) } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_template_051845a7e9227d35, []int{0} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Template.Unmarshal(m, b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Template.Marshal(b, m, deterministic) +} +func (dst *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(dst, src) +} +func (m *Template) XXX_Size() int { + return xxx_messageInfo_Template.Size(m) +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) } -func (m *Template) Reset() { *m = Template{} } -func (m *Template) String() string { return proto.CompactTextString(m) } -func (*Template) ProtoMessage() {} -func (*Template) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } +var xxx_messageInfo_Template proto.InternalMessageInfo func (m *Template) GetName() string { if m != nil { @@ -46,9 +74,9 @@ func init() { proto.RegisterType((*Template)(nil), "hapi.chart.Template") } -func init() { proto.RegisterFile("hapi/chart/template.proto", fileDescriptor3) } +func init() { proto.RegisterFile("hapi/chart/template.proto", fileDescriptor_template_051845a7e9227d35) } -var fileDescriptor3 = []byte{ +var fileDescriptor_template_051845a7e9227d35 = []byte{ // 107 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0x48, 0x2c, 0xc8, 0xd4, 0x4f, 0xce, 0x48, 0x2c, 0x2a, 0xd1, 0x2f, 0x49, 0xcd, 0x2d, 0xc8, 0x49, 0x2c, 0x49, 0xd5, diff --git a/pkg/proto/hapi/release/hook.pb.go b/pkg/proto/hapi/release/hook.pb.go index 0a44165c8..2faf756d7 100644 --- a/pkg/proto/hapi/release/hook.pb.go +++ b/pkg/proto/hapi/release/hook.pb.go @@ -1,31 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: hapi/release/hook.proto -/* -Package release is a generated protocol buffer package. - -It is generated from these files: - hapi/release/hook.proto - hapi/release/info.proto - hapi/release/release.proto - hapi/release/status.proto - hapi/release/test_run.proto - hapi/release/test_suite.proto - -It has these top-level messages: - Hook - Info - Release - Status - TestRun - TestSuite -*/ package release import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -87,7 +68,9 @@ var Hook_Event_value = map[string]int32{ func (x Hook_Event) String() string { return proto.EnumName(Hook_Event_name, int32(x)) } -func (Hook_Event) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } +func (Hook_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_hook_e64400ca8195038e, []int{0, 0} +} type Hook_DeletePolicy int32 @@ -111,31 +94,57 @@ var Hook_DeletePolicy_value = map[string]int32{ func (x Hook_DeletePolicy) String() string { return proto.EnumName(Hook_DeletePolicy_name, int32(x)) } -func (Hook_DeletePolicy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } +func (Hook_DeletePolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_hook_e64400ca8195038e, []int{0, 1} +} // Hook defines a hook object. type Hook struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Kind is the Kubernetes kind. - Kind string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` // Path is the chart-relative path to the template. - Path string `protobuf:"bytes,3,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` // Manifest is the manifest contents. - Manifest string `protobuf:"bytes,4,opt,name=manifest" json:"manifest,omitempty"` + Manifest string `protobuf:"bytes,4,opt,name=manifest,proto3" json:"manifest,omitempty"` // Events are the events that this hook fires on. - Events []Hook_Event `protobuf:"varint,5,rep,packed,name=events,enum=hapi.release.Hook_Event" json:"events,omitempty"` + Events []Hook_Event `protobuf:"varint,5,rep,packed,name=events,proto3,enum=hapi.release.Hook_Event" json:"events,omitempty"` // LastRun indicates the date/time this was last run. 
- LastRun *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=last_run,json=lastRun" json:"last_run,omitempty"` + LastRun *timestamp.Timestamp `protobuf:"bytes,6,opt,name=last_run,json=lastRun,proto3" json:"last_run,omitempty"` // Weight indicates the sort order for execution among similar Hook type - Weight int32 `protobuf:"varint,7,opt,name=weight" json:"weight,omitempty"` + Weight int32 `protobuf:"varint,7,opt,name=weight,proto3" json:"weight,omitempty"` // DeletePolicies are the policies that indicate when to delete the hook - DeletePolicies []Hook_DeletePolicy `protobuf:"varint,8,rep,packed,name=delete_policies,json=deletePolicies,enum=hapi.release.Hook_DeletePolicy" json:"delete_policies,omitempty"` + DeletePolicies []Hook_DeletePolicy `protobuf:"varint,8,rep,packed,name=delete_policies,json=deletePolicies,proto3,enum=hapi.release.Hook_DeletePolicy" json:"delete_policies,omitempty"` + // DeleteTimeout indicates how long to wait for a resource to be deleted before timing out + DeleteTimeout int64 `protobuf:"varint,9,opt,name=delete_timeout,json=deleteTimeout,proto3" json:"delete_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Hook) Reset() { *m = Hook{} } +func (m *Hook) String() string { return proto.CompactTextString(m) } +func (*Hook) ProtoMessage() {} +func (*Hook) Descriptor() ([]byte, []int) { + return fileDescriptor_hook_e64400ca8195038e, []int{0} +} +func (m *Hook) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Hook.Unmarshal(m, b) +} +func (m *Hook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Hook.Marshal(b, m, deterministic) +} +func (dst *Hook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Hook.Merge(dst, src) +} +func (m *Hook) XXX_Size() int { + return xxx_messageInfo_Hook.Size(m) +} +func (m *Hook) XXX_DiscardUnknown() { + xxx_messageInfo_Hook.DiscardUnknown(m) } -func (m *Hook) Reset() { *m = Hook{} } -func (m *Hook) String() string { return proto.CompactTextString(m) } -func (*Hook) ProtoMessage() {} -func (*Hook) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Hook proto.InternalMessageInfo func (m *Hook) GetName() string { if m != nil { @@ -172,7 +181,7 @@ func (m *Hook) GetEvents() []Hook_Event { return nil } -func (m *Hook) GetLastRun() *google_protobuf.Timestamp { +func (m *Hook) GetLastRun() *timestamp.Timestamp { if m != nil { return m.LastRun } @@ -193,43 +202,51 @@ func (m *Hook) GetDeletePolicies() []Hook_DeletePolicy { return nil } +func (m *Hook) GetDeleteTimeout() int64 { + if m != nil { + return m.DeleteTimeout + } + return 0 +} + func init() { proto.RegisterType((*Hook)(nil), "hapi.release.Hook") proto.RegisterEnum("hapi.release.Hook_Event", Hook_Event_name, Hook_Event_value) proto.RegisterEnum("hapi.release.Hook_DeletePolicy", Hook_DeletePolicy_name, Hook_DeletePolicy_value) } -func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 453 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x51, 0x8f, 0x9a, 0x40, - 0x10, 0x80, 0x8f, 0x53, 0x41, 0x47, 0xcf, 0xdb, 0x6e, 0x9a, 0x76, 0xe3, 0xcb, 0x19, 0x9f, 0x7c, - 0xc2, 0xe6, 0x9a, 0xfe, 0x00, 0x84, 0xb9, 0x6a, 0x24, 0x60, 0x16, 0x4c, 0x93, 0xbe, 0x10, 0xae, - 0xee, 0x29, 0x11, 0x81, 0x08, 0xb6, 0xe9, 0x1f, 0xed, 0x3f, 0xe8, 0xff, 0x68, 0x76, 0x45, 0x7a, - 0x49, 0xfb, 0x36, 0xf3, 0xcd, 0xb7, 
0xb3, 0x33, 0xbb, 0xf0, 0x7e, 0x1f, 0x17, 0xc9, 0xec, 0x24, - 0x52, 0x11, 0x97, 0x62, 0xb6, 0xcf, 0xf3, 0x83, 0x59, 0x9c, 0xf2, 0x2a, 0xa7, 0x03, 0x59, 0x30, - 0xeb, 0xc2, 0xe8, 0x61, 0x97, 0xe7, 0xbb, 0x54, 0xcc, 0x54, 0xed, 0xf9, 0xfc, 0x32, 0xab, 0x92, - 0xa3, 0x28, 0xab, 0xf8, 0x58, 0x5c, 0xf4, 0xc9, 0xaf, 0x36, 0xb4, 0x17, 0x79, 0x7e, 0xa0, 0x14, - 0xda, 0x59, 0x7c, 0x14, 0x4c, 0x1b, 0x6b, 0xd3, 0x1e, 0x57, 0xb1, 0x64, 0x87, 0x24, 0xdb, 0xb2, - 0xdb, 0x0b, 0x93, 0xb1, 0x64, 0x45, 0x5c, 0xed, 0x59, 0xeb, 0xc2, 0x64, 0x4c, 0x47, 0xd0, 0x3d, - 0xc6, 0x59, 0xf2, 0x22, 0xca, 0x8a, 0xb5, 0x15, 0x6f, 0x72, 0xfa, 0x01, 0x74, 0xf1, 0x5d, 0x64, - 0x55, 0xc9, 0x3a, 0xe3, 0xd6, 0x74, 0xf8, 0xc8, 0xcc, 0xd7, 0x03, 0x9a, 0xf2, 0x6e, 0x13, 0xa5, - 0xc0, 0x6b, 0x8f, 0x7e, 0x82, 0x6e, 0x1a, 0x97, 0x55, 0x74, 0x3a, 0x67, 0x4c, 0x1f, 0x6b, 0xd3, - 0xfe, 0xe3, 0xc8, 0xbc, 0xac, 0x61, 0x5e, 0xd7, 0x30, 0xc3, 0xeb, 0x1a, 0xdc, 0x90, 0x2e, 0x3f, - 0x67, 0xf4, 0x1d, 0xe8, 0x3f, 0x44, 0xb2, 0xdb, 0x57, 0xcc, 0x18, 0x6b, 0xd3, 0x0e, 0xaf, 0x33, - 0xba, 0x80, 0xfb, 0xad, 0x48, 0x45, 0x25, 0xa2, 0x22, 0x4f, 0x93, 0x6f, 0x89, 0x28, 0x59, 0x57, - 0x4d, 0xf2, 0xf0, 0x9f, 0x49, 0x1c, 0x65, 0xae, 0xa5, 0xf8, 0x93, 0x0f, 0xb7, 0x7f, 0xb3, 0x44, - 0x94, 0x93, 0xdf, 0x1a, 0x74, 0xd4, 0xa8, 0xb4, 0x0f, 0xc6, 0xc6, 0x5b, 0x79, 0xfe, 0x17, 0x8f, - 0xdc, 0xd0, 0x7b, 0xe8, 0xaf, 0x39, 0x46, 0x4b, 0x2f, 0x08, 0x2d, 0xd7, 0x25, 0x1a, 0x25, 0x30, - 0x58, 0xfb, 0x41, 0xd8, 0x90, 0x5b, 0x3a, 0x04, 0x90, 0x8a, 0x83, 0x2e, 0x86, 0x48, 0x5a, 0xea, - 0x88, 0x34, 0x6a, 0xd0, 0xbe, 0xf6, 0xd8, 0xac, 0x3f, 0x73, 0xcb, 0x41, 0xd2, 0x69, 0x7a, 0x5c, - 0x89, 0xae, 0x08, 0xc7, 0x88, 0xfb, 0xae, 0x3b, 0xb7, 0xec, 0x15, 0x31, 0xe8, 0x1b, 0xb8, 0x53, - 0x4e, 0x83, 0xba, 0x94, 0xc1, 0x5b, 0x8e, 0x2e, 0x5a, 0x01, 0x46, 0x21, 0x06, 0x61, 0x14, 0x6c, - 0x6c, 0x1b, 0x83, 0x80, 0xf4, 0xfe, 0xa9, 0x3c, 0x59, 0x4b, 0x77, 0xc3, 0x91, 0x80, 0xbc, 0xdb, - 0xe6, 0x4e, 0x33, 0x6d, 0x7f, 0x62, 0xc3, 0xe0, 0xf5, 0x3b, 0xd0, 0x3b, 0xe8, 0xa9, 0x3e, 0xe8, - 0xa0, 0x43, 0x6e, 0x28, 0x80, 0x2e, 0x0f, 0xa3, 0x43, 0x34, 0xd9, 0x75, 0x8e, 0x4f, 0x3e, 0xc7, - 0x68, 0xe1, 0xfb, 0xab, 0xc8, 0xe6, 0x68, 0x85, 0x4b, 0xdf, 0x23, 0xb7, 0xf3, 0xde, 0x57, 0xa3, - 0x7e, 0xd9, 0x67, 0x5d, 0x7d, 0xdb, 0xc7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xcf, 0xed, - 0xd9, 0xb4, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor_hook_e64400ca8195038e) } + +var fileDescriptor_hook_e64400ca8195038e = []byte{ + // 473 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xdb, 0x8e, 0xda, 0x3c, + 0x10, 0x80, 0x37, 0x1c, 0x02, 0x0c, 0x87, 0xf5, 0x6f, 0xfd, 0x6a, 0x2d, 0x6e, 0x16, 0x21, 0x55, + 0xe2, 0x2a, 0x54, 0x5b, 0xf5, 0x01, 0x42, 0xe2, 0x2d, 0x88, 0x88, 0x20, 0x27, 0xa8, 0x52, 0x6f, + 0xa2, 0x6c, 0xf1, 0x42, 0x44, 0x88, 0x23, 0x62, 0x5a, 0xf5, 0x81, 0xfb, 0x18, 0x95, 0x2a, 0x3b, + 0x21, 0x5d, 0xa9, 0xbd, 0x9b, 0xf9, 0xe6, 0xf3, 0x78, 0xc6, 0x86, 0xb7, 0xc7, 0x38, 0x4f, 0xe6, + 0x17, 0x9e, 0xf2, 0xb8, 0xe0, 0xf3, 0xa3, 0x10, 0x27, 0x2b, 0xbf, 0x08, 0x29, 0xf0, 0x40, 0x15, + 0xac, 0xaa, 0x30, 0x7e, 0x38, 0x08, 0x71, 0x48, 0xf9, 0x5c, 0xd7, 0x9e, 0xaf, 0x2f, 0x73, 0x99, + 0x9c, 0x79, 0x21, 0xe3, 0x73, 0x5e, 0xea, 0xd3, 0x5f, 0x2d, 0x68, 0x2d, 0x85, 0x38, 0x61, 0x0c, + 0xad, 0x2c, 0x3e, 0x73, 0x62, 0x4c, 0x8c, 0x59, 0x8f, 0xe9, 0x58, 0xb1, 0x53, 0x92, 0xed, 0x49, + 0xa3, 0x64, 0x2a, 0x56, 0x2c, 0x8f, 0xe5, 0x91, 0x34, 0x4b, 0xa6, 0x62, 0x3c, 0x86, 0xee, 0x39, + 0xce, 0x92, 0x17, 0x5e, 
0x48, 0xd2, 0xd2, 0xbc, 0xce, 0xf1, 0x7b, 0x30, 0xf9, 0x37, 0x9e, 0xc9, + 0x82, 0xb4, 0x27, 0xcd, 0xd9, 0xe8, 0x91, 0x58, 0xaf, 0x07, 0xb4, 0xd4, 0xdd, 0x16, 0x55, 0x02, + 0xab, 0x3c, 0xfc, 0x11, 0xba, 0x69, 0x5c, 0xc8, 0xe8, 0x72, 0xcd, 0x88, 0x39, 0x31, 0x66, 0xfd, + 0xc7, 0xb1, 0x55, 0xae, 0x61, 0xdd, 0xd6, 0xb0, 0xc2, 0xdb, 0x1a, 0xac, 0xa3, 0x5c, 0x76, 0xcd, + 0xf0, 0x1b, 0x30, 0xbf, 0xf3, 0xe4, 0x70, 0x94, 0xa4, 0x33, 0x31, 0x66, 0x6d, 0x56, 0x65, 0x78, + 0x09, 0xf7, 0x7b, 0x9e, 0x72, 0xc9, 0xa3, 0x5c, 0xa4, 0xc9, 0xd7, 0x84, 0x17, 0xa4, 0xab, 0x27, + 0x79, 0xf8, 0xc7, 0x24, 0xae, 0x36, 0xb7, 0x4a, 0xfc, 0xc1, 0x46, 0xfb, 0x3f, 0x59, 0xc2, 0x0b, + 0xfc, 0x0e, 0x2a, 0x12, 0xa9, 0x57, 0x14, 0x57, 0x49, 0x7a, 0x13, 0x63, 0xd6, 0x64, 0xc3, 0x92, + 0x86, 0x25, 0x9c, 0xfe, 0x34, 0xa0, 0xad, 0x37, 0xc2, 0x7d, 0xe8, 0xec, 0x36, 0xeb, 0x8d, 0xff, + 0x79, 0x83, 0xee, 0xf0, 0x3d, 0xf4, 0xb7, 0x8c, 0x46, 0xab, 0x4d, 0x10, 0xda, 0x9e, 0x87, 0x0c, + 0x8c, 0x60, 0xb0, 0xf5, 0x83, 0xb0, 0x26, 0x0d, 0x3c, 0x02, 0x50, 0x8a, 0x4b, 0x3d, 0x1a, 0x52, + 0xd4, 0xd4, 0x47, 0x94, 0x51, 0x81, 0xd6, 0xad, 0xc7, 0x6e, 0xfb, 0x89, 0xd9, 0x2e, 0x45, 0xed, + 0xba, 0xc7, 0x8d, 0x98, 0x9a, 0x30, 0x1a, 0x31, 0xdf, 0xf3, 0x16, 0xb6, 0xb3, 0x46, 0x1d, 0xfc, + 0x1f, 0x0c, 0xb5, 0x53, 0xa3, 0x2e, 0x26, 0xf0, 0x3f, 0xa3, 0x1e, 0xb5, 0x03, 0x1a, 0x85, 0x34, + 0x08, 0xa3, 0x60, 0xe7, 0x38, 0x34, 0x08, 0x50, 0xef, 0xaf, 0xca, 0x93, 0xbd, 0xf2, 0x76, 0x8c, + 0x22, 0x50, 0x77, 0x3b, 0xcc, 0xad, 0xa7, 0xed, 0x4f, 0x1d, 0x18, 0xbc, 0x7e, 0x2e, 0x3c, 0x84, + 0x9e, 0xee, 0x43, 0x5d, 0xea, 0xa2, 0x3b, 0x0c, 0x60, 0xaa, 0xc3, 0xd4, 0x45, 0x86, 0xea, 0xba, + 0xa0, 0x4f, 0x3e, 0xa3, 0xd1, 0xd2, 0xf7, 0xd7, 0x91, 0xc3, 0xa8, 0x1d, 0xae, 0xfc, 0x0d, 0x6a, + 0x2c, 0x7a, 0x5f, 0x3a, 0xd5, 0x07, 0x3c, 0x9b, 0xfa, 0x77, 0x3f, 0xfc, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0xce, 0x84, 0xd9, 0x98, 0xdb, 0x02, 0x00, 0x00, } diff --git a/pkg/proto/hapi/release/info.pb.go b/pkg/proto/hapi/release/info.pb.go index 7a7ccdd74..5ce2845a6 100644 --- a/pkg/proto/hapi/release/info.pb.go +++ b/pkg/proto/hapi/release/info.pb.go @@ -6,28 +6,56 @@ package release import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Info describes release information. 
type Info struct { - Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` - FirstDeployed *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=first_deployed,json=firstDeployed" json:"first_deployed,omitempty"` - LastDeployed *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=last_deployed,json=lastDeployed" json:"last_deployed,omitempty"` + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + FirstDeployed *timestamp.Timestamp `protobuf:"bytes,2,opt,name=first_deployed,json=firstDeployed,proto3" json:"first_deployed,omitempty"` + LastDeployed *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_deployed,json=lastDeployed,proto3" json:"last_deployed,omitempty"` // Deleted tracks when this object was deleted. - Deleted *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=deleted" json:"deleted,omitempty"` + Deleted *timestamp.Timestamp `protobuf:"bytes,4,opt,name=deleted,proto3" json:"deleted,omitempty"` // Description is human-friendly "log entry" about this release. - Description string `protobuf:"bytes,5,opt,name=Description" json:"Description,omitempty"` + Description string `protobuf:"bytes,5,opt,name=Description,proto3" json:"Description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_info_1c62b71ed76c67c1, []int{0} +} +func (m *Info) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Info.Unmarshal(m, b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) +} +func (dst *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(dst, src) +} +func (m *Info) XXX_Size() int { + return xxx_messageInfo_Info.Size(m) +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) } -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +var xxx_messageInfo_Info proto.InternalMessageInfo func (m *Info) GetStatus() *Status { if m != nil { @@ -36,21 +64,21 @@ func (m *Info) GetStatus() *Status { return nil } -func (m *Info) GetFirstDeployed() *google_protobuf.Timestamp { +func (m *Info) GetFirstDeployed() *timestamp.Timestamp { if m != nil { return m.FirstDeployed } return nil } -func (m *Info) GetLastDeployed() *google_protobuf.Timestamp { +func (m *Info) GetLastDeployed() *timestamp.Timestamp { if m != nil { return m.LastDeployed } return nil } -func (m *Info) GetDeleted() *google_protobuf.Timestamp { +func (m *Info) GetDeleted() *timestamp.Timestamp { if m != nil { return m.Deleted } @@ -68,9 +96,9 @@ func init() { proto.RegisterType((*Info)(nil), "hapi.release.Info") } -func init() { proto.RegisterFile("hapi/release/info.proto", fileDescriptor1) } +func init() { proto.RegisterFile("hapi/release/info.proto", fileDescriptor_info_1c62b71ed76c67c1) } -var fileDescriptor1 = []byte{ +var fileDescriptor_info_1c62b71ed76c67c1 = []byte{ // 235 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x31, 0x4f, 0xc3, 0x30, 0x10, 0x85, 0x95, 0x52, 0x5a, 0xd5, 0x6d, 0x19, 0x2c, 0x24, 0x42, 0x16, 0x22, 0xa6, 0x0e, 
0xc8, diff --git a/pkg/proto/hapi/release/release.pb.go b/pkg/proto/hapi/release/release.pb.go index 511b543d7..e9578f00a 100644 --- a/pkg/proto/hapi/release/release.pb.go +++ b/pkg/proto/hapi/release/release.pb.go @@ -6,40 +6,67 @@ package release import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import hapi_chart "k8s.io/helm/pkg/proto/hapi/chart" -import hapi_chart3 "k8s.io/helm/pkg/proto/hapi/chart" +import chart "k8s.io/helm/pkg/proto/hapi/chart" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Release describes a deployment of a chart, together with the chart // and the variables used to deploy that chart. type Release struct { // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Info provides information about a release - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // Chart is the chart that was released. - Chart *hapi_chart3.Chart `protobuf:"bytes,3,opt,name=chart" json:"chart,omitempty"` + Chart *chart.Chart `protobuf:"bytes,3,opt,name=chart,proto3" json:"chart,omitempty"` // Config is the set of extra Values added to the chart. // These values override the default values inside of the chart. - Config *hapi_chart.Config `protobuf:"bytes,4,opt,name=config" json:"config,omitempty"` + Config *chart.Config `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` // Manifest is the string representation of the rendered template. - Manifest string `protobuf:"bytes,5,opt,name=manifest" json:"manifest,omitempty"` + Manifest string `protobuf:"bytes,5,opt,name=manifest,proto3" json:"manifest,omitempty"` // Hooks are all of the hooks declared for this release. - Hooks []*Hook `protobuf:"bytes,6,rep,name=hooks" json:"hooks,omitempty"` + Hooks []*Hook `protobuf:"bytes,6,rep,name=hooks,proto3" json:"hooks,omitempty"` // Version is an int32 which represents the version of the release. - Version int32 `protobuf:"varint,7,opt,name=version" json:"version,omitempty"` + Version int32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` // Namespace is the kubernetes namespace of the release. 
- Namespace string `protobuf:"bytes,8,opt,name=namespace" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Release) Reset() { *m = Release{} } +func (m *Release) String() string { return proto.CompactTextString(m) } +func (*Release) ProtoMessage() {} +func (*Release) Descriptor() ([]byte, []int) { + return fileDescriptor_release_4bea5d16ba219619, []int{0} +} +func (m *Release) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Release.Unmarshal(m, b) +} +func (m *Release) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Release.Marshal(b, m, deterministic) +} +func (dst *Release) XXX_Merge(src proto.Message) { + xxx_messageInfo_Release.Merge(dst, src) +} +func (m *Release) XXX_Size() int { + return xxx_messageInfo_Release.Size(m) +} +func (m *Release) XXX_DiscardUnknown() { + xxx_messageInfo_Release.DiscardUnknown(m) } -func (m *Release) Reset() { *m = Release{} } -func (m *Release) String() string { return proto.CompactTextString(m) } -func (*Release) ProtoMessage() {} -func (*Release) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } +var xxx_messageInfo_Release proto.InternalMessageInfo func (m *Release) GetName() string { if m != nil { @@ -55,14 +82,14 @@ func (m *Release) GetInfo() *Info { return nil } -func (m *Release) GetChart() *hapi_chart3.Chart { +func (m *Release) GetChart() *chart.Chart { if m != nil { return m.Chart } return nil } -func (m *Release) GetConfig() *hapi_chart.Config { +func (m *Release) GetConfig() *chart.Config { if m != nil { return m.Config } @@ -101,9 +128,9 @@ func init() { proto.RegisterType((*Release)(nil), "hapi.release.Release") } -func init() { proto.RegisterFile("hapi/release/release.proto", fileDescriptor2) } +func init() { proto.RegisterFile("hapi/release/release.proto", fileDescriptor_release_4bea5d16ba219619) } -var fileDescriptor2 = []byte{ +var fileDescriptor_release_4bea5d16ba219619 = []byte{ // 256 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xbf, 0x4e, 0xc3, 0x40, 0x0c, 0xc6, 0x95, 0x36, 0x7f, 0x1a, 0xc3, 0x82, 0x07, 0xb0, 0x22, 0x86, 0x88, 0x01, 0x22, 0x86, diff --git a/pkg/proto/hapi/release/status.pb.go b/pkg/proto/hapi/release/status.pb.go index 284892642..99bcbc585 100644 --- a/pkg/proto/hapi/release/status.pb.go +++ b/pkg/proto/hapi/release/status.pb.go @@ -13,6 +13,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Status_Code int32 const ( @@ -20,7 +26,7 @@ const ( Status_UNKNOWN Status_Code = 0 // Status_DEPLOYED indicates that the release has been pushed to Kubernetes. Status_DEPLOYED Status_Code = 1 - // Status_DELETED indicates that a release has been deleted from Kubermetes. + // Status_DELETED indicates that a release has been deleted from Kubernetes. Status_DELETED Status_Code = 2 // Status_SUPERSEDED indicates that this release object is outdated and a newer one exists. 
Status_SUPERSEDED Status_Code = 3 @@ -32,7 +38,7 @@ const ( Status_PENDING_INSTALL Status_Code = 6 // Status_PENDING_UPGRADE indicates that an upgrade operation is underway. Status_PENDING_UPGRADE Status_Code = 7 - // Status_PENDING_ROLLBACK indicates that an rollback operation is underway. + // Status_PENDING_ROLLBACK indicates that a rollback operation is underway. Status_PENDING_ROLLBACK Status_Code = 8 ) @@ -62,23 +68,47 @@ var Status_Code_value = map[string]int32{ func (x Status_Code) String() string { return proto.EnumName(Status_Code_name, int32(x)) } -func (Status_Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } +func (Status_Code) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_status_933517e5a50981ed, []int{0, 0} +} // Status defines the status of a release. type Status struct { - Code Status_Code `protobuf:"varint,1,opt,name=code,enum=hapi.release.Status_Code" json:"code,omitempty"` + Code Status_Code `protobuf:"varint,1,opt,name=code,proto3,enum=hapi.release.Status_Code" json:"code,omitempty"` // Cluster resources as kubectl would print them. - Resources string `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"` + Resources string `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` // Contains the rendered templates/NOTES.txt if available - Notes string `protobuf:"bytes,4,opt,name=notes" json:"notes,omitempty"` + Notes string `protobuf:"bytes,4,opt,name=notes,proto3" json:"notes,omitempty"` // LastTestSuiteRun provides results on the last test run on a release - LastTestSuiteRun *TestSuite `protobuf:"bytes,5,opt,name=last_test_suite_run,json=lastTestSuiteRun" json:"last_test_suite_run,omitempty"` + LastTestSuiteRun *TestSuite `protobuf:"bytes,5,opt,name=last_test_suite_run,json=lastTestSuiteRun,proto3" json:"last_test_suite_run,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_status_933517e5a50981ed, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } +var xxx_messageInfo_Status proto.InternalMessageInfo func (m *Status) GetCode() Status_Code { if m != nil { @@ -113,9 +143,9 @@ func init() { proto.RegisterEnum("hapi.release.Status_Code", Status_Code_name, Status_Code_value) } -func init() { proto.RegisterFile("hapi/release/status.proto", fileDescriptor3) } +func init() { proto.RegisterFile("hapi/release/status.proto", fileDescriptor_status_933517e5a50981ed) } -var fileDescriptor3 = []byte{ +var fileDescriptor_status_933517e5a50981ed = []byte{ // 333 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xd1, 0x6e, 0xa2, 0x40, 0x14, 0x86, 0x17, 0x45, 0xd4, 0xa3, 0x71, 0x27, 0xa3, 0xc9, 0xa2, 0xd9, 0x4d, 0x8c, 0x57, 0xde, diff --git a/pkg/proto/hapi/release/test_run.pb.go b/pkg/proto/hapi/release/test_run.pb.go index 4d39d17c2..f43be231d 100644 --- a/pkg/proto/hapi/release/test_run.pb.go +++ b/pkg/proto/hapi/release/test_run.pb.go @@ -6,13 +6,19 @@ package release import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type TestRun_Status int32 const ( @@ -38,20 +44,44 @@ var TestRun_Status_value = map[string]int32{ func (x TestRun_Status) String() string { return proto.EnumName(TestRun_Status_name, int32(x)) } -func (TestRun_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 0} } +func (TestRun_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_run_31b133e40c63664e, []int{0, 0} +} type TestRun struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Status TestRun_Status `protobuf:"varint,2,opt,name=status,enum=hapi.release.TestRun_Status" json:"status,omitempty"` - Info string `protobuf:"bytes,3,opt,name=info" json:"info,omitempty"` - StartedAt *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` - CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Status TestRun_Status `protobuf:"varint,2,opt,name=status,proto3,enum=hapi.release.TestRun_Status" json:"status,omitempty"` + Info string `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` + StartedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + CompletedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TestRun) Reset() { *m = TestRun{} } -func (m *TestRun) String() string { return proto.CompactTextString(m) } -func (*TestRun) ProtoMessage() {} -func (*TestRun) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } +func (m *TestRun) Reset() { *m = TestRun{} } +func (m *TestRun) String() string { return proto.CompactTextString(m) } +func (*TestRun) ProtoMessage() {} +func (*TestRun) Descriptor() ([]byte, []int) { + return fileDescriptor_test_run_31b133e40c63664e, []int{0} +} +func (m *TestRun) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestRun.Unmarshal(m, b) +} +func (m *TestRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestRun.Marshal(b, m, deterministic) +} +func (dst *TestRun) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestRun.Merge(dst, src) +} +func (m *TestRun) 
XXX_Size() int { + return xxx_messageInfo_TestRun.Size(m) +} +func (m *TestRun) XXX_DiscardUnknown() { + xxx_messageInfo_TestRun.DiscardUnknown(m) +} + +var xxx_messageInfo_TestRun proto.InternalMessageInfo func (m *TestRun) GetName() string { if m != nil { @@ -74,14 +104,14 @@ func (m *TestRun) GetInfo() string { return "" } -func (m *TestRun) GetStartedAt() *google_protobuf.Timestamp { +func (m *TestRun) GetStartedAt() *timestamp.Timestamp { if m != nil { return m.StartedAt } return nil } -func (m *TestRun) GetCompletedAt() *google_protobuf.Timestamp { +func (m *TestRun) GetCompletedAt() *timestamp.Timestamp { if m != nil { return m.CompletedAt } @@ -93,9 +123,11 @@ func init() { proto.RegisterEnum("hapi.release.TestRun_Status", TestRun_Status_name, TestRun_Status_value) } -func init() { proto.RegisterFile("hapi/release/test_run.proto", fileDescriptor4) } +func init() { + proto.RegisterFile("hapi/release/test_run.proto", fileDescriptor_test_run_31b133e40c63664e) +} -var fileDescriptor4 = []byte{ +var fileDescriptor_test_run_31b133e40c63664e = []byte{ // 274 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4b, 0xfb, 0x30, 0x1c, 0xc5, 0x7f, 0xe9, 0xf6, 0x6b, 0x69, 0x3a, 0xa4, 0xe4, 0x54, 0xa6, 0x60, 0xd9, 0xa9, 0xa7, diff --git a/pkg/proto/hapi/release/test_suite.pb.go b/pkg/proto/hapi/release/test_suite.pb.go index b7fa26147..d2cf3a979 100644 --- a/pkg/proto/hapi/release/test_suite.pb.go +++ b/pkg/proto/hapi/release/test_suite.pb.go @@ -6,36 +6,64 @@ package release import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // TestSuite comprises of the last run of the pre-defined test suite of a release version type TestSuite struct { // StartedAt indicates the date/time this test suite was kicked off - StartedAt *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + StartedAt *timestamp.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` // CompletedAt indicates the date/time this test suite was completed - CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + CompletedAt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` // Results are the results of each segment of the test - Results []*TestRun `protobuf:"bytes,3,rep,name=results" json:"results,omitempty"` + Results []*TestRun `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestSuite) Reset() { *m = TestSuite{} } +func (m *TestSuite) String() string { return proto.CompactTextString(m) } +func (*TestSuite) ProtoMessage() {} +func (*TestSuite) Descriptor() ([]byte, []int) { + return fileDescriptor_test_suite_06a0016f2c6417b8, []int{0} +} +func (m *TestSuite) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestSuite.Unmarshal(m, b) +} +func (m *TestSuite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestSuite.Marshal(b, m, deterministic) +} +func (dst *TestSuite) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestSuite.Merge(dst, src) +} +func (m *TestSuite) XXX_Size() int { + return xxx_messageInfo_TestSuite.Size(m) +} +func (m *TestSuite) XXX_DiscardUnknown() { + xxx_messageInfo_TestSuite.DiscardUnknown(m) } -func (m *TestSuite) Reset() { *m = TestSuite{} } -func (m *TestSuite) String() string { return proto.CompactTextString(m) } -func (*TestSuite) ProtoMessage() {} -func (*TestSuite) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } +var xxx_messageInfo_TestSuite proto.InternalMessageInfo -func (m *TestSuite) GetStartedAt() *google_protobuf.Timestamp { +func (m *TestSuite) GetStartedAt() *timestamp.Timestamp { if m != nil { return m.StartedAt } return nil } -func (m *TestSuite) GetCompletedAt() *google_protobuf.Timestamp { +func (m *TestSuite) GetCompletedAt() *timestamp.Timestamp { if m != nil { return m.CompletedAt } @@ -53,9 +81,11 @@ func init() { proto.RegisterType((*TestSuite)(nil), "hapi.release.TestSuite") } -func init() { proto.RegisterFile("hapi/release/test_suite.proto", fileDescriptor5) } +func init() { + proto.RegisterFile("hapi/release/test_suite.proto", fileDescriptor_test_suite_06a0016f2c6417b8) +} -var fileDescriptor5 = []byte{ +var fileDescriptor_test_suite_06a0016f2c6417b8 = []byte{ // 207 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4a, 0x86, 0x40, 0x14, 0x85, 0x31, 0x21, 0x71, 0x74, 0x35, 0x10, 0x88, 0x11, 0x49, 0x2b, 0x57, 0x33, 0x60, 0xab, diff --git a/pkg/proto/hapi/rudder/rudder.pb.go b/pkg/proto/hapi/rudder/rudder.pb.go index 6e26d71eb..d594836c4 100644 --- a/pkg/proto/hapi/rudder/rudder.pb.go +++ b/pkg/proto/hapi/rudder/rudder.pb.go @@ -1,34 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: hapi/rudder/rudder.proto -/* -Package rudder is a generated protocol buffer package. - -It is generated from these files: - hapi/rudder/rudder.proto - -It has these top-level messages: - Result - VersionReleaseRequest - VersionReleaseResponse - InstallReleaseRequest - InstallReleaseResponse - DeleteReleaseRequest - DeleteReleaseResponse - UpgradeReleaseRequest - UpgradeReleaseResponse - RollbackReleaseRequest - RollbackReleaseResponse - ReleaseStatusRequest - ReleaseStatusResponse -*/ package rudder import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import hapi_release3 "k8s.io/helm/pkg/proto/hapi/release" -import hapi_release5 "k8s.io/helm/pkg/proto/hapi/release" +import release "k8s.io/helm/pkg/proto/hapi/release" import ( context "golang.org/x/net/context" @@ -75,17 +53,41 @@ var Result_Status_value = map[string]int32{ func (x Result_Status) String() string { return proto.EnumName(Result_Status_name, int32(x)) } -func (Result_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } +func (Result_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{0, 0} +} type Result struct { - Info string `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"` - Log []string `protobuf:"bytes,2,rep,name=log" json:"log,omitempty"` + Info string `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + Log []string `protobuf:"bytes,2,rep,name=log,proto3" json:"log,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Result) Reset() { *m = Result{} } -func (m *Result) String() string { return proto.CompactTextString(m) } -func (*Result) ProtoMessage() {} -func (*Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Result) Reset() { *m = Result{} } +func (m *Result) String() string { return proto.CompactTextString(m) } +func (*Result) ProtoMessage() {} +func (*Result) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{0} +} +func (m *Result) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Result.Unmarshal(m, b) +} +func (m *Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Result.Marshal(b, m, deterministic) +} +func (dst *Result) XXX_Merge(src proto.Message) { + xxx_messageInfo_Result.Merge(dst, src) +} +func (m *Result) XXX_Size() int { + return xxx_messageInfo_Result.Size(m) +} +func (m *Result) XXX_DiscardUnknown() { + xxx_messageInfo_Result.DiscardUnknown(m) +} + +var xxx_messageInfo_Result proto.InternalMessageInfo func (m *Result) GetInfo() string { if m != nil { @@ -102,22 +104,66 @@ func (m *Result) GetLog() []string { } type VersionReleaseRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionReleaseRequest) Reset() { *m = VersionReleaseRequest{} } +func (m *VersionReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*VersionReleaseRequest) ProtoMessage() {} +func (*VersionReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{1} +} +func (m *VersionReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionReleaseRequest.Unmarshal(m, b) +} +func (m *VersionReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionReleaseRequest.Marshal(b, m, 
deterministic) +} +func (dst *VersionReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionReleaseRequest.Merge(dst, src) +} +func (m *VersionReleaseRequest) XXX_Size() int { + return xxx_messageInfo_VersionReleaseRequest.Size(m) +} +func (m *VersionReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionReleaseRequest.DiscardUnknown(m) } -func (m *VersionReleaseRequest) Reset() { *m = VersionReleaseRequest{} } -func (m *VersionReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*VersionReleaseRequest) ProtoMessage() {} -func (*VersionReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_VersionReleaseRequest proto.InternalMessageInfo type VersionReleaseResponse struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *VersionReleaseResponse) Reset() { *m = VersionReleaseResponse{} } -func (m *VersionReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*VersionReleaseResponse) ProtoMessage() {} -func (*VersionReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *VersionReleaseResponse) Reset() { *m = VersionReleaseResponse{} } +func (m *VersionReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*VersionReleaseResponse) ProtoMessage() {} +func (*VersionReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{2} +} +func (m *VersionReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionReleaseResponse.Unmarshal(m, b) +} +func (m *VersionReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *VersionReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionReleaseResponse.Merge(dst, src) +} +func (m *VersionReleaseResponse) XXX_Size() int { + return xxx_messageInfo_VersionReleaseResponse.Size(m) +} +func (m *VersionReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionReleaseResponse proto.InternalMessageInfo func (m *VersionReleaseResponse) GetName() string { if m != nil { @@ -134,15 +180,37 @@ func (m *VersionReleaseResponse) GetVersion() string { } type InstallReleaseRequest struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } +func (m *InstallReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseRequest) ProtoMessage() {} +func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{3} +} +func (m *InstallReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstallReleaseRequest.Unmarshal(m, b) +} 
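Note on the regenerated messages above: the new protoc-gen-go output replaces the numbered import aliases (`hapi_release5`, `hapi_chart3`, ...) with plain package aliases, adds `proto3` struct tags, and attaches `XXX_*` bookkeeping fields backed by `xxx_messageInfo_*` helpers. None of this changes the wire format or the exported getters, so calling code is unaffected. A minimal sketch (not part of this diff; the release values are invented) of a round trip through the regenerated types:

```go
// A minimal sketch, not part of this diff: the regenerated messages round-trip
// through proto.Marshal/Unmarshal exactly as before; the XXX_* fields are
// internal bookkeeping and are never set directly. Release values are invented.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	"k8s.io/helm/pkg/proto/hapi/release"
	"k8s.io/helm/pkg/proto/hapi/rudder"
)

func main() {
	req := &rudder.InstallReleaseRequest{
		Release: &release.Release{Name: "smiling-penguin", Namespace: "default"},
	}

	raw, err := proto.Marshal(req) // wire format is unchanged by the regeneration
	if err != nil {
		log.Fatal(err)
	}

	var decoded rudder.InstallReleaseRequest
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetRelease().GetName()) // prints: smiling-penguin
}
```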
+func (m *InstallReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstallReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *InstallReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstallReleaseRequest.Merge(dst, src) +} +func (m *InstallReleaseRequest) XXX_Size() int { + return xxx_messageInfo_InstallReleaseRequest.Size(m) +} +func (m *InstallReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InstallReleaseRequest.DiscardUnknown(m) } -func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } -func (m *InstallReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*InstallReleaseRequest) ProtoMessage() {} -func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_InstallReleaseRequest proto.InternalMessageInfo -func (m *InstallReleaseRequest) GetRelease() *hapi_release5.Release { +func (m *InstallReleaseRequest) GetRelease() *release.Release { if m != nil { return m.Release } @@ -150,16 +218,38 @@ func (m *InstallReleaseRequest) GetRelease() *hapi_release5.Release { } type InstallReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` - Result *Result `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + Result *Result `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } -func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*InstallReleaseResponse) ProtoMessage() {} -func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } +func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseResponse) ProtoMessage() {} +func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{4} +} +func (m *InstallReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstallReleaseResponse.Unmarshal(m, b) +} +func (m *InstallReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstallReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *InstallReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstallReleaseResponse.Merge(dst, src) +} +func (m *InstallReleaseResponse) XXX_Size() int { + return xxx_messageInfo_InstallReleaseResponse.Size(m) +} +func (m *InstallReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InstallReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InstallReleaseResponse proto.InternalMessageInfo -func (m *InstallReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *InstallReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -174,15 +264,37 @@ func (m *InstallReleaseResponse) GetResult() *Result { } type DeleteReleaseRequest struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteReleaseRequest) Reset() { *m = DeleteReleaseRequest{} } +func (m *DeleteReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteReleaseRequest) ProtoMessage() {} +func (*DeleteReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{5} +} +func (m *DeleteReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteReleaseRequest.Unmarshal(m, b) +} +func (m *DeleteReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteReleaseRequest.Merge(dst, src) +} +func (m *DeleteReleaseRequest) XXX_Size() int { + return xxx_messageInfo_DeleteReleaseRequest.Size(m) +} +func (m *DeleteReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteReleaseRequest.DiscardUnknown(m) } -func (m *DeleteReleaseRequest) Reset() { *m = DeleteReleaseRequest{} } -func (m *DeleteReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteReleaseRequest) ProtoMessage() {} -func (*DeleteReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +var xxx_messageInfo_DeleteReleaseRequest proto.InternalMessageInfo -func (m *DeleteReleaseRequest) GetRelease() *hapi_release5.Release { +func (m *DeleteReleaseRequest) GetRelease() *release.Release { if m != nil { return m.Release } @@ -190,16 +302,38 @@ func (m *DeleteReleaseRequest) GetRelease() *hapi_release5.Release { } type DeleteReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` - Result *Result `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + Result *Result `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DeleteReleaseResponse) Reset() { *m = DeleteReleaseResponse{} } -func (m *DeleteReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteReleaseResponse) ProtoMessage() {} -func (*DeleteReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *DeleteReleaseResponse) Reset() { *m = DeleteReleaseResponse{} } +func (m *DeleteReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteReleaseResponse) ProtoMessage() {} +func (*DeleteReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{6} +} +func (m *DeleteReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteReleaseResponse.Unmarshal(m, b) +} +func (m *DeleteReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteReleaseResponse.Merge(dst, src) +} +func (m *DeleteReleaseResponse) XXX_Size() int { + return xxx_messageInfo_DeleteReleaseResponse.Size(m) +} +func (m *DeleteReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteReleaseResponse.DiscardUnknown(m) +} -func (m *DeleteReleaseResponse) GetRelease() 
*hapi_release5.Release { +var xxx_messageInfo_DeleteReleaseResponse proto.InternalMessageInfo + +func (m *DeleteReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -214,27 +348,50 @@ func (m *DeleteReleaseResponse) GetResult() *Result { } type UpgradeReleaseRequest struct { - Current *hapi_release5.Release `protobuf:"bytes,1,opt,name=current" json:"current,omitempty"` - Target *hapi_release5.Release `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"` - Timeout int64 `protobuf:"varint,3,opt,name=Timeout" json:"Timeout,omitempty"` - Wait bool `protobuf:"varint,4,opt,name=Wait" json:"Wait,omitempty"` - Recreate bool `protobuf:"varint,5,opt,name=Recreate" json:"Recreate,omitempty"` - Force bool `protobuf:"varint,6,opt,name=Force" json:"Force,omitempty"` + Current *release.Release `protobuf:"bytes,1,opt,name=current,proto3" json:"current,omitempty"` + Target *release.Release `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Timeout int64 `protobuf:"varint,3,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + Wait bool `protobuf:"varint,4,opt,name=Wait,proto3" json:"Wait,omitempty"` + Recreate bool `protobuf:"varint,5,opt,name=Recreate,proto3" json:"Recreate,omitempty"` + Force bool `protobuf:"varint,6,opt,name=Force,proto3" json:"Force,omitempty"` + CleanupOnFail bool `protobuf:"varint,7,opt,name=CleanupOnFail,proto3" json:"CleanupOnFail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeReleaseRequest) Reset() { *m = UpgradeReleaseRequest{} } +func (m *UpgradeReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*UpgradeReleaseRequest) ProtoMessage() {} +func (*UpgradeReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{7} +} +func (m *UpgradeReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeReleaseRequest.Unmarshal(m, b) +} +func (m *UpgradeReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *UpgradeReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeReleaseRequest.Merge(dst, src) +} +func (m *UpgradeReleaseRequest) XXX_Size() int { + return xxx_messageInfo_UpgradeReleaseRequest.Size(m) +} +func (m *UpgradeReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeReleaseRequest.DiscardUnknown(m) } -func (m *UpgradeReleaseRequest) Reset() { *m = UpgradeReleaseRequest{} } -func (m *UpgradeReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*UpgradeReleaseRequest) ProtoMessage() {} -func (*UpgradeReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +var xxx_messageInfo_UpgradeReleaseRequest proto.InternalMessageInfo -func (m *UpgradeReleaseRequest) GetCurrent() *hapi_release5.Release { +func (m *UpgradeReleaseRequest) GetCurrent() *release.Release { if m != nil { return m.Current } return nil } -func (m *UpgradeReleaseRequest) GetTarget() *hapi_release5.Release { +func (m *UpgradeReleaseRequest) GetTarget() *release.Release { if m != nil { return m.Target } @@ -269,17 +426,46 @@ func (m *UpgradeReleaseRequest) GetForce() bool { return false } +func (m *UpgradeReleaseRequest) GetCleanupOnFail() bool { + if m != nil { + return m.CleanupOnFail + } + return false +} + type UpgradeReleaseResponse struct { - Release *hapi_release5.Release 
`protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` - Result *Result `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + Result *Result `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeReleaseResponse) Reset() { *m = UpgradeReleaseResponse{} } +func (m *UpgradeReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*UpgradeReleaseResponse) ProtoMessage() {} +func (*UpgradeReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{8} +} +func (m *UpgradeReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeReleaseResponse.Unmarshal(m, b) +} +func (m *UpgradeReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *UpgradeReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeReleaseResponse.Merge(dst, src) +} +func (m *UpgradeReleaseResponse) XXX_Size() int { + return xxx_messageInfo_UpgradeReleaseResponse.Size(m) +} +func (m *UpgradeReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeReleaseResponse.DiscardUnknown(m) } -func (m *UpgradeReleaseResponse) Reset() { *m = UpgradeReleaseResponse{} } -func (m *UpgradeReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*UpgradeReleaseResponse) ProtoMessage() {} -func (*UpgradeReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +var xxx_messageInfo_UpgradeReleaseResponse proto.InternalMessageInfo -func (m *UpgradeReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *UpgradeReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -294,27 +480,50 @@ func (m *UpgradeReleaseResponse) GetResult() *Result { } type RollbackReleaseRequest struct { - Current *hapi_release5.Release `protobuf:"bytes,1,opt,name=current" json:"current,omitempty"` - Target *hapi_release5.Release `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"` - Timeout int64 `protobuf:"varint,3,opt,name=Timeout" json:"Timeout,omitempty"` - Wait bool `protobuf:"varint,4,opt,name=Wait" json:"Wait,omitempty"` - Recreate bool `protobuf:"varint,5,opt,name=Recreate" json:"Recreate,omitempty"` - Force bool `protobuf:"varint,6,opt,name=Force" json:"Force,omitempty"` + Current *release.Release `protobuf:"bytes,1,opt,name=current,proto3" json:"current,omitempty"` + Target *release.Release `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Timeout int64 `protobuf:"varint,3,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + Wait bool `protobuf:"varint,4,opt,name=Wait,proto3" json:"Wait,omitempty"` + Recreate bool `protobuf:"varint,5,opt,name=Recreate,proto3" json:"Recreate,omitempty"` + Force bool `protobuf:"varint,6,opt,name=Force,proto3" json:"Force,omitempty"` + CleanupOnFail bool `protobuf:"varint,7,opt,name=CleanupOnFail,proto3" json:"CleanupOnFail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } -func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseRequest) 
ProtoMessage() {} -func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } +func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseRequest) ProtoMessage() {} +func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{9} +} +func (m *RollbackReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackReleaseRequest.Unmarshal(m, b) +} +func (m *RollbackReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *RollbackReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackReleaseRequest.Merge(dst, src) +} +func (m *RollbackReleaseRequest) XXX_Size() int { + return xxx_messageInfo_RollbackReleaseRequest.Size(m) +} +func (m *RollbackReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackReleaseRequest.DiscardUnknown(m) +} -func (m *RollbackReleaseRequest) GetCurrent() *hapi_release5.Release { +var xxx_messageInfo_RollbackReleaseRequest proto.InternalMessageInfo + +func (m *RollbackReleaseRequest) GetCurrent() *release.Release { if m != nil { return m.Current } return nil } -func (m *RollbackReleaseRequest) GetTarget() *hapi_release5.Release { +func (m *RollbackReleaseRequest) GetTarget() *release.Release { if m != nil { return m.Target } @@ -349,17 +558,46 @@ func (m *RollbackReleaseRequest) GetForce() bool { return false } +func (m *RollbackReleaseRequest) GetCleanupOnFail() bool { + if m != nil { + return m.CleanupOnFail + } + return false +} + type RollbackReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` - Result *Result `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + Result *Result `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } -func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseResponse) ProtoMessage() {} -func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } +func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseResponse) ProtoMessage() {} +func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{10} +} +func (m *RollbackReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackReleaseResponse.Unmarshal(m, b) +} +func (m *RollbackReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *RollbackReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackReleaseResponse.Merge(dst, src) +} +func (m *RollbackReleaseResponse) XXX_Size() int { + return xxx_messageInfo_RollbackReleaseResponse.Size(m) +} +func (m *RollbackReleaseResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_RollbackReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackReleaseResponse proto.InternalMessageInfo -func (m *RollbackReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *RollbackReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -374,15 +612,37 @@ func (m *RollbackReleaseResponse) GetResult() *Result { } type ReleaseStatusRequest struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseStatusRequest) Reset() { *m = ReleaseStatusRequest{} } +func (m *ReleaseStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ReleaseStatusRequest) ProtoMessage() {} +func (*ReleaseStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{11} +} +func (m *ReleaseStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseStatusRequest.Unmarshal(m, b) +} +func (m *ReleaseStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseStatusRequest.Marshal(b, m, deterministic) +} +func (dst *ReleaseStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseStatusRequest.Merge(dst, src) +} +func (m *ReleaseStatusRequest) XXX_Size() int { + return xxx_messageInfo_ReleaseStatusRequest.Size(m) +} +func (m *ReleaseStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseStatusRequest.DiscardUnknown(m) } -func (m *ReleaseStatusRequest) Reset() { *m = ReleaseStatusRequest{} } -func (m *ReleaseStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ReleaseStatusRequest) ProtoMessage() {} -func (*ReleaseStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +var xxx_messageInfo_ReleaseStatusRequest proto.InternalMessageInfo -func (m *ReleaseStatusRequest) GetRelease() *hapi_release5.Release { +func (m *ReleaseStatusRequest) GetRelease() *release.Release { if m != nil { return m.Release } @@ -390,23 +650,45 @@ func (m *ReleaseStatusRequest) GetRelease() *hapi_release5.Release { } type ReleaseStatusResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` - Info *hapi_release3.Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + Info *release.Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReleaseStatusResponse) Reset() { *m = ReleaseStatusResponse{} } -func (m *ReleaseStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ReleaseStatusResponse) ProtoMessage() {} -func (*ReleaseStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *ReleaseStatusResponse) Reset() { *m = ReleaseStatusResponse{} } +func (m *ReleaseStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ReleaseStatusResponse) ProtoMessage() {} +func (*ReleaseStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rudder_dd8cdbe38a210d28, []int{12} +} +func (m *ReleaseStatusResponse) XXX_Unmarshal(b []byte) error { 
+ return xxx_messageInfo_ReleaseStatusResponse.Unmarshal(m, b) +} +func (m *ReleaseStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseStatusResponse.Marshal(b, m, deterministic) +} +func (dst *ReleaseStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseStatusResponse.Merge(dst, src) +} +func (m *ReleaseStatusResponse) XXX_Size() int { + return xxx_messageInfo_ReleaseStatusResponse.Size(m) +} +func (m *ReleaseStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseStatusResponse.DiscardUnknown(m) +} -func (m *ReleaseStatusResponse) GetRelease() *hapi_release5.Release { +var xxx_messageInfo_ReleaseStatusResponse proto.InternalMessageInfo + +func (m *ReleaseStatusResponse) GetRelease() *release.Release { if m != nil { return m.Release } return nil } -func (m *ReleaseStatusResponse) GetInfo() *hapi_release3.Info { +func (m *ReleaseStatusResponse) GetInfo() *release.Info { if m != nil { return m.Info } @@ -438,8 +720,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for ReleaseModuleService service - +// ReleaseModuleServiceClient is the client API for ReleaseModuleService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ReleaseModuleServiceClient interface { Version(ctx context.Context, in *VersionReleaseRequest, opts ...grpc.CallOption) (*VersionReleaseResponse, error) // InstallRelease requests installation of a chart as a new release. @@ -464,7 +747,7 @@ func NewReleaseModuleServiceClient(cc *grpc.ClientConn) ReleaseModuleServiceClie func (c *releaseModuleServiceClient) Version(ctx context.Context, in *VersionReleaseRequest, opts ...grpc.CallOption) (*VersionReleaseResponse, error) { out := new(VersionReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/Version", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/Version", in, out, opts...) if err != nil { return nil, err } @@ -473,7 +756,7 @@ func (c *releaseModuleServiceClient) Version(ctx context.Context, in *VersionRel func (c *releaseModuleServiceClient) InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) { out := new(InstallReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/InstallRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/InstallRelease", in, out, opts...) if err != nil { return nil, err } @@ -482,7 +765,7 @@ func (c *releaseModuleServiceClient) InstallRelease(ctx context.Context, in *Ins func (c *releaseModuleServiceClient) DeleteRelease(ctx context.Context, in *DeleteReleaseRequest, opts ...grpc.CallOption) (*DeleteReleaseResponse, error) { out := new(DeleteReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/DeleteRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/DeleteRelease", in, out, opts...) 
if err != nil { return nil, err } @@ -491,7 +774,7 @@ func (c *releaseModuleServiceClient) DeleteRelease(ctx context.Context, in *Dele func (c *releaseModuleServiceClient) RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) { out := new(RollbackReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/RollbackRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/RollbackRelease", in, out, opts...) if err != nil { return nil, err } @@ -500,7 +783,7 @@ func (c *releaseModuleServiceClient) RollbackRelease(ctx context.Context, in *Ro func (c *releaseModuleServiceClient) UpgradeRelease(ctx context.Context, in *UpgradeReleaseRequest, opts ...grpc.CallOption) (*UpgradeReleaseResponse, error) { out := new(UpgradeReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/UpgradeRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/UpgradeRelease", in, out, opts...) if err != nil { return nil, err } @@ -509,15 +792,14 @@ func (c *releaseModuleServiceClient) UpgradeRelease(ctx context.Context, in *Upg func (c *releaseModuleServiceClient) ReleaseStatus(ctx context.Context, in *ReleaseStatusRequest, opts ...grpc.CallOption) (*ReleaseStatusResponse, error) { out := new(ReleaseStatusResponse) - err := grpc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/ReleaseStatus", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.rudder.ReleaseModuleService/ReleaseStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for ReleaseModuleService service - +// ReleaseModuleServiceServer is the server API for ReleaseModuleService service. type ReleaseModuleServiceServer interface { Version(context.Context, *VersionReleaseRequest) (*VersionReleaseResponse, error) // InstallRelease requests installation of a chart as a new release. 
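The two caller-visible changes in `rudder.pb.go` are the new `CleanupOnFail` flag on `UpgradeReleaseRequest`/`RollbackReleaseRequest` and the switch from the deprecated package-level `grpc.Invoke` helper to the `ClientConn.Invoke` method inside the generated stubs. A hedged sketch of how a client of the release module service might exercise both; the listener address, timeout, and release values are illustrative assumptions, not taken from this diff:

```go
// A hedged sketch (not from this diff): driving the regenerated Rudder client.
// The address, timeout, and release contents below are assumed for illustration.
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"k8s.io/helm/pkg/proto/hapi/release"
	"k8s.io/helm/pkg/proto/hapi/rudder"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:10001", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := rudder.NewReleaseModuleServiceClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// CleanupOnFail is the newly added field: it asks the release module to
	// clean up resources created by an upgrade that ends up failing.
	resp, err := client.UpgradeRelease(ctx, &rudder.UpgradeReleaseRequest{
		Current:       &release.Release{Name: "smiling-penguin", Version: 1},
		Target:        &release.Release{Name: "smiling-penguin", Version: 2},
		Timeout:       300,
		Wait:          true,
		CleanupOnFail: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("upgrade info: %s", resp.GetResult().GetInfo())
}
```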
@@ -677,46 +959,47 @@ var _ReleaseModuleService_serviceDesc = grpc.ServiceDesc{ Metadata: "hapi/rudder/rudder.proto", } -func init() { proto.RegisterFile("hapi/rudder/rudder.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 597 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0x5f, 0x8f, 0xd2, 0x4e, - 0x14, 0xa5, 0xb0, 0x14, 0xb8, 0x64, 0x7f, 0x3f, 0x32, 0xa1, 0xd0, 0x34, 0x3e, 0x90, 0x3e, 0x18, - 0xe2, 0xba, 0x25, 0x41, 0x1f, 0x7d, 0x51, 0x96, 0xfd, 0x13, 0x23, 0x9b, 0x0c, 0xe2, 0x26, 0xbe, - 0x75, 0xe1, 0x82, 0xd5, 0xd2, 0xd6, 0xe9, 0x74, 0x1f, 0xd5, 0x4f, 0xe3, 0x57, 0xd2, 0x8f, 0x63, - 0xda, 0x69, 0x89, 0xad, 0xd3, 0x88, 0x6b, 0xc2, 0x83, 0x4f, 0x9d, 0xe9, 0x3d, 0xdc, 0x39, 0xe7, - 0xf4, 0xce, 0x09, 0xa0, 0xbf, 0xb3, 0x03, 0x67, 0xc4, 0xa2, 0xd5, 0x0a, 0x59, 0xfa, 0xb0, 0x02, - 0xe6, 0x73, 0x9f, 0x74, 0xe3, 0x8a, 0x15, 0x22, 0xbb, 0x73, 0x96, 0x18, 0x5a, 0xa2, 0x66, 0xf4, - 0x05, 0x1e, 0x5d, 0xb4, 0x43, 0x1c, 0x39, 0xde, 0xda, 0x17, 0x70, 0xc3, 0xc8, 0x15, 0xd2, 0xa7, - 0xa8, 0x99, 0x2e, 0xa8, 0x14, 0xc3, 0xc8, 0xe5, 0x84, 0xc0, 0x51, 0xfc, 0x1b, 0x5d, 0x19, 0x28, - 0xc3, 0x16, 0x4d, 0xd6, 0xa4, 0x03, 0x35, 0xd7, 0xdf, 0xe8, 0xd5, 0x41, 0x6d, 0xd8, 0xa2, 0xf1, - 0xd2, 0x7c, 0x06, 0xea, 0x9c, 0xdb, 0x3c, 0x0a, 0x49, 0x1b, 0x1a, 0x8b, 0xd9, 0xcb, 0xd9, 0xf5, - 0xcd, 0xac, 0x53, 0x89, 0x37, 0xf3, 0xc5, 0x64, 0x32, 0x9d, 0xcf, 0x3b, 0x0a, 0x39, 0x86, 0xd6, - 0x62, 0x36, 0xb9, 0x7c, 0x3e, 0xbb, 0x98, 0x9e, 0x75, 0xaa, 0xa4, 0x05, 0xf5, 0x29, 0xa5, 0xd7, - 0xb4, 0x53, 0x33, 0xfb, 0xa0, 0xbd, 0x41, 0x16, 0x3a, 0xbe, 0x47, 0x05, 0x0b, 0x8a, 0x1f, 0x23, - 0x0c, 0xb9, 0x79, 0x0e, 0xbd, 0x62, 0x21, 0x0c, 0x7c, 0x2f, 0xc4, 0x98, 0x96, 0x67, 0x6f, 0x31, - 0xa3, 0x15, 0xaf, 0x89, 0x0e, 0x8d, 0x3b, 0x81, 0xd6, 0xab, 0xc9, 0xeb, 0x6c, 0x6b, 0x5e, 0x82, - 0x76, 0xe5, 0x85, 0xdc, 0x76, 0xdd, 0xfc, 0x01, 0x64, 0x04, 0x8d, 0x54, 0x78, 0xd2, 0xa9, 0x3d, - 0xd6, 0xac, 0xc4, 0xc4, 0xcc, 0x8d, 0x0c, 0x9e, 0xa1, 0xcc, 0xcf, 0xd0, 0x2b, 0x76, 0x4a, 0x19, - 0xfd, 0x69, 0x2b, 0xf2, 0x14, 0x54, 0x96, 0x78, 0x9c, 0xb0, 0x6d, 0x8f, 0x1f, 0x58, 0xb2, 0xef, - 0x67, 0x89, 0xef, 0x40, 0x53, 0xac, 0x79, 0x01, 0xdd, 0x33, 0x74, 0x91, 0xe3, 0xdf, 0x2a, 0xf9, - 0x04, 0x5a, 0xa1, 0xd1, 0x61, 0x85, 0x7c, 0x53, 0x40, 0x5b, 0x04, 0x1b, 0x66, 0xaf, 0x24, 0x52, - 0x96, 0x11, 0x63, 0xe8, 0xf1, 0xdf, 0x10, 0x48, 0x51, 0xe4, 0x14, 0x54, 0x6e, 0xb3, 0x0d, 0x66, - 0x04, 0x4a, 0xf0, 0x29, 0x28, 0x9e, 0x93, 0xd7, 0xce, 0x16, 0xfd, 0x88, 0xeb, 0xb5, 0x81, 0x32, - 0xac, 0xd1, 0x6c, 0x1b, 0x4f, 0xd5, 0x8d, 0xed, 0x70, 0xfd, 0x68, 0xa0, 0x0c, 0x9b, 0x34, 0x59, - 0x13, 0x03, 0x9a, 0x14, 0x97, 0x0c, 0x6d, 0x8e, 0x7a, 0x3d, 0x79, 0xbf, 0xdb, 0x93, 0x2e, 0xd4, - 0xcf, 0x7d, 0xb6, 0x44, 0x5d, 0x4d, 0x0a, 0x62, 0x13, 0xcf, 0x48, 0x51, 0xd8, 0x61, 0xad, 0xfd, - 0xae, 0x40, 0x8f, 0xfa, 0xae, 0x7b, 0x6b, 0x2f, 0x3f, 0xfc, 0x63, 0xde, 0x7e, 0x51, 0xa0, 0xff, - 0x8b, 0xb4, 0x83, 0xdf, 0xc0, 0xb4, 0x93, 0x88, 0xbc, 0x7b, 0xdf, 0xc0, 0x00, 0xb4, 0x42, 0xa3, - 0xfb, 0x0a, 0x79, 0x98, 0x86, 0xb4, 0x90, 0x41, 0xf2, 0xe8, 0x2b, 0x6f, 0xed, 0x8b, 0xe0, 0x1e, - 0x7f, 0xad, 0xef, 0xb8, 0xbf, 0xf2, 0x57, 0x91, 0x8b, 0x73, 0x21, 0x95, 0xac, 0xa1, 0x91, 0x06, - 0x2d, 0x39, 0x91, 0x9b, 0x20, 0x0d, 0x68, 0xe3, 0xf1, 0x7e, 0x60, 0xa1, 0xcb, 0xac, 0x90, 0x2d, - 0xfc, 0x97, 0x8f, 0xcf, 0xb2, 0xe3, 0xa4, 0x71, 0x5d, 0x76, 0x9c, 0x3c, 0x91, 0xcd, 0x0a, 0x79, - 0x0f, 0xc7, 0xb9, 0x8c, 0x23, 0x8f, 0xe4, 0x0d, 0x64, 0x89, 0x6a, 0x9c, 0xec, 0x85, 0xdd, 0x9d, - 0x15, 0xc0, 0xff, 0x85, 0xc1, 0x24, 
0x25, 0x74, 0xe5, 0x57, 0xd3, 0x38, 0xdd, 0x13, 0xfd, 0xb3, - 0x99, 0xf9, 0x9c, 0x29, 0x33, 0x53, 0x1a, 0xb3, 0x65, 0x66, 0xca, 0xa3, 0x4b, 0x98, 0x99, 0x1b, - 0xd7, 0x32, 0x33, 0x65, 0x97, 0xa3, 0xcc, 0x4c, 0xe9, 0xfc, 0x9b, 0x95, 0x17, 0xcd, 0xb7, 0xaa, - 0x40, 0xdc, 0xaa, 0xc9, 0x1f, 0x92, 0x27, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa5, 0x37, - 0x75, 0xf7, 0x08, 0x00, 0x00, +func init() { proto.RegisterFile("hapi/rudder/rudder.proto", fileDescriptor_rudder_dd8cdbe38a210d28) } + +var fileDescriptor_rudder_dd8cdbe38a210d28 = []byte{ + // 615 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x8d, 0x9b, 0xc6, 0x69, 0xa6, 0x2a, 0x44, 0xab, 0xba, 0xb5, 0x2c, 0x0e, 0x91, 0x85, 0x50, + 0x44, 0xa9, 0x2b, 0x15, 0x8e, 0x5c, 0x20, 0xfd, 0x14, 0x22, 0x95, 0x36, 0x84, 0x4a, 0xdc, 0xb6, + 0xc9, 0x24, 0x18, 0x36, 0xb6, 0x59, 0xaf, 0x7b, 0x04, 0x7e, 0x0d, 0xff, 0x12, 0x84, 0xec, 0xb5, + 0x23, 0x6c, 0xd6, 0x22, 0x14, 0x29, 0x17, 0x4e, 0xde, 0xd9, 0x79, 0x9d, 0x9d, 0xf7, 0x76, 0xf6, + 0x35, 0x60, 0xbf, 0x67, 0x91, 0x7f, 0x24, 0x92, 0xe9, 0x14, 0x45, 0xfe, 0xf1, 0x22, 0x11, 0xca, + 0x90, 0xec, 0xa6, 0x19, 0x2f, 0x46, 0x71, 0xeb, 0x4f, 0x30, 0xf6, 0x54, 0xce, 0xd9, 0x57, 0x78, + 0xe4, 0xc8, 0x62, 0x3c, 0xf2, 0x83, 0x59, 0xa8, 0xe0, 0x8e, 0x53, 0x4a, 0xe4, 0x5f, 0x95, 0x73, + 0x39, 0x98, 0x14, 0xe3, 0x84, 0x4b, 0x42, 0x60, 0x33, 0xfd, 0x1b, 0xdb, 0xe8, 0x19, 0xfd, 0x0e, + 0xcd, 0xd6, 0xa4, 0x0b, 0x4d, 0x1e, 0xce, 0xed, 0x8d, 0x5e, 0xb3, 0xdf, 0xa1, 0xe9, 0xd2, 0x7d, + 0x0e, 0xe6, 0x48, 0x32, 0x99, 0xc4, 0x64, 0x1b, 0xda, 0xe3, 0xe1, 0xab, 0xe1, 0xd5, 0xf5, 0xb0, + 0xdb, 0x48, 0x83, 0xd1, 0x78, 0x30, 0x38, 0x1d, 0x8d, 0xba, 0x06, 0xd9, 0x81, 0xce, 0x78, 0x38, + 0xb8, 0x78, 0x31, 0x3c, 0x3f, 0x3d, 0xe9, 0x6e, 0x90, 0x0e, 0xb4, 0x4e, 0x29, 0xbd, 0xa2, 0xdd, + 0xa6, 0xbb, 0x0f, 0xd6, 0x5b, 0x14, 0xb1, 0x1f, 0x06, 0x54, 0x75, 0x41, 0xf1, 0x53, 0x82, 0xb1, + 0x74, 0xcf, 0x60, 0xaf, 0x9a, 0x88, 0xa3, 0x30, 0x88, 0x31, 0x6d, 0x2b, 0x60, 0x0b, 0x2c, 0xda, + 0x4a, 0xd7, 0xc4, 0x86, 0xf6, 0xad, 0x42, 0xdb, 0x1b, 0xd9, 0x76, 0x11, 0xba, 0x17, 0x60, 0x5d, + 0x06, 0xb1, 0x64, 0x9c, 0x97, 0x0f, 0x20, 0x47, 0xd0, 0xce, 0x89, 0x67, 0x95, 0xb6, 0x8f, 0x2d, + 0x2f, 0x13, 0xb1, 0x50, 0xa3, 0x80, 0x17, 0x28, 0xf7, 0x0b, 0xec, 0x55, 0x2b, 0xe5, 0x1d, 0xfd, + 0x6d, 0x29, 0xf2, 0x0c, 0x4c, 0x91, 0x69, 0x9c, 0x75, 0xbb, 0x7d, 0xfc, 0xc0, 0xd3, 0xdd, 0x9f, + 0xa7, 0xee, 0x81, 0xe6, 0x58, 0xf7, 0x1c, 0x76, 0x4f, 0x90, 0xa3, 0xc4, 0x7f, 0x65, 0xf2, 0x19, + 0xac, 0x4a, 0xa1, 0xf5, 0x12, 0xf9, 0x6e, 0x80, 0x35, 0x8e, 0xe6, 0x82, 0x4d, 0x35, 0x54, 0x26, + 0x89, 0x10, 0x18, 0xc8, 0x3f, 0x34, 0x90, 0xa3, 0xc8, 0x21, 0x98, 0x92, 0x89, 0x39, 0x16, 0x0d, + 0xd4, 0xe0, 0x73, 0x50, 0x3a, 0x27, 0x6f, 0xfc, 0x05, 0x86, 0x89, 0xb4, 0x9b, 0x3d, 0xa3, 0xdf, + 0xa4, 0x45, 0x98, 0x4e, 0xd5, 0x35, 0xf3, 0xa5, 0xbd, 0xd9, 0x33, 0xfa, 0x5b, 0x34, 0x5b, 0x13, + 0x07, 0xb6, 0x28, 0x4e, 0x04, 0x32, 0x89, 0x76, 0x2b, 0xdb, 0x5f, 0xc6, 0x64, 0x17, 0x5a, 0x67, + 0xa1, 0x98, 0xa0, 0x6d, 0x66, 0x09, 0x15, 0x90, 0x87, 0xb0, 0x33, 0xe0, 0xc8, 0x82, 0x24, 0xba, + 0x0a, 0xce, 0x98, 0xcf, 0xed, 0x76, 0x96, 0x2d, 0x6f, 0xa6, 0x93, 0x54, 0xa5, 0xbf, 0xde, 0x0b, + 0xf8, 0x61, 0xc0, 0x1e, 0x0d, 0x39, 0xbf, 0x61, 0x93, 0x8f, 0xff, 0xe5, 0x0d, 0x7c, 0x35, 0x60, + 0xff, 0x37, 0x01, 0xd6, 0xfe, 0x9a, 0xf3, 0x4a, 0xca, 0x3e, 0xef, 0xfc, 0x9a, 0x23, 0xb0, 0x2a, + 0x85, 0xee, 0x4a, 0xe4, 0x51, 0x6e, 0xf8, 0x8a, 0x06, 0x29, 0xa3, 0x2f, 0x83, 0x59, 0xa8, 0xfe, + 0x09, 0x1c, 0x7f, 
0x6b, 0x2d, 0x7b, 0x7f, 0x1d, 0x4e, 0x13, 0x8e, 0x23, 0x45, 0x95, 0xcc, 0xa0, + 0x9d, 0x9b, 0x36, 0x39, 0xd0, 0x8b, 0xa0, 0x35, 0x7b, 0xe7, 0xc9, 0x6a, 0x60, 0xc5, 0xcb, 0x6d, + 0x90, 0x05, 0xdc, 0x2b, 0x5b, 0x71, 0xdd, 0x71, 0x5a, 0xeb, 0xaf, 0x3b, 0x4e, 0xef, 0xee, 0x6e, + 0x83, 0x7c, 0x80, 0x9d, 0x92, 0x5f, 0x92, 0xc7, 0xfa, 0x02, 0x3a, 0x77, 0x76, 0x0e, 0x56, 0xc2, + 0x2e, 0xcf, 0x8a, 0xe0, 0x7e, 0x65, 0x30, 0x49, 0x4d, 0xbb, 0xfa, 0x07, 0xec, 0x1c, 0xae, 0x88, + 0xfe, 0x55, 0xcc, 0xb2, 0x1b, 0xd5, 0x89, 0xa9, 0xb5, 0xec, 0x3a, 0x31, 0xf5, 0x06, 0xa7, 0xc4, + 0x2c, 0x8d, 0x6b, 0x9d, 0x98, 0xba, 0xc7, 0x51, 0x27, 0xa6, 0x76, 0xfe, 0xdd, 0xc6, 0xcb, 0xad, + 0x77, 0xa6, 0x42, 0xdc, 0x98, 0xd9, 0x8f, 0x9b, 0xa7, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe2, + 0x9e, 0x21, 0x0d, 0x43, 0x09, 0x00, 0x00, } diff --git a/pkg/proto/hapi/services/tiller.pb.go b/pkg/proto/hapi/services/tiller.pb.go index f57ad8582..894d2eb03 100644 --- a/pkg/proto/hapi/services/tiller.pb.go +++ b/pkg/proto/hapi/services/tiller.pb.go @@ -1,47 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: hapi/services/tiller.proto -/* -Package services is a generated protocol buffer package. - -It is generated from these files: - hapi/services/tiller.proto - -It has these top-level messages: - ListReleasesRequest - ListSort - ListReleasesResponse - GetReleaseStatusRequest - GetReleaseStatusResponse - GetReleaseContentRequest - GetReleaseContentResponse - UpdateReleaseRequest - UpdateReleaseResponse - RollbackReleaseRequest - RollbackReleaseResponse - InstallReleaseRequest - InstallReleaseResponse - UninstallReleaseRequest - UninstallReleaseResponse - GetVersionRequest - GetVersionResponse - GetHistoryRequest - GetHistoryResponse - TestReleaseRequest - TestReleaseResponse -*/ package services import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import hapi_chart3 "k8s.io/helm/pkg/proto/hapi/chart" -import hapi_chart "k8s.io/helm/pkg/proto/hapi/chart" -import hapi_release5 "k8s.io/helm/pkg/proto/hapi/release" -import hapi_release4 "k8s.io/helm/pkg/proto/hapi/release" -import hapi_release1 "k8s.io/helm/pkg/proto/hapi/release" -import hapi_release3 "k8s.io/helm/pkg/proto/hapi/release" -import hapi_version "k8s.io/helm/pkg/proto/hapi/version" +import chart "k8s.io/helm/pkg/proto/hapi/chart" +import release "k8s.io/helm/pkg/proto/hapi/release" +import version "k8s.io/helm/pkg/proto/hapi/version" import ( context "golang.org/x/net/context" @@ -85,7 +52,9 @@ var ListSort_SortBy_value = map[string]int32{ func (x ListSort_SortBy) String() string { return proto.EnumName(ListSort_SortBy_name, int32(x)) } -func (ListSort_SortBy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } +func (ListSort_SortBy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{1, 0} +} // SortOrder defines sort orders to augment sorting operations. type ListSort_SortOrder int32 @@ -107,7 +76,9 @@ var ListSort_SortOrder_value = map[string]int32{ func (x ListSort_SortOrder) String() string { return proto.EnumName(ListSort_SortOrder_name, int32(x)) } -func (ListSort_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} } +func (ListSort_SortOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{1, 1} +} // ListReleasesRequest requests a list of releases. 
// @@ -116,30 +87,52 @@ func (ListSort_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescript // Releases can be sorted according to a few pre-determined sort strategies. type ListReleasesRequest struct { // Limit is the maximum number of releases to be returned. - Limit int64 `protobuf:"varint,1,opt,name=limit" json:"limit,omitempty"` + Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` // Offset is the last release name that was seen. The next listing // operation will start with the name after this one. // Example: If list one returns albert, bernie, carl, and sets 'next: dennis'. // dennis is the offset. Supplying 'dennis' for the next request should // cause the next batch to return a set of results starting with 'dennis'. - Offset string `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + Offset string `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"` // SortBy is the sort field that the ListReleases server should sort data before returning. - SortBy ListSort_SortBy `protobuf:"varint,3,opt,name=sort_by,json=sortBy,enum=hapi.services.tiller.ListSort_SortBy" json:"sort_by,omitempty"` + SortBy ListSort_SortBy `protobuf:"varint,3,opt,name=sort_by,json=sortBy,proto3,enum=hapi.services.tiller.ListSort_SortBy" json:"sort_by,omitempty"` // Filter is a regular expression used to filter which releases should be listed. // // Anything that matches the regexp will be included in the results. - Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` // SortOrder is the ordering directive used for sorting. - SortOrder ListSort_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,enum=hapi.services.tiller.ListSort_SortOrder" json:"sort_order,omitempty"` - StatusCodes []hapi_release3.Status_Code `protobuf:"varint,6,rep,packed,name=status_codes,json=statusCodes,enum=hapi.release.Status_Code" json:"status_codes,omitempty"` + SortOrder ListSort_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=hapi.services.tiller.ListSort_SortOrder" json:"sort_order,omitempty"` + StatusCodes []release.Status_Code `protobuf:"varint,6,rep,packed,name=status_codes,json=statusCodes,proto3,enum=hapi.release.Status_Code" json:"status_codes,omitempty"` // Namespace is the filter to select releases only from a specific namespace. 
- Namespace string `protobuf:"bytes,7,opt,name=namespace" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ListReleasesRequest) Reset() { *m = ListReleasesRequest{} } -func (m *ListReleasesRequest) String() string { return proto.CompactTextString(m) } -func (*ListReleasesRequest) ProtoMessage() {} -func (*ListReleasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ListReleasesRequest) Reset() { *m = ListReleasesRequest{} } +func (m *ListReleasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListReleasesRequest) ProtoMessage() {} +func (*ListReleasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{0} +} +func (m *ListReleasesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListReleasesRequest.Unmarshal(m, b) +} +func (m *ListReleasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListReleasesRequest.Marshal(b, m, deterministic) +} +func (dst *ListReleasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListReleasesRequest.Merge(dst, src) +} +func (m *ListReleasesRequest) XXX_Size() int { + return xxx_messageInfo_ListReleasesRequest.Size(m) +} +func (m *ListReleasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListReleasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListReleasesRequest proto.InternalMessageInfo func (m *ListReleasesRequest) GetLimit() int64 { if m != nil { @@ -176,7 +169,7 @@ func (m *ListReleasesRequest) GetSortOrder() ListSort_SortOrder { return ListSort_ASC } -func (m *ListReleasesRequest) GetStatusCodes() []hapi_release3.Status_Code { +func (m *ListReleasesRequest) GetStatusCodes() []release.Status_Code { if m != nil { return m.StatusCodes } @@ -192,30 +185,74 @@ func (m *ListReleasesRequest) GetNamespace() string { // ListSort defines sorting fields on a release list. type ListSort struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSort) Reset() { *m = ListSort{} } +func (m *ListSort) String() string { return proto.CompactTextString(m) } +func (*ListSort) ProtoMessage() {} +func (*ListSort) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{1} +} +func (m *ListSort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSort.Unmarshal(m, b) +} +func (m *ListSort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSort.Marshal(b, m, deterministic) +} +func (dst *ListSort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSort.Merge(dst, src) +} +func (m *ListSort) XXX_Size() int { + return xxx_messageInfo_ListSort.Size(m) +} +func (m *ListSort) XXX_DiscardUnknown() { + xxx_messageInfo_ListSort.DiscardUnknown(m) } -func (m *ListSort) Reset() { *m = ListSort{} } -func (m *ListSort) String() string { return proto.CompactTextString(m) } -func (*ListSort) ProtoMessage() {} -func (*ListSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_ListSort proto.InternalMessageInfo // ListReleasesResponse is a list of releases. type ListReleasesResponse struct { // Count is the expected total number of releases to be returned. 
- Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` // Next is the name of the next release. If this is other than an empty // string, it means there are more results. - Next string `protobuf:"bytes,2,opt,name=next" json:"next,omitempty"` + Next string `protobuf:"bytes,2,opt,name=next,proto3" json:"next,omitempty"` // Total is the total number of queryable releases. - Total int64 `protobuf:"varint,3,opt,name=total" json:"total,omitempty"` + Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` // Releases is the list of found release objects. - Releases []*hapi_release5.Release `protobuf:"bytes,4,rep,name=releases" json:"releases,omitempty"` + Releases []*release.Release `protobuf:"bytes,4,rep,name=releases,proto3" json:"releases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ListReleasesResponse) Reset() { *m = ListReleasesResponse{} } -func (m *ListReleasesResponse) String() string { return proto.CompactTextString(m) } -func (*ListReleasesResponse) ProtoMessage() {} -func (*ListReleasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *ListReleasesResponse) Reset() { *m = ListReleasesResponse{} } +func (m *ListReleasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListReleasesResponse) ProtoMessage() {} +func (*ListReleasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{2} +} +func (m *ListReleasesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListReleasesResponse.Unmarshal(m, b) +} +func (m *ListReleasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListReleasesResponse.Marshal(b, m, deterministic) +} +func (dst *ListReleasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListReleasesResponse.Merge(dst, src) +} +func (m *ListReleasesResponse) XXX_Size() int { + return xxx_messageInfo_ListReleasesResponse.Size(m) +} +func (m *ListReleasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListReleasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListReleasesResponse proto.InternalMessageInfo func (m *ListReleasesResponse) GetCount() int64 { if m != nil { @@ -238,7 +275,7 @@ func (m *ListReleasesResponse) GetTotal() int64 { return 0 } -func (m *ListReleasesResponse) GetReleases() []*hapi_release5.Release { +func (m *ListReleasesResponse) GetReleases() []*release.Release { if m != nil { return m.Releases } @@ -248,15 +285,37 @@ func (m *ListReleasesResponse) GetReleases() []*hapi_release5.Release { // GetReleaseStatusRequest is a request to get the status of a release. 
type GetReleaseStatusRequest struct { // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Version is the version of the release - Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetReleaseStatusRequest) Reset() { *m = GetReleaseStatusRequest{} } -func (m *GetReleaseStatusRequest) String() string { return proto.CompactTextString(m) } -func (*GetReleaseStatusRequest) ProtoMessage() {} -func (*GetReleaseStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *GetReleaseStatusRequest) Reset() { *m = GetReleaseStatusRequest{} } +func (m *GetReleaseStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetReleaseStatusRequest) ProtoMessage() {} +func (*GetReleaseStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{3} +} +func (m *GetReleaseStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReleaseStatusRequest.Unmarshal(m, b) +} +func (m *GetReleaseStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReleaseStatusRequest.Marshal(b, m, deterministic) +} +func (dst *GetReleaseStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReleaseStatusRequest.Merge(dst, src) +} +func (m *GetReleaseStatusRequest) XXX_Size() int { + return xxx_messageInfo_GetReleaseStatusRequest.Size(m) +} +func (m *GetReleaseStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetReleaseStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetReleaseStatusRequest proto.InternalMessageInfo func (m *GetReleaseStatusRequest) GetName() string { if m != nil { @@ -275,17 +334,39 @@ func (m *GetReleaseStatusRequest) GetVersion() int32 { // GetReleaseStatusResponse is the response indicating the status of the named release. type GetReleaseStatusResponse struct { // Name is the name of the release. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Info contains information about the release. 
- Info *hapi_release4.Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Info *release.Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // Namespace the release was released into - Namespace string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetReleaseStatusResponse) Reset() { *m = GetReleaseStatusResponse{} } -func (m *GetReleaseStatusResponse) String() string { return proto.CompactTextString(m) } -func (*GetReleaseStatusResponse) ProtoMessage() {} -func (*GetReleaseStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *GetReleaseStatusResponse) Reset() { *m = GetReleaseStatusResponse{} } +func (m *GetReleaseStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetReleaseStatusResponse) ProtoMessage() {} +func (*GetReleaseStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{4} +} +func (m *GetReleaseStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReleaseStatusResponse.Unmarshal(m, b) +} +func (m *GetReleaseStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReleaseStatusResponse.Marshal(b, m, deterministic) +} +func (dst *GetReleaseStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReleaseStatusResponse.Merge(dst, src) +} +func (m *GetReleaseStatusResponse) XXX_Size() int { + return xxx_messageInfo_GetReleaseStatusResponse.Size(m) +} +func (m *GetReleaseStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetReleaseStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetReleaseStatusResponse proto.InternalMessageInfo func (m *GetReleaseStatusResponse) GetName() string { if m != nil { @@ -294,7 +375,7 @@ func (m *GetReleaseStatusResponse) GetName() string { return "" } -func (m *GetReleaseStatusResponse) GetInfo() *hapi_release4.Info { +func (m *GetReleaseStatusResponse) GetInfo() *release.Info { if m != nil { return m.Info } @@ -311,15 +392,37 @@ func (m *GetReleaseStatusResponse) GetNamespace() string { // GetReleaseContentRequest is a request to get the contents of a release. 
type GetReleaseContentRequest struct { // The name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Version is the version of the release - Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetReleaseContentRequest) Reset() { *m = GetReleaseContentRequest{} } +func (m *GetReleaseContentRequest) String() string { return proto.CompactTextString(m) } +func (*GetReleaseContentRequest) ProtoMessage() {} +func (*GetReleaseContentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{5} +} +func (m *GetReleaseContentRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReleaseContentRequest.Unmarshal(m, b) +} +func (m *GetReleaseContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReleaseContentRequest.Marshal(b, m, deterministic) +} +func (dst *GetReleaseContentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReleaseContentRequest.Merge(dst, src) +} +func (m *GetReleaseContentRequest) XXX_Size() int { + return xxx_messageInfo_GetReleaseContentRequest.Size(m) +} +func (m *GetReleaseContentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetReleaseContentRequest.DiscardUnknown(m) } -func (m *GetReleaseContentRequest) Reset() { *m = GetReleaseContentRequest{} } -func (m *GetReleaseContentRequest) String() string { return proto.CompactTextString(m) } -func (*GetReleaseContentRequest) ProtoMessage() {} -func (*GetReleaseContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +var xxx_messageInfo_GetReleaseContentRequest proto.InternalMessageInfo func (m *GetReleaseContentRequest) GetName() string { if m != nil { @@ -338,15 +441,37 @@ func (m *GetReleaseContentRequest) GetVersion() int32 { // GetReleaseContentResponse is a response containing the contents of a release. 
type GetReleaseContentResponse struct { // The release content - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetReleaseContentResponse) Reset() { *m = GetReleaseContentResponse{} } -func (m *GetReleaseContentResponse) String() string { return proto.CompactTextString(m) } -func (*GetReleaseContentResponse) ProtoMessage() {} -func (*GetReleaseContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *GetReleaseContentResponse) Reset() { *m = GetReleaseContentResponse{} } +func (m *GetReleaseContentResponse) String() string { return proto.CompactTextString(m) } +func (*GetReleaseContentResponse) ProtoMessage() {} +func (*GetReleaseContentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{6} +} +func (m *GetReleaseContentResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReleaseContentResponse.Unmarshal(m, b) +} +func (m *GetReleaseContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReleaseContentResponse.Marshal(b, m, deterministic) +} +func (dst *GetReleaseContentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReleaseContentResponse.Merge(dst, src) +} +func (m *GetReleaseContentResponse) XXX_Size() int { + return xxx_messageInfo_GetReleaseContentResponse.Size(m) +} +func (m *GetReleaseContentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetReleaseContentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetReleaseContentResponse proto.InternalMessageInfo -func (m *GetReleaseContentResponse) GetRelease() *hapi_release5.Release { +func (m *GetReleaseContentResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -356,37 +481,63 @@ func (m *GetReleaseContentResponse) GetRelease() *hapi_release5.Release { // UpdateReleaseRequest updates a release. type UpdateReleaseRequest struct { // The name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Chart is the protobuf representation of a chart. - Chart *hapi_chart3.Chart `protobuf:"bytes,2,opt,name=chart" json:"chart,omitempty"` + Chart *chart.Chart `protobuf:"bytes,2,opt,name=chart,proto3" json:"chart,omitempty"` // Values is a string containing (unparsed) YAML values. - Values *hapi_chart.Config `protobuf:"bytes,3,opt,name=values" json:"values,omitempty"` + Values *chart.Config `protobuf:"bytes,3,opt,name=values,proto3" json:"values,omitempty"` // dry_run, if true, will run through the release logic, but neither create - DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` // DisableHooks causes the server to skip running any hooks for the upgrade. 
- DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` // Performs pods restart for resources if applicable - Recreate bool `protobuf:"varint,6,opt,name=recreate" json:"recreate,omitempty"` + Recreate bool `protobuf:"varint,6,opt,name=recreate,proto3" json:"recreate,omitempty"` // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,7,opt,name=timeout" json:"timeout,omitempty"` + Timeout int64 `protobuf:"varint,7,opt,name=timeout,proto3" json:"timeout,omitempty"` // ResetValues will cause Tiller to ignore stored values, resetting to default values. - ResetValues bool `protobuf:"varint,8,opt,name=reset_values,json=resetValues" json:"reset_values,omitempty"` + ResetValues bool `protobuf:"varint,8,opt,name=reset_values,json=resetValues,proto3" json:"reset_values,omitempty"` // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state // before marking the release as successful. It will wait for as long as timeout - Wait bool `protobuf:"varint,9,opt,name=wait" json:"wait,omitempty"` + Wait bool `protobuf:"varint,9,opt,name=wait,proto3" json:"wait,omitempty"` // ReuseValues will cause Tiller to reuse the values from the last release. // This is ignored if reset_values is set. - ReuseValues bool `protobuf:"varint,10,opt,name=reuse_values,json=reuseValues" json:"reuse_values,omitempty"` + ReuseValues bool `protobuf:"varint,10,opt,name=reuse_values,json=reuseValues,proto3" json:"reuse_values,omitempty"` // Force resource update through delete/recreate if needed. - Force bool `protobuf:"varint,11,opt,name=force" json:"force,omitempty"` + Force bool `protobuf:"varint,11,opt,name=force,proto3" json:"force,omitempty"` // Description, if set, will set the description for the updated release - Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"` + // Render subchart notes if enabled + SubNotes bool `protobuf:"varint,13,opt,name=subNotes,proto3" json:"subNotes,omitempty"` + // Allow deletion of new resources created in this update when update failed + CleanupOnFail bool `protobuf:"varint,14,opt,name=cleanup_on_fail,json=cleanupOnFail,proto3" json:"cleanup_on_fail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateReleaseRequest) Reset() { *m = UpdateReleaseRequest{} } +func (m *UpdateReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateReleaseRequest) ProtoMessage() {} +func (*UpdateReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{7} +} +func (m *UpdateReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateReleaseRequest.Unmarshal(m, b) +} +func (m *UpdateReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateReleaseRequest.Merge(dst, src) +} +func (m *UpdateReleaseRequest) XXX_Size() int { + return xxx_messageInfo_UpdateReleaseRequest.Size(m) +} +func (m *UpdateReleaseRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_UpdateReleaseRequest.DiscardUnknown(m) } -func (m *UpdateReleaseRequest) Reset() { *m = UpdateReleaseRequest{} } -func (m *UpdateReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateReleaseRequest) ProtoMessage() {} -func (*UpdateReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +var xxx_messageInfo_UpdateReleaseRequest proto.InternalMessageInfo func (m *UpdateReleaseRequest) GetName() string { if m != nil { @@ -395,14 +546,14 @@ func (m *UpdateReleaseRequest) GetName() string { return "" } -func (m *UpdateReleaseRequest) GetChart() *hapi_chart3.Chart { +func (m *UpdateReleaseRequest) GetChart() *chart.Chart { if m != nil { return m.Chart } return nil } -func (m *UpdateReleaseRequest) GetValues() *hapi_chart.Config { +func (m *UpdateReleaseRequest) GetValues() *chart.Config { if m != nil { return m.Values } @@ -472,17 +623,53 @@ func (m *UpdateReleaseRequest) GetDescription() string { return "" } +func (m *UpdateReleaseRequest) GetSubNotes() bool { + if m != nil { + return m.SubNotes + } + return false +} + +func (m *UpdateReleaseRequest) GetCleanupOnFail() bool { + if m != nil { + return m.CleanupOnFail + } + return false +} + // UpdateReleaseResponse is the response to an update request. type UpdateReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UpdateReleaseResponse) Reset() { *m = UpdateReleaseResponse{} } -func (m *UpdateReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateReleaseResponse) ProtoMessage() {} -func (*UpdateReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *UpdateReleaseResponse) Reset() { *m = UpdateReleaseResponse{} } +func (m *UpdateReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateReleaseResponse) ProtoMessage() {} +func (*UpdateReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{8} +} +func (m *UpdateReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateReleaseResponse.Unmarshal(m, b) +} +func (m *UpdateReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateReleaseResponse.Merge(dst, src) +} +func (m *UpdateReleaseResponse) XXX_Size() int { + return xxx_messageInfo_UpdateReleaseResponse.Size(m) +} +func (m *UpdateReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateReleaseResponse.DiscardUnknown(m) +} -func (m *UpdateReleaseResponse) GetRelease() *hapi_release5.Release { +var xxx_messageInfo_UpdateReleaseResponse proto.InternalMessageInfo + +func (m *UpdateReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -491,30 +678,54 @@ func (m *UpdateReleaseResponse) GetRelease() *hapi_release5.Release { type RollbackReleaseRequest struct { // The name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // dry_run, if true, will run through the release logic but no create - DryRun bool 
`protobuf:"varint,2,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + DryRun bool `protobuf:"varint,2,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` // DisableHooks causes the server to skip running any hooks for the rollback - DisableHooks bool `protobuf:"varint,3,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + DisableHooks bool `protobuf:"varint,3,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` // Version is the version of the release to deploy. - Version int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + Version int32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` // Performs pods restart for resources if applicable - Recreate bool `protobuf:"varint,5,opt,name=recreate" json:"recreate,omitempty"` + Recreate bool `protobuf:"varint,5,opt,name=recreate,proto3" json:"recreate,omitempty"` // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,6,opt,name=timeout" json:"timeout,omitempty"` + Timeout int64 `protobuf:"varint,6,opt,name=timeout,proto3" json:"timeout,omitempty"` // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state // before marking the release as successful. It will wait for as long as timeout - Wait bool `protobuf:"varint,7,opt,name=wait" json:"wait,omitempty"` + Wait bool `protobuf:"varint,7,opt,name=wait,proto3" json:"wait,omitempty"` // Force resource update through delete/recreate if needed. - Force bool `protobuf:"varint,8,opt,name=force" json:"force,omitempty"` + Force bool `protobuf:"varint,8,opt,name=force,proto3" json:"force,omitempty"` // Description, if set, will set the description for the rollback - Description string `protobuf:"bytes,9,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"` + // Allow deletion of new resources created in this rollback when rollback failed + CleanupOnFail bool `protobuf:"varint,10,opt,name=cleanup_on_fail,json=cleanupOnFail,proto3" json:"cleanup_on_fail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } -func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseRequest) ProtoMessage() {} -func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } +func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseRequest) ProtoMessage() {} +func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{9} +} +func (m *RollbackReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackReleaseRequest.Unmarshal(m, b) +} +func (m *RollbackReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *RollbackReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackReleaseRequest.Merge(dst, src) +} +func (m *RollbackReleaseRequest) XXX_Size() int { + return xxx_messageInfo_RollbackReleaseRequest.Size(m) +} +func (m *RollbackReleaseRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_RollbackReleaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackReleaseRequest proto.InternalMessageInfo func (m *RollbackReleaseRequest) GetName() string { if m != nil { @@ -579,17 +790,46 @@ func (m *RollbackReleaseRequest) GetDescription() string { return "" } +func (m *RollbackReleaseRequest) GetCleanupOnFail() bool { + if m != nil { + return m.CleanupOnFail + } + return false +} + // RollbackReleaseResponse is the response to an update request. type RollbackReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } -func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseResponse) ProtoMessage() {} -func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } +func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseResponse) ProtoMessage() {} +func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{10} +} +func (m *RollbackReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackReleaseResponse.Unmarshal(m, b) +} +func (m *RollbackReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *RollbackReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackReleaseResponse.Merge(dst, src) +} +func (m *RollbackReleaseResponse) XXX_Size() int { + return xxx_messageInfo_RollbackReleaseResponse.Size(m) +} +func (m *RollbackReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackReleaseResponse proto.InternalMessageInfo -func (m *RollbackReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *RollbackReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -599,46 +839,69 @@ func (m *RollbackReleaseResponse) GetRelease() *hapi_release5.Release { // InstallReleaseRequest is the request for an installation of a chart. type InstallReleaseRequest struct { // Chart is the protobuf representation of a chart. - Chart *hapi_chart3.Chart `protobuf:"bytes,1,opt,name=chart" json:"chart,omitempty"` + Chart *chart.Chart `protobuf:"bytes,1,opt,name=chart,proto3" json:"chart,omitempty"` // Values is a string containing (unparsed) YAML values. - Values *hapi_chart.Config `protobuf:"bytes,2,opt,name=values" json:"values,omitempty"` + Values *chart.Config `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"` // DryRun, if true, will run through the release logic, but neither create // a release object nor deploy to Kubernetes. The release object returned // in the response will be fake. - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` // Name is the candidate release name. 
This must be unique to the // namespace, otherwise the server will return an error. If it is not // supplied, the server will autogenerate one. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // DisableHooks causes the server to skip running any hooks for the install. - DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` // Namespace is the kubernetes namespace of the release. - Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,6,opt,name=namespace,proto3" json:"namespace,omitempty"` // Reuse_name requests that Tiller re-uses a name, instead of erroring out. - ReuseName bool `protobuf:"varint,7,opt,name=reuse_name,json=reuseName" json:"reuse_name,omitempty"` + ReuseName bool `protobuf:"varint,7,opt,name=reuse_name,json=reuseName,proto3" json:"reuse_name,omitempty"` // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,8,opt,name=timeout" json:"timeout,omitempty"` + Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"` // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state // before marking the release as successful. It will wait for as long as timeout - Wait bool `protobuf:"varint,9,opt,name=wait" json:"wait,omitempty"` - DisableCrdHook bool `protobuf:"varint,10,opt,name=disable_crd_hook,json=disableCrdHook" json:"disable_crd_hook,omitempty"` + Wait bool `protobuf:"varint,9,opt,name=wait,proto3" json:"wait,omitempty"` + DisableCrdHook bool `protobuf:"varint,10,opt,name=disable_crd_hook,json=disableCrdHook,proto3" json:"disable_crd_hook,omitempty"` // Description, if set, will set the description for the installed release - Description string `protobuf:"bytes,11,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description,omitempty"` + SubNotes bool `protobuf:"varint,12,opt,name=subNotes,proto3" json:"subNotes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } +func (m *InstallReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseRequest) ProtoMessage() {} +func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{11} +} +func (m *InstallReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstallReleaseRequest.Unmarshal(m, b) +} +func (m *InstallReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstallReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *InstallReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstallReleaseRequest.Merge(dst, src) +} +func (m *InstallReleaseRequest) XXX_Size() int { + return xxx_messageInfo_InstallReleaseRequest.Size(m) +} +func (m *InstallReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InstallReleaseRequest.DiscardUnknown(m) } -func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } -func (m *InstallReleaseRequest) String() string { return 
proto.CompactTextString(m) } -func (*InstallReleaseRequest) ProtoMessage() {} -func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +var xxx_messageInfo_InstallReleaseRequest proto.InternalMessageInfo -func (m *InstallReleaseRequest) GetChart() *hapi_chart3.Chart { +func (m *InstallReleaseRequest) GetChart() *chart.Chart { if m != nil { return m.Chart } return nil } -func (m *InstallReleaseRequest) GetValues() *hapi_chart.Config { +func (m *InstallReleaseRequest) GetValues() *chart.Config { if m != nil { return m.Values } @@ -708,17 +971,46 @@ func (m *InstallReleaseRequest) GetDescription() string { return "" } +func (m *InstallReleaseRequest) GetSubNotes() bool { + if m != nil { + return m.SubNotes + } + return false +} + // InstallReleaseResponse is the response from a release installation. type InstallReleaseResponse struct { - Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } -func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*InstallReleaseResponse) ProtoMessage() {} -func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } +func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseResponse) ProtoMessage() {} +func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{12} +} +func (m *InstallReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstallReleaseResponse.Unmarshal(m, b) +} +func (m *InstallReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstallReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *InstallReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstallReleaseResponse.Merge(dst, src) +} +func (m *InstallReleaseResponse) XXX_Size() int { + return xxx_messageInfo_InstallReleaseResponse.Size(m) +} +func (m *InstallReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InstallReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InstallReleaseResponse proto.InternalMessageInfo -func (m *InstallReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *InstallReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -728,21 +1020,43 @@ func (m *InstallReleaseResponse) GetRelease() *hapi_release5.Release { // UninstallReleaseRequest represents a request to uninstall a named release. type UninstallReleaseRequest struct { // Name is the name of the release to delete. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // DisableHooks causes the server to skip running any hooks for the uninstall. - DisableHooks bool `protobuf:"varint,2,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + DisableHooks bool `protobuf:"varint,2,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` // Purge removes the release from the store and make its name free for later use. 
- Purge bool `protobuf:"varint,3,opt,name=purge" json:"purge,omitempty"` + Purge bool `protobuf:"varint,3,opt,name=purge,proto3" json:"purge,omitempty"` // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,4,opt,name=timeout" json:"timeout,omitempty"` - // Description, if set, will set the description for the uninnstalled release - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Description, if set, will set the description for the uninstalled release + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninstallReleaseRequest) Reset() { *m = UninstallReleaseRequest{} } -func (m *UninstallReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*UninstallReleaseRequest) ProtoMessage() {} -func (*UninstallReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *UninstallReleaseRequest) Reset() { *m = UninstallReleaseRequest{} } +func (m *UninstallReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*UninstallReleaseRequest) ProtoMessage() {} +func (*UninstallReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{13} +} +func (m *UninstallReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninstallReleaseRequest.Unmarshal(m, b) +} +func (m *UninstallReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninstallReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *UninstallReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninstallReleaseRequest.Merge(dst, src) +} +func (m *UninstallReleaseRequest) XXX_Size() int { + return xxx_messageInfo_UninstallReleaseRequest.Size(m) +} +func (m *UninstallReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UninstallReleaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UninstallReleaseRequest proto.InternalMessageInfo func (m *UninstallReleaseRequest) GetName() string { if m != nil { @@ -782,17 +1096,39 @@ func (m *UninstallReleaseRequest) GetDescription() string { // UninstallReleaseResponse represents a successful response to an uninstall request. type UninstallReleaseResponse struct { // Release is the release that was marked deleted. 
- Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` // Info is an uninstall message - Info string `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Info string `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninstallReleaseResponse) Reset() { *m = UninstallReleaseResponse{} } +func (m *UninstallReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*UninstallReleaseResponse) ProtoMessage() {} +func (*UninstallReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{14} +} +func (m *UninstallReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninstallReleaseResponse.Unmarshal(m, b) +} +func (m *UninstallReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninstallReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *UninstallReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninstallReleaseResponse.Merge(dst, src) +} +func (m *UninstallReleaseResponse) XXX_Size() int { + return xxx_messageInfo_UninstallReleaseResponse.Size(m) +} +func (m *UninstallReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UninstallReleaseResponse.DiscardUnknown(m) } -func (m *UninstallReleaseResponse) Reset() { *m = UninstallReleaseResponse{} } -func (m *UninstallReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*UninstallReleaseResponse) ProtoMessage() {} -func (*UninstallReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +var xxx_messageInfo_UninstallReleaseResponse proto.InternalMessageInfo -func (m *UninstallReleaseResponse) GetRelease() *hapi_release5.Release { +func (m *UninstallReleaseResponse) GetRelease() *release.Release { if m != nil { return m.Release } @@ -808,23 +1144,67 @@ func (m *UninstallReleaseResponse) GetInfo() string { // GetVersionRequest requests for version information. 
type GetVersionRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } +func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionRequest) ProtoMessage() {} +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{15} +} +func (m *GetVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionRequest.Unmarshal(m, b) +} +func (m *GetVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionRequest.Merge(dst, src) +} +func (m *GetVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetVersionRequest.Size(m) +} +func (m *GetVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionRequest.DiscardUnknown(m) } -func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } -func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionRequest) ProtoMessage() {} -func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +var xxx_messageInfo_GetVersionRequest proto.InternalMessageInfo type GetVersionResponse struct { - Version *hapi_version.Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` + Version *version.Version `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionResponse) Reset() { *m = GetVersionResponse{} } +func (m *GetVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionResponse) ProtoMessage() {} +func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{16} +} +func (m *GetVersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionResponse.Unmarshal(m, b) +} +func (m *GetVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionResponse.Marshal(b, m, deterministic) +} +func (dst *GetVersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionResponse.Merge(dst, src) +} +func (m *GetVersionResponse) XXX_Size() int { + return xxx_messageInfo_GetVersionResponse.Size(m) +} +func (m *GetVersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionResponse.DiscardUnknown(m) } -func (m *GetVersionResponse) Reset() { *m = GetVersionResponse{} } -func (m *GetVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionResponse) ProtoMessage() {} -func (*GetVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +var xxx_messageInfo_GetVersionResponse proto.InternalMessageInfo -func (m *GetVersionResponse) GetVersion() *hapi_version.Version { +func (m *GetVersionResponse) GetVersion() *version.Version { if m != nil { return m.Version } @@ -834,15 +1214,37 @@ func (m *GetVersionResponse) GetVersion() *hapi_version.Version { // GetHistoryRequest requests a release's history. type GetHistoryRequest struct { // The name of the release. 
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The maximum number of releases to include. - Max int32 `protobuf:"varint,2,opt,name=max" json:"max,omitempty"` + Max int32 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetHistoryRequest) Reset() { *m = GetHistoryRequest{} } -func (m *GetHistoryRequest) String() string { return proto.CompactTextString(m) } -func (*GetHistoryRequest) ProtoMessage() {} -func (*GetHistoryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *GetHistoryRequest) Reset() { *m = GetHistoryRequest{} } +func (m *GetHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*GetHistoryRequest) ProtoMessage() {} +func (*GetHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{17} +} +func (m *GetHistoryRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHistoryRequest.Unmarshal(m, b) +} +func (m *GetHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHistoryRequest.Marshal(b, m, deterministic) +} +func (dst *GetHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHistoryRequest.Merge(dst, src) +} +func (m *GetHistoryRequest) XXX_Size() int { + return xxx_messageInfo_GetHistoryRequest.Size(m) +} +func (m *GetHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHistoryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHistoryRequest proto.InternalMessageInfo func (m *GetHistoryRequest) GetName() string { if m != nil { @@ -860,15 +1262,37 @@ func (m *GetHistoryRequest) GetMax() int32 { // GetHistoryResponse is received in response to a GetHistory rpc. 
type GetHistoryResponse struct { - Releases []*hapi_release5.Release `protobuf:"bytes,1,rep,name=releases" json:"releases,omitempty"` + Releases []*release.Release `protobuf:"bytes,1,rep,name=releases,proto3" json:"releases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetHistoryResponse) Reset() { *m = GetHistoryResponse{} } -func (m *GetHistoryResponse) String() string { return proto.CompactTextString(m) } -func (*GetHistoryResponse) ProtoMessage() {} -func (*GetHistoryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *GetHistoryResponse) Reset() { *m = GetHistoryResponse{} } +func (m *GetHistoryResponse) String() string { return proto.CompactTextString(m) } +func (*GetHistoryResponse) ProtoMessage() {} +func (*GetHistoryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{18} +} +func (m *GetHistoryResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHistoryResponse.Unmarshal(m, b) +} +func (m *GetHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHistoryResponse.Marshal(b, m, deterministic) +} +func (dst *GetHistoryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHistoryResponse.Merge(dst, src) +} +func (m *GetHistoryResponse) XXX_Size() int { + return xxx_messageInfo_GetHistoryResponse.Size(m) +} +func (m *GetHistoryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHistoryResponse.DiscardUnknown(m) +} -func (m *GetHistoryResponse) GetReleases() []*hapi_release5.Release { +var xxx_messageInfo_GetHistoryResponse proto.InternalMessageInfo + +func (m *GetHistoryResponse) GetReleases() []*release.Release { if m != nil { return m.Releases } @@ -878,19 +1302,41 @@ func (m *GetHistoryResponse) GetReleases() []*hapi_release5.Release { // TestReleaseRequest is a request to get the status of a release. type TestReleaseRequest struct { // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // timeout specifies the max amount of time any kubernetes client command can run. 
- Timeout int64 `protobuf:"varint,2,opt,name=timeout" json:"timeout,omitempty"` + Timeout int64 `protobuf:"varint,2,opt,name=timeout,proto3" json:"timeout,omitempty"` // cleanup specifies whether or not to attempt pod deletion after test completes - Cleanup bool `protobuf:"varint,3,opt,name=cleanup" json:"cleanup,omitempty"` + Cleanup bool `protobuf:"varint,3,opt,name=cleanup,proto3" json:"cleanup,omitempty"` // parallel specifies whether or not to run test pods in parallel - Parallel bool `protobuf:"varint,4,opt,name=parallel" json:"parallel,omitempty"` + Parallel bool `protobuf:"varint,4,opt,name=parallel,proto3" json:"parallel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestReleaseRequest) Reset() { *m = TestReleaseRequest{} } +func (m *TestReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*TestReleaseRequest) ProtoMessage() {} +func (*TestReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{19} +} +func (m *TestReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestReleaseRequest.Unmarshal(m, b) +} +func (m *TestReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestReleaseRequest.Marshal(b, m, deterministic) +} +func (dst *TestReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestReleaseRequest.Merge(dst, src) +} +func (m *TestReleaseRequest) XXX_Size() int { + return xxx_messageInfo_TestReleaseRequest.Size(m) +} +func (m *TestReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TestReleaseRequest.DiscardUnknown(m) } -func (m *TestReleaseRequest) Reset() { *m = TestReleaseRequest{} } -func (m *TestReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*TestReleaseRequest) ProtoMessage() {} -func (*TestReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +var xxx_messageInfo_TestReleaseRequest proto.InternalMessageInfo func (m *TestReleaseRequest) GetName() string { if m != nil { @@ -922,14 +1368,36 @@ func (m *TestReleaseRequest) GetParallel() bool { // TestReleaseResponse represents a message from executing a test type TestReleaseResponse struct { - Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` - Status hapi_release1.TestRun_Status `protobuf:"varint,2,opt,name=status,enum=hapi.release.TestRun_Status" json:"status,omitempty"` + Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` + Status release.TestRun_Status `protobuf:"varint,2,opt,name=status,proto3,enum=hapi.release.TestRun_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TestReleaseResponse) Reset() { *m = TestReleaseResponse{} } -func (m *TestReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*TestReleaseResponse) ProtoMessage() {} -func (*TestReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *TestReleaseResponse) Reset() { *m = TestReleaseResponse{} } +func (m *TestReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*TestReleaseResponse) ProtoMessage() {} +func (*TestReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_tiller_bb72ee4a42494734, []int{20} +} +func (m *TestReleaseResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_TestReleaseResponse.Unmarshal(m, b) +} +func (m *TestReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestReleaseResponse.Marshal(b, m, deterministic) +} +func (dst *TestReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestReleaseResponse.Merge(dst, src) +} +func (m *TestReleaseResponse) XXX_Size() int { + return xxx_messageInfo_TestReleaseResponse.Size(m) +} +func (m *TestReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TestReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TestReleaseResponse proto.InternalMessageInfo func (m *TestReleaseResponse) GetMsg() string { if m != nil { @@ -938,11 +1406,11 @@ func (m *TestReleaseResponse) GetMsg() string { return "" } -func (m *TestReleaseResponse) GetStatus() hapi_release1.TestRun_Status { +func (m *TestReleaseResponse) GetStatus() release.TestRun_Status { if m != nil { return m.Status } - return hapi_release1.TestRun_UNKNOWN + return release.TestRun_UNKNOWN } func init() { @@ -979,8 +1447,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for ReleaseService service - +// ReleaseServiceClient is the client API for ReleaseService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ReleaseServiceClient interface { // ListReleases retrieves release history. // TODO: Allow filtering the set of releases by @@ -1001,7 +1470,7 @@ type ReleaseServiceClient interface { GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) // RollbackRelease rolls back a release to a previous version. RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) - // ReleaseHistory retrieves a releasse's history. + // ReleaseHistory retrieves a release's history. GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) // RunReleaseTest executes the tests defined of a named release RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) @@ -1016,7 +1485,7 @@ func NewReleaseServiceClient(cc *grpc.ClientConn) ReleaseServiceClient { } func (c *releaseServiceClient) ListReleases(ctx context.Context, in *ListReleasesRequest, opts ...grpc.CallOption) (ReleaseService_ListReleasesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_ReleaseService_serviceDesc.Streams[0], c.cc, "/hapi.services.tiller.ReleaseService/ListReleases", opts...) + stream, err := c.cc.NewStream(ctx, &_ReleaseService_serviceDesc.Streams[0], "/hapi.services.tiller.ReleaseService/ListReleases", opts...) if err != nil { return nil, err } @@ -1049,7 +1518,7 @@ func (x *releaseServiceListReleasesClient) Recv() (*ListReleasesResponse, error) func (c *releaseServiceClient) GetReleaseStatus(ctx context.Context, in *GetReleaseStatusRequest, opts ...grpc.CallOption) (*GetReleaseStatusResponse, error) { out := new(GetReleaseStatusResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseStatus", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseStatus", in, out, opts...) 
if err != nil { return nil, err } @@ -1058,7 +1527,7 @@ func (c *releaseServiceClient) GetReleaseStatus(ctx context.Context, in *GetRele func (c *releaseServiceClient) GetReleaseContent(ctx context.Context, in *GetReleaseContentRequest, opts ...grpc.CallOption) (*GetReleaseContentResponse, error) { out := new(GetReleaseContentResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseContent", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseContent", in, out, opts...) if err != nil { return nil, err } @@ -1067,7 +1536,7 @@ func (c *releaseServiceClient) GetReleaseContent(ctx context.Context, in *GetRel func (c *releaseServiceClient) UpdateRelease(ctx context.Context, in *UpdateReleaseRequest, opts ...grpc.CallOption) (*UpdateReleaseResponse, error) { out := new(UpdateReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UpdateRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UpdateRelease", in, out, opts...) if err != nil { return nil, err } @@ -1076,7 +1545,7 @@ func (c *releaseServiceClient) UpdateRelease(ctx context.Context, in *UpdateRele func (c *releaseServiceClient) InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) { out := new(InstallReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/InstallRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/InstallRelease", in, out, opts...) if err != nil { return nil, err } @@ -1085,7 +1554,7 @@ func (c *releaseServiceClient) InstallRelease(ctx context.Context, in *InstallRe func (c *releaseServiceClient) UninstallRelease(ctx context.Context, in *UninstallReleaseRequest, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) { out := new(UninstallReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UninstallRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UninstallRelease", in, out, opts...) if err != nil { return nil, err } @@ -1094,7 +1563,7 @@ func (c *releaseServiceClient) UninstallRelease(ctx context.Context, in *Uninsta func (c *releaseServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { out := new(GetVersionResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetVersion", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetVersion", in, out, opts...) if err != nil { return nil, err } @@ -1103,7 +1572,7 @@ func (c *releaseServiceClient) GetVersion(ctx context.Context, in *GetVersionReq func (c *releaseServiceClient) RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) { out := new(RollbackReleaseResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/RollbackRelease", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/RollbackRelease", in, out, opts...) 
if err != nil { return nil, err } @@ -1112,7 +1581,7 @@ func (c *releaseServiceClient) RollbackRelease(ctx context.Context, in *Rollback func (c *releaseServiceClient) GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) { out := new(GetHistoryResponse) - err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetHistory", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetHistory", in, out, opts...) if err != nil { return nil, err } @@ -1120,7 +1589,7 @@ func (c *releaseServiceClient) GetHistory(ctx context.Context, in *GetHistoryReq } func (c *releaseServiceClient) RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) { - stream, err := grpc.NewClientStream(ctx, &_ReleaseService_serviceDesc.Streams[1], c.cc, "/hapi.services.tiller.ReleaseService/RunReleaseTest", opts...) + stream, err := c.cc.NewStream(ctx, &_ReleaseService_serviceDesc.Streams[1], "/hapi.services.tiller.ReleaseService/RunReleaseTest", opts...) if err != nil { return nil, err } @@ -1151,8 +1620,7 @@ func (x *releaseServiceRunReleaseTestClient) Recv() (*TestReleaseResponse, error return m, nil } -// Server API for ReleaseService service - +// ReleaseServiceServer is the server API for ReleaseService service. type ReleaseServiceServer interface { // ListReleases retrieves release history. // TODO: Allow filtering the set of releases by @@ -1173,7 +1641,7 @@ type ReleaseServiceServer interface { GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) // RollbackRelease rolls back a release to a previous version. RollbackRelease(context.Context, *RollbackReleaseRequest) (*RollbackReleaseResponse, error) - // ReleaseHistory retrieves a releasse's history. + // ReleaseHistory retrieves a release's history. 
GetHistory(context.Context, *GetHistoryRequest) (*GetHistoryResponse, error) // RunReleaseTest executes the tests defined of a named release RunReleaseTest(*TestReleaseRequest, ReleaseService_RunReleaseTestServer) error @@ -1421,89 +1889,92 @@ var _ReleaseService_serviceDesc = grpc.ServiceDesc{ Metadata: "hapi/services/tiller.proto", } -func init() { proto.RegisterFile("hapi/services/tiller.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1289 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xed, 0x72, 0xdb, 0x44, - 0x17, 0x8e, 0x2d, 0x7f, 0x1e, 0xa7, 0x7e, 0xdd, 0x6d, 0x9a, 0xa8, 0x7a, 0x0b, 0x63, 0xc4, 0x40, - 0xdd, 0x42, 0x1d, 0x30, 0xfc, 0x61, 0x86, 0x61, 0x26, 0x75, 0x3d, 0x49, 0x21, 0xa4, 0x33, 0x72, - 0x5b, 0x66, 0x98, 0x61, 0x3c, 0x8a, 0xbd, 0x6e, 0x45, 0x65, 0xc9, 0x68, 0x57, 0xa1, 0xb9, 0x00, - 0x98, 0xe1, 0x3e, 0xb8, 0x10, 0xee, 0x83, 0xeb, 0xe0, 0x3f, 0xb3, 0x5f, 0x8a, 0x56, 0x96, 0x1c, - 0x91, 0x3f, 0xb1, 0x76, 0xcf, 0xd9, 0xf3, 0xf1, 0x3c, 0x7b, 0xce, 0x9e, 0x80, 0xf5, 0xc6, 0x5d, - 0x7b, 0x87, 0x04, 0x47, 0x17, 0xde, 0x1c, 0x93, 0x43, 0xea, 0xf9, 0x3e, 0x8e, 0x86, 0xeb, 0x28, - 0xa4, 0x21, 0xda, 0x63, 0xb2, 0xa1, 0x92, 0x0d, 0x85, 0xcc, 0xda, 0xe7, 0x27, 0xe6, 0x6f, 0xdc, - 0x88, 0x8a, 0xbf, 0x42, 0xdb, 0x3a, 0x48, 0xef, 0x87, 0xc1, 0xd2, 0x7b, 0x2d, 0x05, 0xc2, 0x45, - 0x84, 0x7d, 0xec, 0x12, 0xac, 0x7e, 0xb5, 0x43, 0x4a, 0xe6, 0x05, 0xcb, 0x50, 0x0a, 0xfe, 0xaf, - 0x09, 0x28, 0x26, 0x74, 0x16, 0xc5, 0x81, 0x14, 0xde, 0xd3, 0x84, 0x84, 0xba, 0x34, 0x26, 0x9a, - 0xb3, 0x0b, 0x1c, 0x11, 0x2f, 0x0c, 0xd4, 0xaf, 0x90, 0xd9, 0x7f, 0x55, 0xe1, 0xce, 0xa9, 0x47, - 0xa8, 0x23, 0x0e, 0x12, 0x07, 0xff, 0x12, 0x63, 0x42, 0xd1, 0x1e, 0xd4, 0x7d, 0x6f, 0xe5, 0x51, - 0xb3, 0xd2, 0xaf, 0x0c, 0x0c, 0x47, 0x2c, 0xd0, 0x3e, 0x34, 0xc2, 0xe5, 0x92, 0x60, 0x6a, 0x56, - 0xfb, 0x95, 0x41, 0xdb, 0x91, 0x2b, 0xf4, 0x0d, 0x34, 0x49, 0x18, 0xd1, 0xd9, 0xf9, 0xa5, 0x69, - 0xf4, 0x2b, 0x83, 0xee, 0xe8, 0xa3, 0x61, 0x1e, 0x4e, 0x43, 0xe6, 0x69, 0x1a, 0x46, 0x74, 0xc8, - 0xfe, 0x3c, 0xb9, 0x74, 0x1a, 0x84, 0xff, 0x32, 0xbb, 0x4b, 0xcf, 0xa7, 0x38, 0x32, 0x6b, 0xc2, - 0xae, 0x58, 0xa1, 0x63, 0x00, 0x6e, 0x37, 0x8c, 0x16, 0x38, 0x32, 0xeb, 0xdc, 0xf4, 0xa0, 0x84, - 0xe9, 0xe7, 0x4c, 0xdf, 0x69, 0x13, 0xf5, 0x89, 0xbe, 0x86, 0x5d, 0x01, 0xc9, 0x6c, 0x1e, 0x2e, - 0x30, 0x31, 0x1b, 0x7d, 0x63, 0xd0, 0x1d, 0xdd, 0x13, 0xa6, 0x14, 0xfc, 0x53, 0x01, 0xda, 0x38, - 0x5c, 0x60, 0xa7, 0x23, 0xd4, 0xd9, 0x37, 0x41, 0xf7, 0xa1, 0x1d, 0xb8, 0x2b, 0x4c, 0xd6, 0xee, - 0x1c, 0x9b, 0x4d, 0x1e, 0xe1, 0xd5, 0x86, 0x1d, 0x40, 0x4b, 0x39, 0xb7, 0x9f, 0x40, 0x43, 0xa4, - 0x86, 0x3a, 0xd0, 0x7c, 0x79, 0xf6, 0xdd, 0xd9, 0xf3, 0x1f, 0xce, 0x7a, 0x3b, 0xa8, 0x05, 0xb5, - 0xb3, 0xa3, 0xef, 0x27, 0xbd, 0x0a, 0xba, 0x0d, 0xb7, 0x4e, 0x8f, 0xa6, 0x2f, 0x66, 0xce, 0xe4, - 0x74, 0x72, 0x34, 0x9d, 0x3c, 0xed, 0x55, 0x51, 0x17, 0x60, 0x7c, 0x72, 0xe4, 0xbc, 0x98, 0x71, - 0x15, 0xc3, 0x7e, 0x1f, 0xda, 0x49, 0x0e, 0xa8, 0x09, 0xc6, 0xd1, 0x74, 0x2c, 0x4c, 0x3c, 0x9d, - 0x4c, 0xc7, 0xbd, 0x8a, 0xfd, 0x47, 0x05, 0xf6, 0x74, 0xca, 0xc8, 0x3a, 0x0c, 0x08, 0x66, 0x9c, - 0xcd, 0xc3, 0x38, 0x48, 0x38, 0xe3, 0x0b, 0x84, 0xa0, 0x16, 0xe0, 0x77, 0x8a, 0x31, 0xfe, 0xcd, - 0x34, 0x69, 0x48, 0x5d, 0x9f, 0xb3, 0x65, 0x38, 0x62, 0x81, 0x3e, 0x87, 0x96, 0x84, 0x82, 0x98, - 0xb5, 0xbe, 0x31, 0xe8, 0x8c, 0xee, 0xea, 0x00, 0x49, 0x8f, 0x4e, 0xa2, 0x66, 0x1f, 0xc3, 0xc1, - 0x31, 0x56, 0x91, 0x08, 0xfc, 0xd4, 0x0d, 0x62, 0x7e, 0xdd, 0x15, 0xe6, 0xc1, 0x30, 0xbf, 0xee, - 0x0a, 0x23, 
0x13, 0x9a, 0xf2, 0xfa, 0xf1, 0x70, 0xea, 0x8e, 0x5a, 0xda, 0x14, 0xcc, 0x4d, 0x43, - 0x32, 0xaf, 0x3c, 0x4b, 0x1f, 0x43, 0x8d, 0x55, 0x06, 0x37, 0xd3, 0x19, 0x21, 0x3d, 0xce, 0x67, - 0xc1, 0x32, 0x74, 0xb8, 0x5c, 0xa7, 0xce, 0xc8, 0x52, 0x77, 0x92, 0xf6, 0x3a, 0x0e, 0x03, 0x8a, - 0x03, 0x7a, 0xb3, 0xf8, 0x4f, 0xe1, 0x5e, 0x8e, 0x25, 0x99, 0xc0, 0x21, 0x34, 0x65, 0x68, 0xdc, - 0x5a, 0x21, 0xae, 0x4a, 0xcb, 0xfe, 0xcd, 0x80, 0xbd, 0x97, 0xeb, 0x85, 0x4b, 0xb1, 0x12, 0x6d, - 0x09, 0xea, 0x01, 0xd4, 0x79, 0x87, 0x91, 0x58, 0xdc, 0x16, 0xb6, 0x45, 0x1b, 0x1a, 0xb3, 0xbf, - 0x8e, 0x90, 0xa3, 0x47, 0xd0, 0xb8, 0x70, 0xfd, 0x18, 0x13, 0x0e, 0x44, 0x82, 0x9a, 0xd4, 0xe4, - 0xed, 0xc9, 0x91, 0x1a, 0xe8, 0x00, 0x9a, 0x8b, 0xe8, 0x92, 0xf5, 0x17, 0x5e, 0x92, 0x2d, 0xa7, - 0xb1, 0x88, 0x2e, 0x9d, 0x38, 0x40, 0x1f, 0xc2, 0xad, 0x85, 0x47, 0xdc, 0x73, 0x1f, 0xcf, 0xde, - 0x84, 0xe1, 0x5b, 0xc2, 0xab, 0xb2, 0xe5, 0xec, 0xca, 0xcd, 0x13, 0xb6, 0x87, 0x2c, 0x76, 0x93, - 0xe6, 0x11, 0x76, 0x29, 0x36, 0x1b, 0x5c, 0x9e, 0xac, 0x19, 0x86, 0xd4, 0x5b, 0xe1, 0x30, 0xa6, - 0xbc, 0x94, 0x0c, 0x47, 0x2d, 0xd1, 0x07, 0xb0, 0x1b, 0x61, 0x82, 0xe9, 0x4c, 0x46, 0xd9, 0xe2, - 0x27, 0x3b, 0x7c, 0xef, 0x95, 0x08, 0x0b, 0x41, 0xed, 0x57, 0xd7, 0xa3, 0x66, 0x9b, 0x8b, 0xf8, - 0xb7, 0x38, 0x16, 0x13, 0xac, 0x8e, 0x81, 0x3a, 0x16, 0x13, 0x2c, 0x8f, 0xed, 0x41, 0x7d, 0x19, - 0x46, 0x73, 0x6c, 0x76, 0xb8, 0x4c, 0x2c, 0x50, 0x1f, 0x3a, 0x0b, 0x4c, 0xe6, 0x91, 0xb7, 0xa6, - 0x8c, 0xd1, 0x5d, 0x8e, 0x69, 0x7a, 0xcb, 0x3e, 0x81, 0xbb, 0x19, 0x1a, 0x6e, 0xca, 0xe8, 0xef, - 0x55, 0xd8, 0x77, 0x42, 0xdf, 0x3f, 0x77, 0xe7, 0x6f, 0x4b, 0x70, 0x9a, 0x82, 0xbf, 0xba, 0x1d, - 0x7e, 0x23, 0x07, 0xfe, 0xd4, 0x35, 0xad, 0x69, 0xd7, 0x54, 0x23, 0xa6, 0x5e, 0x4c, 0x4c, 0x43, - 0x27, 0x46, 0xa1, 0xde, 0x4c, 0xa1, 0x9e, 0x40, 0xda, 0xda, 0x02, 0x69, 0x7b, 0x13, 0xd2, 0x6f, - 0xe1, 0x60, 0x03, 0x87, 0x9b, 0x82, 0xfa, 0x4f, 0x15, 0xee, 0x3e, 0x0b, 0x08, 0x75, 0x7d, 0x3f, - 0x83, 0x69, 0x52, 0x13, 0x95, 0xd2, 0x35, 0x51, 0xfd, 0x2f, 0x35, 0x61, 0x68, 0xa4, 0x28, 0x06, - 0x6b, 0x29, 0x06, 0x4b, 0xd5, 0x89, 0xd6, 0x9d, 0x1a, 0x99, 0xee, 0x84, 0xde, 0x03, 0x10, 0x17, - 0x9b, 0x1b, 0x17, 0xe0, 0xb7, 0xf9, 0xce, 0x99, 0x6c, 0x46, 0x8a, 0xaf, 0x56, 0x3e, 0x5f, 0xe9, - 0x2a, 0x19, 0x40, 0x4f, 0xc5, 0x33, 0x8f, 0x16, 0x3c, 0x26, 0x59, 0x29, 0x5d, 0xb9, 0x3f, 0x8e, - 0x16, 0x2c, 0xaa, 0x2c, 0x87, 0x9d, 0x4d, 0x0e, 0x9f, 0xc1, 0x7e, 0x16, 0xf6, 0x9b, 0x52, 0xf8, - 0x67, 0x05, 0x0e, 0x5e, 0x06, 0x5e, 0x2e, 0x89, 0x79, 0x85, 0xb1, 0x01, 0x6b, 0x35, 0x07, 0xd6, - 0x3d, 0xa8, 0xaf, 0xe3, 0xe8, 0x35, 0x96, 0x34, 0x89, 0x45, 0x1a, 0xaf, 0x9a, 0x8e, 0x57, 0x26, - 0xe3, 0xfa, 0x66, 0xc6, 0x33, 0x30, 0x37, 0xa3, 0xbc, 0x61, 0xce, 0x2c, 0xaf, 0xe4, 0xed, 0x6a, - 0x8b, 0x77, 0xca, 0xbe, 0x03, 0xb7, 0x8f, 0x31, 0x7d, 0x25, 0xca, 0x54, 0x02, 0x60, 0x4f, 0x00, - 0xa5, 0x37, 0xaf, 0xfc, 0xc9, 0x2d, 0xdd, 0x9f, 0x1a, 0xec, 0x94, 0xbe, 0xd2, 0xb2, 0xbf, 0xe2, - 0xb6, 0x4f, 0x3c, 0x42, 0xc3, 0xe8, 0x72, 0x1b, 0xb8, 0x3d, 0x30, 0x56, 0xee, 0x3b, 0xf9, 0xb4, - 0xb1, 0x4f, 0xfb, 0x98, 0x47, 0x90, 0x1c, 0x95, 0x11, 0xa4, 0x07, 0x85, 0x4a, 0xb9, 0x41, 0xe1, - 0x1d, 0xa0, 0x17, 0x38, 0x99, 0x59, 0xae, 0x79, 0x63, 0x15, 0x4d, 0x55, 0x9d, 0x26, 0x13, 0x9a, - 0x73, 0x1f, 0xbb, 0x41, 0xbc, 0x96, 0xc4, 0xaa, 0x25, 0x6b, 0x6b, 0x6b, 0x37, 0x72, 0x7d, 0x1f, - 0xfb, 0xf2, 0xb9, 0x4a, 0xd6, 0xf6, 0x4f, 0x70, 0x47, 0xf3, 0x2c, 0x73, 0x60, 0xb9, 0x92, 0xd7, - 0xd2, 0x33, 0xfb, 0x44, 0x5f, 0x42, 0x43, 0x0c, 0x7d, 0xdc, 0x6f, 0x77, 0x74, 0x5f, 0xcf, 0x89, - 0x1b, 0x89, 0x03, 0x39, 0x25, 0x3a, 
0x52, 0x77, 0xf4, 0x77, 0x0b, 0xba, 0x6a, 0x6c, 0x11, 0x23, - 0x29, 0xf2, 0x60, 0x37, 0x3d, 0x9f, 0xa1, 0x87, 0xc5, 0x13, 0x6b, 0x66, 0xec, 0xb6, 0x1e, 0x95, - 0x51, 0x15, 0x19, 0xd8, 0x3b, 0x9f, 0x55, 0x10, 0x81, 0x5e, 0x76, 0x6c, 0x42, 0x8f, 0xf3, 0x6d, - 0x14, 0xcc, 0x69, 0xd6, 0xb0, 0xac, 0xba, 0x72, 0x8b, 0x2e, 0xf8, 0x7d, 0xd2, 0x67, 0x1d, 0x74, - 0xad, 0x19, 0x7d, 0xbc, 0xb2, 0x0e, 0x4b, 0xeb, 0x27, 0x7e, 0x7f, 0x86, 0x5b, 0xda, 0x6b, 0x8c, - 0x0a, 0xd0, 0xca, 0x9b, 0x9c, 0xac, 0x4f, 0x4a, 0xe9, 0x26, 0xbe, 0x56, 0xd0, 0xd5, 0x5b, 0x1c, - 0x2a, 0x30, 0x90, 0xfb, 0xfe, 0x58, 0x9f, 0x96, 0x53, 0x4e, 0xdc, 0x11, 0xe8, 0x65, 0xfb, 0x4b, - 0x11, 0x8f, 0x05, 0xdd, 0xb2, 0x88, 0xc7, 0xa2, 0xb6, 0x65, 0xef, 0x20, 0x17, 0xe0, 0xaa, 0xbd, - 0xa0, 0x07, 0x85, 0x84, 0xe8, 0x5d, 0xc9, 0x1a, 0x5c, 0xaf, 0x98, 0xb8, 0x58, 0xc3, 0xff, 0x32, - 0xaf, 0x3d, 0x2a, 0x80, 0x26, 0x7f, 0x38, 0xb2, 0x1e, 0x97, 0xd4, 0xce, 0x24, 0x25, 0x3b, 0xd6, - 0x96, 0xa4, 0xf4, 0x76, 0xb8, 0x25, 0xa9, 0x4c, 0xf3, 0xb3, 0x77, 0x90, 0x07, 0x5d, 0x27, 0x0e, - 0xa4, 0x6b, 0xd6, 0x16, 0x50, 0xc1, 0xe9, 0xcd, 0x8e, 0x67, 0x3d, 0x2c, 0xa1, 0x79, 0x55, 0xdf, - 0x4f, 0xe0, 0xc7, 0x96, 0x52, 0x3d, 0x6f, 0xf0, 0xff, 0xd8, 0xbf, 0xf8, 0x37, 0x00, 0x00, 0xff, - 0xff, 0xb6, 0x48, 0x98, 0x76, 0x9f, 0x10, 0x00, 0x00, +func init() { proto.RegisterFile("hapi/services/tiller.proto", fileDescriptor_tiller_bb72ee4a42494734) } + +var fileDescriptor_tiller_bb72ee4a42494734 = []byte{ + // 1337 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0x8e, 0x2d, 0xff, 0x1e, 0x27, 0xae, 0xbb, 0x4d, 0x13, 0xd5, 0x14, 0x26, 0x88, 0xa1, 0x75, + 0x0b, 0x75, 0x20, 0x70, 0xc3, 0x0c, 0xc3, 0x4c, 0xea, 0x86, 0xa4, 0x10, 0xd2, 0x19, 0xb9, 0x2d, + 0x33, 0xcc, 0x30, 0x1e, 0xc5, 0x5e, 0xb7, 0xa2, 0xb2, 0xd6, 0x68, 0x57, 0xa1, 0x79, 0x04, 0x2e, + 0x79, 0x07, 0xae, 0x79, 0x06, 0x6e, 0x79, 0x06, 0x5e, 0x86, 0xd9, 0x3f, 0x45, 0x2b, 0x4b, 0xae, + 0xc8, 0x4d, 0xac, 0xdd, 0x73, 0xf6, 0xfc, 0x7c, 0xdf, 0x9e, 0xb3, 0x67, 0x02, 0xfd, 0xd7, 0xde, + 0xd2, 0xdf, 0xa7, 0x38, 0xba, 0xf0, 0xa7, 0x98, 0xee, 0x33, 0x3f, 0x08, 0x70, 0x34, 0x5c, 0x46, + 0x84, 0x11, 0xb4, 0xcd, 0x65, 0x43, 0x2d, 0x1b, 0x4a, 0x59, 0x7f, 0x47, 0x9c, 0x98, 0xbe, 0xf6, + 0x22, 0x26, 0xff, 0x4a, 0xed, 0xfe, 0x6e, 0x7a, 0x9f, 0x84, 0x73, 0xff, 0x95, 0x12, 0x48, 0x17, + 0x11, 0x0e, 0xb0, 0x47, 0xb1, 0xfe, 0x35, 0x0e, 0x69, 0x99, 0x1f, 0xce, 0x89, 0x12, 0xbc, 0x67, + 0x08, 0x18, 0xa6, 0x6c, 0x12, 0xc5, 0xa1, 0x12, 0xde, 0x31, 0x84, 0x94, 0x79, 0x2c, 0xa6, 0x86, + 0xb3, 0x0b, 0x1c, 0x51, 0x9f, 0x84, 0xfa, 0x57, 0xca, 0x9c, 0xbf, 0xab, 0x70, 0xeb, 0xd4, 0xa7, + 0xcc, 0x95, 0x07, 0xa9, 0x8b, 0x7f, 0x8d, 0x31, 0x65, 0x68, 0x1b, 0xea, 0x81, 0xbf, 0xf0, 0x99, + 0x5d, 0xd9, 0xab, 0x0c, 0x2c, 0x57, 0x2e, 0xd0, 0x0e, 0x34, 0xc8, 0x7c, 0x4e, 0x31, 0xb3, 0xab, + 0x7b, 0x95, 0x41, 0xdb, 0x55, 0x2b, 0xf4, 0x0d, 0x34, 0x29, 0x89, 0xd8, 0xe4, 0xfc, 0xd2, 0xb6, + 0xf6, 0x2a, 0x83, 0xee, 0xc1, 0xc7, 0xc3, 0x3c, 0x9c, 0x86, 0xdc, 0xd3, 0x98, 0x44, 0x6c, 0xc8, + 0xff, 0x3c, 0xbe, 0x74, 0x1b, 0x54, 0xfc, 0x72, 0xbb, 0x73, 0x3f, 0x60, 0x38, 0xb2, 0x6b, 0xd2, + 0xae, 0x5c, 0xa1, 0x63, 0x00, 0x61, 0x97, 0x44, 0x33, 0x1c, 0xd9, 0x75, 0x61, 0x7a, 0x50, 0xc2, + 0xf4, 0x33, 0xae, 0xef, 0xb6, 0xa9, 0xfe, 0x44, 0x5f, 0xc3, 0xa6, 0x84, 0x64, 0x32, 0x25, 0x33, + 0x4c, 0xed, 0xc6, 0x9e, 0x35, 0xe8, 0x1e, 0xdc, 0x91, 0xa6, 0x34, 0xfc, 0x63, 0x09, 0xda, 0x88, + 0xcc, 0xb0, 0xdb, 0x91, 0xea, 0xfc, 0x9b, 0xa2, 0xbb, 0xd0, 0x0e, 0xbd, 0x05, 0xa6, 0x4b, 
0x6f, + 0x8a, 0xed, 0xa6, 0x88, 0xf0, 0x6a, 0xc3, 0x09, 0xa1, 0xa5, 0x9d, 0x3b, 0x8f, 0xa1, 0x21, 0x53, + 0x43, 0x1d, 0x68, 0xbe, 0x38, 0xfb, 0xfe, 0xec, 0xd9, 0x8f, 0x67, 0xbd, 0x0d, 0xd4, 0x82, 0xda, + 0xd9, 0xe1, 0x0f, 0x47, 0xbd, 0x0a, 0xba, 0x09, 0x5b, 0xa7, 0x87, 0xe3, 0xe7, 0x13, 0xf7, 0xe8, + 0xf4, 0xe8, 0x70, 0x7c, 0xf4, 0xa4, 0x57, 0x45, 0x5d, 0x80, 0xd1, 0xc9, 0xa1, 0xfb, 0x7c, 0x22, + 0x54, 0x2c, 0xe7, 0x03, 0x68, 0x27, 0x39, 0xa0, 0x26, 0x58, 0x87, 0xe3, 0x91, 0x34, 0xf1, 0xe4, + 0x68, 0x3c, 0xea, 0x55, 0x9c, 0xdf, 0x2b, 0xb0, 0x6d, 0x52, 0x46, 0x97, 0x24, 0xa4, 0x98, 0x73, + 0x36, 0x25, 0x71, 0x98, 0x70, 0x26, 0x16, 0x08, 0x41, 0x2d, 0xc4, 0x6f, 0x35, 0x63, 0xe2, 0x9b, + 0x6b, 0x32, 0xc2, 0xbc, 0x40, 0xb0, 0x65, 0xb9, 0x72, 0x81, 0x3e, 0x87, 0x96, 0x82, 0x82, 0xda, + 0xb5, 0x3d, 0x6b, 0xd0, 0x39, 0xb8, 0x6d, 0x02, 0xa4, 0x3c, 0xba, 0x89, 0x9a, 0x73, 0x0c, 0xbb, + 0xc7, 0x58, 0x47, 0x22, 0xf1, 0xd3, 0x37, 0x88, 0xfb, 0xf5, 0x16, 0x58, 0x04, 0xc3, 0xfd, 0x7a, + 0x0b, 0x8c, 0x6c, 0x68, 0xaa, 0xeb, 0x27, 0xc2, 0xa9, 0xbb, 0x7a, 0xe9, 0x30, 0xb0, 0x57, 0x0d, + 0xa9, 0xbc, 0xf2, 0x2c, 0xdd, 0x83, 0x1a, 0xaf, 0x0c, 0x61, 0xa6, 0x73, 0x80, 0xcc, 0x38, 0x9f, + 0x86, 0x73, 0xe2, 0x0a, 0xb9, 0x49, 0x9d, 0x95, 0xa5, 0xee, 0x24, 0xed, 0x75, 0x44, 0x42, 0x86, + 0x43, 0x76, 0xbd, 0xf8, 0x4f, 0xe1, 0x4e, 0x8e, 0x25, 0x95, 0xc0, 0x3e, 0x34, 0x55, 0x68, 0xc2, + 0x5a, 0x21, 0xae, 0x5a, 0xcb, 0xf9, 0xc7, 0x82, 0xed, 0x17, 0xcb, 0x99, 0xc7, 0xb0, 0x16, 0xad, + 0x09, 0xea, 0x3e, 0xd4, 0x45, 0x87, 0x51, 0x58, 0xdc, 0x94, 0xb6, 0x65, 0x1b, 0x1a, 0xf1, 0xbf, + 0xae, 0x94, 0xa3, 0x87, 0xd0, 0xb8, 0xf0, 0x82, 0x18, 0x53, 0x01, 0x44, 0x82, 0x9a, 0xd2, 0x14, + 0xed, 0xc9, 0x55, 0x1a, 0x68, 0x17, 0x9a, 0xb3, 0xe8, 0x92, 0xf7, 0x17, 0x51, 0x92, 0x2d, 0xb7, + 0x31, 0x8b, 0x2e, 0xdd, 0x38, 0x44, 0x1f, 0xc1, 0xd6, 0xcc, 0xa7, 0xde, 0x79, 0x80, 0x27, 0xaf, + 0x09, 0x79, 0x43, 0x45, 0x55, 0xb6, 0xdc, 0x4d, 0xb5, 0x79, 0xc2, 0xf7, 0x50, 0x9f, 0xdf, 0xa4, + 0x69, 0x84, 0x3d, 0x86, 0xed, 0x86, 0x90, 0x27, 0x6b, 0x8e, 0x21, 0xf3, 0x17, 0x98, 0xc4, 0x4c, + 0x94, 0x92, 0xe5, 0xea, 0x25, 0xfa, 0x10, 0x36, 0x23, 0x4c, 0x31, 0x9b, 0xa8, 0x28, 0x5b, 0xe2, + 0x64, 0x47, 0xec, 0xbd, 0x94, 0x61, 0x21, 0xa8, 0xfd, 0xe6, 0xf9, 0xcc, 0x6e, 0x0b, 0x91, 0xf8, + 0x96, 0xc7, 0x62, 0x8a, 0xf5, 0x31, 0xd0, 0xc7, 0x62, 0x8a, 0xd5, 0xb1, 0x6d, 0xa8, 0xcf, 0x49, + 0x34, 0xc5, 0x76, 0x47, 0xc8, 0xe4, 0x02, 0xed, 0x41, 0x67, 0x86, 0xe9, 0x34, 0xf2, 0x97, 0x8c, + 0x33, 0xba, 0x29, 0x30, 0x4d, 0x6f, 0xf1, 0x3c, 0x68, 0x7c, 0x7e, 0x46, 0x18, 0xa6, 0xf6, 0x96, + 0xcc, 0x43, 0xaf, 0xd1, 0x3d, 0xb8, 0x31, 0x0d, 0xb0, 0x17, 0xc6, 0xcb, 0x09, 0x09, 0x27, 0x73, + 0xcf, 0x0f, 0xec, 0xae, 0x50, 0xd9, 0x52, 0xdb, 0xcf, 0xc2, 0x6f, 0x3d, 0x3f, 0x70, 0x4e, 0xe0, + 0x76, 0x86, 0xca, 0xeb, 0xde, 0x8a, 0xbf, 0xaa, 0xb0, 0xe3, 0x92, 0x20, 0x38, 0xf7, 0xa6, 0x6f, + 0x4a, 0xdc, 0x8b, 0x14, 0x85, 0xd5, 0xf5, 0x14, 0x5a, 0x39, 0x14, 0xa6, 0xae, 0x7a, 0xcd, 0xb8, + 0xea, 0x06, 0xb9, 0xf5, 0x62, 0x72, 0x1b, 0x26, 0xb9, 0x9a, 0xb9, 0x66, 0x8a, 0xb9, 0x84, 0x96, + 0xd6, 0x1a, 0x5a, 0xda, 0xab, 0xb4, 0xe4, 0x40, 0x0f, 0x79, 0xd0, 0x7f, 0x07, 0xbb, 0x2b, 0x78, + 0x5d, 0x17, 0xfc, 0x3f, 0x2c, 0xb8, 0xfd, 0x34, 0xa4, 0xcc, 0x0b, 0x82, 0x0c, 0xf6, 0x49, 0xfd, + 0x55, 0x4a, 0xd7, 0x5f, 0xf5, 0xff, 0xd4, 0x9f, 0x65, 0x90, 0xa7, 0x99, 0xae, 0xa5, 0x98, 0x2e, + 0x55, 0x93, 0x46, 0x27, 0x6c, 0x64, 0x3a, 0x21, 0x7a, 0x1f, 0x40, 0x16, 0x91, 0x30, 0x2e, 0x49, + 0x6a, 0x8b, 0x9d, 0x33, 0xd5, 0xf8, 0x34, 0xaf, 0xad, 0x7c, 0x5e, 0xd3, 0x15, 0x39, 0x80, 0x9e, + 0x8e, 0x67, 0x1a, 
0xcd, 0x44, 0x4c, 0x8a, 0xa0, 0xae, 0xda, 0x1f, 0x45, 0x33, 0x1e, 0x55, 0x96, + 0xeb, 0xce, 0xfa, 0x12, 0xdc, 0x34, 0x4b, 0xd0, 0x79, 0x0a, 0x3b, 0x59, 0x4a, 0xae, 0x4b, 0xef, + 0x9f, 0x15, 0xd8, 0x7d, 0x11, 0xfa, 0xb9, 0x04, 0xe7, 0x15, 0xd7, 0x0a, 0xe4, 0xd5, 0x1c, 0xc8, + 0xb7, 0xa1, 0xbe, 0x8c, 0xa3, 0x57, 0x58, 0x51, 0x28, 0x17, 0x69, 0x2c, 0x6b, 0x26, 0x96, 0x19, + 0x34, 0xea, 0x2b, 0x68, 0x38, 0x13, 0xb0, 0x57, 0xa3, 0xbc, 0x66, 0xce, 0x3c, 0xaf, 0xe4, 0x0d, + 0x6d, 0xcb, 0xf7, 0xd2, 0xb9, 0x05, 0x37, 0x8f, 0x31, 0x7b, 0x29, 0x4b, 0x5d, 0x01, 0xe0, 0x1c, + 0x01, 0x4a, 0x6f, 0x5e, 0xf9, 0x53, 0x5b, 0xa6, 0x3f, 0x3d, 0x60, 0x6a, 0x7d, 0xad, 0xe5, 0x7c, + 0x25, 0x6c, 0x9f, 0xf8, 0x94, 0x91, 0xe8, 0x72, 0x1d, 0xb8, 0x3d, 0xb0, 0x16, 0xde, 0x5b, 0xf5, + 0xc4, 0xf2, 0x4f, 0xe7, 0x58, 0x44, 0x90, 0x1c, 0x55, 0x11, 0xa4, 0x07, 0x96, 0x4a, 0xb9, 0x81, + 0xe5, 0x2d, 0xa0, 0xe7, 0x38, 0x99, 0x9d, 0xde, 0xf1, 0xd6, 0x6b, 0x9a, 0xaa, 0x26, 0x4d, 0x36, + 0x34, 0x55, 0x9f, 0x51, 0xc4, 0xea, 0x25, 0xbf, 0xac, 0x4b, 0x2f, 0xf2, 0x82, 0x00, 0x07, 0xea, + 0xd9, 0x4c, 0xd6, 0xce, 0xcf, 0x70, 0xcb, 0xf0, 0xac, 0x72, 0xe0, 0xb9, 0xd2, 0x57, 0xca, 0x33, + 0xff, 0x44, 0x5f, 0x42, 0x43, 0x0e, 0x9f, 0xc2, 0x6f, 0xf7, 0xe0, 0xae, 0x99, 0x93, 0x30, 0x12, + 0x87, 0x6a, 0x5a, 0x75, 0x95, 0xee, 0xc1, 0xbf, 0x2d, 0xe8, 0xea, 0xf1, 0x49, 0x8e, 0xc6, 0xc8, + 0x87, 0xcd, 0xf4, 0x9c, 0x88, 0x1e, 0x14, 0x4f, 0xce, 0x99, 0xf1, 0xbf, 0xff, 0xb0, 0x8c, 0xaa, + 0xcc, 0xc0, 0xd9, 0xf8, 0xac, 0x82, 0x28, 0xf4, 0xb2, 0xe3, 0x1b, 0x7a, 0x94, 0x6f, 0xa3, 0x60, + 0x5e, 0xec, 0x0f, 0xcb, 0xaa, 0x6b, 0xb7, 0xe8, 0x42, 0xdc, 0x27, 0x73, 0xe6, 0x42, 0xef, 0x34, + 0x63, 0x8e, 0x79, 0xfd, 0xfd, 0xd2, 0xfa, 0x89, 0xdf, 0x5f, 0x60, 0xcb, 0x78, 0xd1, 0x51, 0x01, + 0x5a, 0x79, 0x13, 0x5c, 0xff, 0x93, 0x52, 0xba, 0x89, 0xaf, 0x05, 0x74, 0xcd, 0x16, 0x87, 0x0a, + 0x0c, 0xe4, 0xbe, 0x4d, 0xfd, 0x4f, 0xcb, 0x29, 0x27, 0xee, 0x28, 0xf4, 0xb2, 0xfd, 0xa5, 0x88, + 0xc7, 0x82, 0x6e, 0x59, 0xc4, 0x63, 0x51, 0xdb, 0x72, 0x36, 0x90, 0x07, 0x70, 0xd5, 0x5e, 0xd0, + 0xfd, 0x42, 0x42, 0xcc, 0xae, 0xd4, 0x1f, 0xbc, 0x5b, 0x31, 0x71, 0xb1, 0x84, 0x1b, 0x99, 0x49, + 0x00, 0x15, 0x40, 0x93, 0x3f, 0x60, 0xf5, 0x1f, 0x95, 0xd4, 0xce, 0x24, 0xa5, 0x3a, 0xd6, 0x9a, + 0xa4, 0xcc, 0x76, 0xb8, 0x26, 0xa9, 0x4c, 0xf3, 0x73, 0x36, 0x90, 0x0f, 0x5d, 0x37, 0x0e, 0x95, + 0x6b, 0xde, 0x16, 0x50, 0xc1, 0xe9, 0xd5, 0x8e, 0xd7, 0x7f, 0x50, 0x42, 0xf3, 0xaa, 0xbe, 0x1f, + 0xc3, 0x4f, 0x2d, 0xad, 0x7a, 0xde, 0x10, 0xff, 0x39, 0xf8, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x20, 0xcd, 0x9e, 0x3a, 0x27, 0x11, 0x00, 0x00, } diff --git a/pkg/proto/hapi/version/version.pb.go b/pkg/proto/hapi/version/version.pb.go index 13c8568f0..869bb3a5f 100644 --- a/pkg/proto/hapi/version/version.pb.go +++ b/pkg/proto/hapi/version/version.pb.go @@ -1,15 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: hapi/version/version.proto -/* -Package version is a generated protocol buffer package. 
- -It is generated from these files: - hapi/version/version.proto - -It has these top-level messages: - Version -*/ package version import proto "github.com/golang/protobuf/proto" @@ -29,15 +20,37 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Version struct { // Sem ver string for the version - SemVer string `protobuf:"bytes,1,opt,name=sem_ver,json=semVer" json:"sem_ver,omitempty"` - GitCommit string `protobuf:"bytes,2,opt,name=git_commit,json=gitCommit" json:"git_commit,omitempty"` - GitTreeState string `protobuf:"bytes,3,opt,name=git_tree_state,json=gitTreeState" json:"git_tree_state,omitempty"` + SemVer string `protobuf:"bytes,1,opt,name=sem_ver,json=semVer,proto3" json:"sem_ver,omitempty"` + GitCommit string `protobuf:"bytes,2,opt,name=git_commit,json=gitCommit,proto3" json:"git_commit,omitempty"` + GitTreeState string `protobuf:"bytes,3,opt,name=git_tree_state,json=gitTreeState,proto3" json:"git_tree_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_version_227db6d1d83f2c17, []int{0} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Version proto.InternalMessageInfo func (m *Version) GetSemVer() string { if m != nil { @@ -64,9 +77,9 @@ func init() { proto.RegisterType((*Version)(nil), "hapi.version.Version") } -func init() { proto.RegisterFile("hapi/version/version.proto", fileDescriptor0) } +func init() { proto.RegisterFile("hapi/version/version.proto", fileDescriptor_version_227db6d1d83f2c17) } -var fileDescriptor0 = []byte{ +var fileDescriptor_version_227db6d1d83f2c17 = []byte{ // 151 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xca, 0x48, 0x2c, 0xc8, 0xd4, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x83, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, diff --git a/pkg/provenance/sign.go b/pkg/provenance/sign.go index 5e23c2dda..d0e4d06c7 100644 --- a/pkg/provenance/sign.go +++ b/pkg/provenance/sign.go @@ -122,7 +122,7 @@ func NewFromKeyring(keyringfile, id string) (*Signatory, error) { return s, nil } - // We're gonna go all GnuPG on this and look for a string that _contains_. If + // We're going to go all GnuPG on this and look for a string that _contains_. If // two or more keys contain the string and none are a direct match, we error // out. var candidate *openpgp.Entity @@ -404,6 +404,8 @@ func DigestFile(filename string) (string, error) { // Helm uses SHA256 as its default hash for all non-cryptographic applications. 
func Digest(in io.Reader) (string, error) { hash := crypto.SHA256.New() - io.Copy(hash, in) + if _, err := io.Copy(hash, in); err != nil { + return "", err + } return hex.EncodeToString(hash.Sum(nil)), nil } diff --git a/pkg/releasetesting/test_suite_test.go b/pkg/releasetesting/test_suite_test.go index 59f122953..7b856ac39 100644 --- a/pkg/releasetesting/test_suite_test.go +++ b/pkg/releasetesting/test_suite_test.go @@ -37,7 +37,8 @@ import ( tillerEnv "k8s.io/helm/pkg/tiller/environment" ) -const manifestWithTestSuccessHook = ` +const ( + manifestWithTestSuccessHook = ` apiVersion: v1 kind: Pod metadata: @@ -51,7 +52,7 @@ spec: cmd: fake-command ` -const manifestWithTestFailureHook = ` + manifestWithTestFailureHook = ` apiVersion: v1 kind: Pod metadata: @@ -64,7 +65,7 @@ spec: image: fake-gold-finding-image cmd: fake-gold-finding-command ` -const manifestWithInstallHooks = `apiVersion: v1 + manifestWithInstallHooks = `apiVersion: v1 kind: ConfigMap metadata: name: test-cm @@ -73,6 +74,7 @@ metadata: data: name: value ` +) func TestNewTestSuite(t *testing.T) { rel := releaseStub() diff --git a/pkg/releaseutil/filter_test.go b/pkg/releaseutil/filter_test.go index 802b1db7a..4ec81a8da 100644 --- a/pkg/releaseutil/filter_test.go +++ b/pkg/releaseutil/filter_test.go @@ -54,6 +54,6 @@ func TestFilterAll(t *testing.T) { case r0.Version == 4: t.Fatal("got release with status revision 4") case r0.Info.Status.Code == rspb.Status_DELETED: - t.Fatal("got release with status DELTED") + t.Fatal("got release with status DELETED") } } diff --git a/pkg/repo/chartrepo_test.go b/pkg/repo/chartrepo_test.go index a2f1daeb8..ed09b5c6d 100644 --- a/pkg/repo/chartrepo_test.go +++ b/pkg/repo/chartrepo_test.go @@ -175,7 +175,7 @@ func verifyIndex(t *testing.T, actual *IndexFile) { t.Errorf("Expected %q, got %q", e.Version, g.Version) } if len(g.Keywords) != 3 { - t.Error("Expected 3 keyrwords.") + t.Error("Expected 3 keywords.") } if len(g.Maintainers) != 2 { t.Error("Expected 2 maintainers.") diff --git a/pkg/repo/index.go b/pkg/repo/index.go index 9031463f3..ac19528fa 100644 --- a/pkg/repo/index.go +++ b/pkg/repo/index.go @@ -168,6 +168,15 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) { } } + // when customer input exact version, check whether have exact match one first + if len(version) != 0 { + for _, ver := range vs { + if version == ver.Version { + return ver, nil + } + } + } + for _, ver := range vs { test, err := semver.NewVersion(ver.Version) if err != nil { diff --git a/pkg/repo/index_test.go b/pkg/repo/index_test.go index 7c9239b7a..7e9998a4d 100644 --- a/pkg/repo/index_test.go +++ b/pkg/repo/index_test.go @@ -20,6 +20,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "testing" "k8s.io/helm/pkg/getter" @@ -39,14 +40,17 @@ func TestIndexFile(t *testing.T) { i.Add(&chart.Metadata{Name: "cutter", Version: "0.1.1"}, "cutter-0.1.1.tgz", "http://example.com/charts", "sha256:1234567890abc") i.Add(&chart.Metadata{Name: "cutter", Version: "0.1.0"}, "cutter-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890abc") i.Add(&chart.Metadata{Name: "cutter", Version: "0.2.0"}, "cutter-0.2.0.tgz", "http://example.com/charts", "sha256:1234567890abc") + i.Add(&chart.Metadata{Name: "setter", Version: "0.1.9+alpha"}, "setter-0.1.9+alpha.tgz", "http://example.com/charts", "sha256:1234567890abc") + i.Add(&chart.Metadata{Name: "setter", Version: "0.1.9+beta"}, "setter-0.1.9+beta.tgz", "http://example.com/charts", "sha256:1234567890abc") + i.SortEntries() if i.APIVersion != APIVersionV1 
{ t.Error("Expected API version v1") } - if len(i.Entries) != 2 { - t.Errorf("Expected 2 charts. Got %d", len(i.Entries)) + if len(i.Entries) != 3 { + t.Errorf("Expected 3 charts. Got %d", len(i.Entries)) } if i.Entries["clipper"][0].Name != "clipper" { @@ -54,13 +58,23 @@ func TestIndexFile(t *testing.T) { } if len(i.Entries["cutter"]) != 3 { - t.Error("Expected two cutters.") + t.Error("Expected three cutters.") } // Test that the sort worked. 0.2 should be at the first index for Cutter. if v := i.Entries["cutter"][0].Version; v != "0.2.0" { t.Errorf("Unexpected first version: %s", v) } + + cv, err := i.Get("setter", "0.1.9") + if err == nil && strings.Index(cv.Metadata.Version, "0.1.9") < 0 { + t.Errorf("Unexpected version: %s", cv.Metadata.Version) + } + + cv, err = i.Get("setter", "0.1.9+alpha") + if err != nil || cv.Metadata.Version != "0.1.9+alpha" { + t.Errorf("Expected version: 0.1.9+alpha") + } } func TestLoadIndex(t *testing.T) { diff --git a/pkg/repo/repo.go b/pkg/repo/repo.go index fa550357a..80166feea 100644 --- a/pkg/repo/repo.go +++ b/pkg/repo/repo.go @@ -117,12 +117,18 @@ func (r *RepoFile) Update(re ...*Entry) { // Has returns true if the given name is already a repository name. func (r *RepoFile) Has(name string) bool { - for _, rf := range r.Repositories { - if rf.Name == name { - return true + _, ok := r.Get(name) + return ok +} + +// Get returns entry by the given name if it exists. +func (r *RepoFile) Get(name string) (*Entry, bool) { + for _, entry := range r.Repositories { + if entry.Name == name { + return entry, true } } - return false + return nil, false } // Remove removes the entry from the list of repositories. diff --git a/pkg/repo/repo_test.go b/pkg/repo/repo_test.go index 264e9bc3c..cf435d8c1 100644 --- a/pkg/repo/repo_test.go +++ b/pkg/repo/repo_test.go @@ -16,10 +16,12 @@ limitations under the License. package repo -import "testing" -import "io/ioutil" -import "os" -import "strings" +import ( + "io/ioutil" + "os" + "strings" + "testing" +) const testRepositoriesFile = "testdata/repositories.yaml" @@ -120,6 +122,43 @@ func TestNewPreV1RepositoriesFile(t *testing.T) { } } +func TestRepoFile_Get(t *testing.T) { + repo := NewRepoFile() + repo.Add( + &Entry{ + Name: "first", + URL: "https://example.com/first", + Cache: "first-index.yaml", + }, + &Entry{ + Name: "second", + URL: "https://example.com/second", + Cache: "second-index.yaml", + }, + &Entry{ + Name: "third", + URL: "https://example.com/third", + Cache: "third-index.yaml", + }, + &Entry{ + Name: "fourth", + URL: "https://example.com/fourth", + Cache: "fourth-index.yaml", + }, + ) + + name := "second" + + entry, ok := repo.Get(name) + if !ok { + t.Fatalf("Expected repo entry %q to be found", name) + } + + if entry.URL != "https://example.com/second" { + t.Fatalf("Expected repo URL to be %q but got %q", "https://example.com/second", entry.URL) + } +} + func TestRemoveRepository(t *testing.T) { sampleRepository := NewRepoFile() sampleRepository.Add( diff --git a/pkg/repo/repotest/server.go b/pkg/repo/repotest/server.go index 36ab10d70..394294bcd 100644 --- a/pkg/repo/repotest/server.go +++ b/pkg/repo/repotest/server.go @@ -148,7 +148,7 @@ func (s *Server) URL() string { return s.srv.URL } -// LinkIndices links the index created with CreateIndex and makes a symboic link to the repositories/cache directory. +// LinkIndices links the index created with CreateIndex and makes a symbolic link to the repositories/cache directory. // // This makes it possible to simulate a local cache of a repository. 
func (s *Server) LinkIndices() error { diff --git a/pkg/resolver/resolver.go b/pkg/resolver/resolver.go index 8177df2d3..516e9260f 100644 --- a/pkg/resolver/resolver.go +++ b/pkg/resolver/resolver.go @@ -71,7 +71,18 @@ func (r *Resolver) Resolve(reqs *chartutil.Requirements, repoNames map[string]st return nil, fmt.Errorf("dependency %q has an invalid version/constraint format: %s", d.Name, err) } - repoIndex, err := repo.LoadIndexFile(r.helmhome.CacheIndex(repoNames[d.Name])) + // repo does not exist in cache but has url info + cacheRepoName := repoNames[d.Name] + if cacheRepoName == "" && d.Repository != "" { + locked[i] = &chartutil.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: d.Version, + } + continue + } + + repoIndex, err := repo.LoadIndexFile(r.helmhome.CacheIndex(cacheRepoName)) if err != nil { return nil, fmt.Errorf("no cached repo found. (try 'helm repo update'). %s", err) } diff --git a/pkg/resolver/resolver_test.go b/pkg/resolver/resolver_test.go index 689ffbc32..f35e051fa 100644 --- a/pkg/resolver/resolver_test.go +++ b/pkg/resolver/resolver_test.go @@ -37,15 +37,6 @@ func TestResolve(t *testing.T) { }, err: true, }, - { - name: "cache index failure", - req: &chartutil.Requirements{ - Dependencies: []*chartutil.Dependency{ - {Name: "oedipus-rex", Repository: "http://example.com", Version: "1.0.0"}, - }, - }, - err: true, - }, { name: "chart not found failure", req: &chartutil.Requirements{ diff --git a/pkg/storage/driver/cfgmaps_test.go b/pkg/storage/driver/cfgmaps_test.go index d2e5e942e..1c8ed6652 100644 --- a/pkg/storage/driver/cfgmaps_test.go +++ b/pkg/storage/driver/cfgmaps_test.go @@ -15,7 +15,6 @@ package driver import ( "encoding/base64" - "reflect" "testing" "github.com/gogo/protobuf/proto" @@ -46,7 +45,7 @@ func TestConfigMapGet(t *testing.T) { t.Fatalf("Failed to get release: %s", err) } // compare fetched release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } @@ -78,7 +77,7 @@ func TestUNcompressedConfigMapGet(t *testing.T) { t.Fatalf("Failed to get release: %s", err) } // compare fetched release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } @@ -151,7 +150,7 @@ func TestConfigMapCreate(t *testing.T) { } // compare created release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } diff --git a/pkg/storage/driver/mock_test.go b/pkg/storage/driver/mock_test.go index 363d9dd5d..06f48fba3 100644 --- a/pkg/storage/driver/mock_test.go +++ b/pkg/storage/driver/mock_test.go @@ -20,6 +20,8 @@ import ( "fmt" "testing" + sqlmock "github.com/DATA-DOG/go-sqlmock" + "github.com/jmoiron/sqlx" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +40,16 @@ func releaseStub(name string, vers int32, namespace string, code rspb.Status_Cod } } +func shallowReleaseEqual(r1 *rspb.Release, r2 *rspb.Release) bool { + if r1.Name != r2.Name || + r1.Namespace != r2.Namespace || + r1.Version != r2.Version || + r1.Manifest != r2.Manifest { + return false + } + return true +} + func testKey(name string, vers int32) string { return fmt.Sprintf("%s.v%d", name, vers) } @@ -221,3 +233,17 @@ func (mock *MockSecretsInterface) Delete(name string, opts *metav1.DeleteOptions delete(mock.objects, name) return nil } + +// newTestFixtureSQL mocks the SQL 
database (for testing purposes) +func newTestFixtureSQL(t *testing.T, releases ...*rspb.Release) (*SQL, sqlmock.Sqlmock) { + sqlDB, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("error when opening stub database connection: %v", err) + } + + sqlxDB := sqlx.NewDb(sqlDB, "sqlmock") + return &SQL{ + db: sqlxDB, + Log: func(_ string, _ ...interface{}) {}, + }, mock +} diff --git a/pkg/storage/driver/secrets.go b/pkg/storage/driver/secrets.go index b79a84272..606d7960b 100644 --- a/pkg/storage/driver/secrets.go +++ b/pkg/storage/driver/secrets.go @@ -45,7 +45,7 @@ type Secrets struct { Log func(string, ...interface{}) } -// NewSecrets initializes a new Secrets wrapping an implmenetation of +// NewSecrets initializes a new Secrets wrapping an implementation of // the kubernetes SecretsInterface. func NewSecrets(impl corev1.SecretInterface) *Secrets { return &Secrets{ diff --git a/pkg/storage/driver/secrets_test.go b/pkg/storage/driver/secrets_test.go index 0d7d1ad83..3c6c1675d 100644 --- a/pkg/storage/driver/secrets_test.go +++ b/pkg/storage/driver/secrets_test.go @@ -15,7 +15,6 @@ package driver import ( "encoding/base64" - "reflect" "testing" "github.com/gogo/protobuf/proto" @@ -46,7 +45,7 @@ func TestSecretGet(t *testing.T) { t.Fatalf("Failed to get release: %s", err) } // compare fetched release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } @@ -78,7 +77,7 @@ func TestUNcompressedSecretGet(t *testing.T) { t.Fatalf("Failed to get release: %s", err) } // compare fetched release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } @@ -151,7 +150,7 @@ func TestSecretCreate(t *testing.T) { } // compare created release with original - if !reflect.DeepEqual(rel, got) { + if !shallowReleaseEqual(rel, got) { t.Errorf("Expected {%q}, got {%q}", rel, got) } } diff --git a/pkg/storage/driver/sql.go b/pkg/storage/driver/sql.go new file mode 100644 index 000000000..e1677c9ed --- /dev/null +++ b/pkg/storage/driver/sql.go @@ -0,0 +1,336 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/jmoiron/sqlx" + migrate "github.com/rubenv/sql-migrate" + + // Import pq for postgres dialect + _ "github.com/lib/pq" + + rspb "k8s.io/helm/pkg/proto/hapi/release" + storageerrors "k8s.io/helm/pkg/storage/errors" +) + +var _ Driver = (*SQL)(nil) + +var labelMap = map[string]string{ + "MODIFIED_AT": "modified_at", + "CREATED_AT": "created_at", + "VERSION": "version", + "STATUS": "status", + "OWNER": "owner", + "NAME": "name", +} + +var supportedSQLDialects = map[string]struct{}{ + "postgres": {}, +} + +// SQLDriverName is the string name of this driver. +const SQLDriverName = "SQL" + +// SQL is the sql storage driver implementation. +type SQL struct { + db *sqlx.DB + Log func(string, ...interface{}) +} + +// Name returns the name of the driver. 
+func (s *SQL) Name() string {
+ return SQLDriverName
+}
+
+func (s *SQL) ensureDBSetup() error {
+ // Populate the database with the relations we need if they don't exist yet
+ migrations := &migrate.MemoryMigrationSource{
+ Migrations: []*migrate.Migration{
+ {
+ Id: "init",
+ Up: []string{
+ `
+ CREATE TABLE releases (
+ key VARCHAR(67) PRIMARY KEY,
+ body TEXT NOT NULL,
+
+ name VARCHAR(64) NOT NULL,
+ version INTEGER NOT NULL,
+ status TEXT NOT NULL,
+ owner TEXT NOT NULL,
+ created_at INTEGER NOT NULL,
+ modified_at INTEGER NOT NULL DEFAULT 0
+ );
+
+ CREATE INDEX ON releases (key);
+ CREATE INDEX ON releases (version);
+ CREATE INDEX ON releases (status);
+ CREATE INDEX ON releases (owner);
+ CREATE INDEX ON releases (created_at);
+ CREATE INDEX ON releases (modified_at);
+ `,
+ },
+ Down: []string{
+ `
+ DROP TABLE releases;
+ `,
+ },
+ },
+ },
+ }
+
+ _, err := migrate.Exec(s.db.DB, "postgres", migrations, migrate.Up)
+ return err
+}
+
+// SQLReleaseWrapper describes how Helm releases are stored in an SQL database
+type SQLReleaseWrapper struct {
+ // The primary key, made of {release-name}.{release-version}
+ Key string `db:"key"`
+
+ // The rspb.Release body, as a base64-encoded string
+ Body string `db:"body"`
+
+ // Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
+ // we implemented. Note that allowing Helm users to filter against new dimensions will require a
+ // new migration to be added, and the Create and/or update functions to be updated accordingly.
+ Name string `db:"name"`
+ Version int `db:"version"`
+ Status string `db:"status"`
+ Owner string `db:"owner"`
+ CreatedAt int `db:"created_at"`
+ ModifiedAt int `db:"modified_at"`
+}
+
+// NewSQL initializes a new SQL driver.
+func NewSQL(dialect, connectionString string, logger func(string, ...interface{})) (*SQL, error) {
+ if _, ok := supportedSQLDialects[dialect]; !ok {
+ return nil, fmt.Errorf("%s dialect isn't supported, only \"postgres\" is available for now", dialect)
+ }
+
+ db, err := sqlx.Connect(dialect, connectionString)
+ if err != nil {
+ return nil, err
+ }
+
+ driver := &SQL{
+ db: db,
+ Log: logger,
+ }
+
+ if err := driver.ensureDBSetup(); err != nil {
+ return nil, err
+ }
+
+ return driver, nil
+}
+
+// Get returns the release named by key.
+func (s *SQL) Get(key string) (*rspb.Release, error) { + var record SQLReleaseWrapper + // Get will return an error if the result is empty + err := s.db.Get(&record, "SELECT body FROM releases WHERE key = $1", key) + if err != nil { + s.Log("got SQL error when getting release %s: %v", key, err) + return nil, storageerrors.ErrReleaseNotFound(key) + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("get: failed to decode data %q: %v", key, err) + return nil, err + } + + return release, nil +} + +// List returns the list of all releases such that filter(release) == true +func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + var records = []SQLReleaseWrapper{} + if err := s.db.Select(&records, "SELECT body FROM releases WHERE owner = 'TILLER'"); err != nil { + s.Log("list: failed to list: %v", err) + return nil, err + } + + var releases []*rspb.Release + for _, record := range records { + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("list: failed to decode release: %v: %v", record, err) + continue + } + if filter(release) { + releases = append(releases, release) + } + } + + return releases, nil +} + +// Query returns the set of releases that match the provided set of labels. +func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { + var sqlFilterKeys []string + sqlFilter := map[string]interface{}{} + for key, val := range labels { + // Build a slice of where filters e.g + // labels = map[string]string{ "foo": "foo", "bar": "bar" } + // []string{ "foo=?", "bar=?" } + if dbField, ok := labelMap[key]; ok { + sqlFilterKeys = append(sqlFilterKeys, strings.Join([]string{dbField, "=:", dbField}, "")) + sqlFilter[dbField] = val + } else { + s.Log("unknown label %s", key) + return nil, fmt.Errorf("unknown label %s", key) + } + } + sort.Strings(sqlFilterKeys) + + // Build our query + query := strings.Join([]string{ + "SELECT body FROM releases", + "WHERE", + strings.Join(sqlFilterKeys, " AND "), + }, " ") + + rows, err := s.db.NamedQuery(query, sqlFilter) + if err != nil { + s.Log("failed to query with labels: %v", err) + return nil, err + } + + var releases []*rspb.Release + for rows.Next() { + var record SQLReleaseWrapper + if err = rows.StructScan(&record); err != nil { + s.Log("failed to scan record %q: %v", record, err) + return nil, err + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("failed to decode release: %v", err) + continue + } + releases = append(releases, release) + } + + if len(releases) == 0 { + return nil, storageerrors.ErrReleaseNotFound(labels["NAME"]) + } + + return releases, nil +} + +// Create creates a new release. 
+func (s *SQL) Create(key string, rls *rspb.Release) error { + body, err := encodeRelease(rls) + if err != nil { + s.Log("failed to encode release: %v", err) + return err + } + + transaction, err := s.db.Beginx() + if err != nil { + s.Log("failed to start SQL transaction: %v", err) + return fmt.Errorf("error beginning transaction: %v", err) + } + + if _, err := transaction.NamedExec("INSERT INTO releases (key, body, name, version, status, owner, created_at) VALUES (:key, :body, :name, :version, :status, :owner, :created_at)", + &SQLReleaseWrapper{ + Key: key, + Body: body, + + Name: rls.Name, + Version: int(rls.Version), + Status: rspb.Status_Code_name[int32(rls.Info.Status.Code)], + Owner: "TILLER", + CreatedAt: int(time.Now().Unix()), + }, + ); err != nil { + defer transaction.Rollback() + var record SQLReleaseWrapper + if err := transaction.Get(&record, "SELECT key FROM releases WHERE key = ?", key); err == nil { + s.Log("release %s already exists", key) + return storageerrors.ErrReleaseExists(key) + } + + s.Log("failed to store release %s in SQL database: %v", key, err) + return err + } + defer transaction.Commit() + + return nil +} + +// Update updates a release. +func (s *SQL) Update(key string, rls *rspb.Release) error { + body, err := encodeRelease(rls) + if err != nil { + s.Log("failed to encode release: %v", err) + return err + } + + if _, err := s.db.NamedExec("UPDATE releases SET body=:body, name=:name, version=:version, status=:status, owner=:owner, modified_at=:modified_at WHERE key=:key", + &SQLReleaseWrapper{ + Key: key, + Body: body, + + Name: rls.Name, + Version: int(rls.Version), + Status: rspb.Status_Code_name[int32(rls.Info.Status.Code)], + Owner: "TILLER", + ModifiedAt: int(time.Now().Unix()), + }, + ); err != nil { + s.Log("failed to update release %s in SQL database: %v", key, err) + return err + } + + return nil +} + +// Delete deletes a release or returns ErrReleaseNotFound. +func (s *SQL) Delete(key string) (*rspb.Release, error) { + transaction, err := s.db.Beginx() + if err != nil { + s.Log("failed to start SQL transaction: %v", err) + return nil, fmt.Errorf("error beginning transaction: %v", err) + } + + var record SQLReleaseWrapper + err = transaction.Get(&record, "SELECT body FROM releases WHERE key = $1", key) + if err != nil { + s.Log("release %s not found: %v", key, err) + return nil, storageerrors.ErrReleaseNotFound(key) + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("failed to decode release %s: %v", key, err) + transaction.Rollback() + return nil, err + } + defer transaction.Commit() + + _, err = transaction.Exec("DELETE FROM releases WHERE key = $1", key) + return release, err +} diff --git a/pkg/storage/driver/sql_test.go b/pkg/storage/driver/sql_test.go new file mode 100644 index 000000000..b6aa08588 --- /dev/null +++ b/pkg/storage/driver/sql_test.go @@ -0,0 +1,346 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver + +import ( + "fmt" + "regexp" + "testing" + "time" + + sqlmock "github.com/DATA-DOG/go-sqlmock" + rspb "k8s.io/helm/pkg/proto/hapi/release" +) + +func TestSQLName(t *testing.T) { + sqlDriver, _ := newTestFixtureSQL(t) + if sqlDriver.Name() != SQLDriverName { + t.Errorf("Expected name to be %q, got %q", SQLDriverName, sqlDriver.Name()) + } +} + +func TestSQLGet(t *testing.T) { + vers := int32(1) + name := "smug-pigeon" + namespace := "default" + key := testKey(name, vers) + rel := releaseStub(name, vers, namespace, rspb.Status_DEPLOYED) + + body, err := encodeRelease(rel) + if err != nil { + t.Fatal(err) + } + + sqlDriver, mock := newTestFixtureSQL(t) + mock. + ExpectQuery("SELECT body FROM releases WHERE key = ?"). + WithArgs(key). + WillReturnRows( + mock.NewRows([]string{ + "body", + }).AddRow( + body, + ), + ).RowsWillBeClosed() + + got, err := sqlDriver.Get(key) + if err != nil { + t.Fatalf("Failed to get release: %v", err) + } + + if !shallowReleaseEqual(rel, got) { + t.Errorf("Expected release {%q}, got {%q}", rel, got) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSQLList(t *testing.T) { + body1, _ := encodeRelease(releaseStub("key-1", 1, "default", rspb.Status_DELETED)) + body2, _ := encodeRelease(releaseStub("key-2", 1, "default", rspb.Status_DELETED)) + body3, _ := encodeRelease(releaseStub("key-3", 1, "default", rspb.Status_DEPLOYED)) + body4, _ := encodeRelease(releaseStub("key-4", 1, "default", rspb.Status_DEPLOYED)) + body5, _ := encodeRelease(releaseStub("key-5", 1, "default", rspb.Status_SUPERSEDED)) + body6, _ := encodeRelease(releaseStub("key-6", 1, "default", rspb.Status_SUPERSEDED)) + + sqlDriver, mock := newTestFixtureSQL(t) + + for i := 0; i < 3; i++ { + mock. + ExpectQuery("SELECT body FROM releases WHERE owner = 'TILLER'"). + WillReturnRows( + mock.NewRows([]string{ + "body", + }). + AddRow(body1). + AddRow(body2). + AddRow(body3). + AddRow(body4). + AddRow(body5). + AddRow(body6), + ).RowsWillBeClosed() + } + + // list all deleted releases + del, err := sqlDriver.List(func(rel *rspb.Release) bool { + return rel.Info.Status.Code == rspb.Status_DELETED + }) + // check + if err != nil { + t.Errorf("Failed to list deleted: %v", err) + } + if len(del) != 2 { + t.Errorf("Expected 2 deleted, got %d:\n%v\n", len(del), del) + } + + // list all deployed releases + dpl, err := sqlDriver.List(func(rel *rspb.Release) bool { + return rel.Info.Status.Code == rspb.Status_DEPLOYED + }) + // check + if err != nil { + t.Errorf("Failed to list deployed: %v", err) + } + if len(dpl) != 2 { + t.Errorf("Expected 2 deployed, got %d:\n%v\n", len(dpl), dpl) + } + + // list all superseded releases + ssd, err := sqlDriver.List(func(rel *rspb.Release) bool { + return rel.Info.Status.Code == rspb.Status_SUPERSEDED + }) + // check + if err != nil { + t.Errorf("Failed to list superseded: %v", err) + } + if len(ssd) != 2 { + t.Errorf("Expected 2 superseded, got %d:\n%v\n", len(ssd), ssd) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSqlCreate(t *testing.T) { + vers := int32(1) + name := "smug-pigeon" + namespace := "default" + key := testKey(name, vers) + rel := releaseStub(name, vers, namespace, rspb.Status_DEPLOYED) + + sqlDriver, mock := newTestFixtureSQL(t) + body, _ := encodeRelease(rel) + + mock.ExpectBegin() + mock. 
+ ExpectExec(regexp.QuoteMeta("INSERT INTO releases (key, body, name, version, status, owner, created_at) VALUES (?, ?, ?, ?, ?, ?, ?)")). + WithArgs(key, body, rel.Name, int(rel.Version), rspb.Status_Code_name[int32(rel.Info.Status.Code)], "TILLER", int(time.Now().Unix())). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + if err := sqlDriver.Create(key, rel); err != nil { + t.Fatalf("failed to create release with key %q: %v", key, err) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSqlCreateAlreadyExists(t *testing.T) { + vers := int32(1) + name := "smug-pigeon" + namespace := "default" + key := testKey(name, vers) + rel := releaseStub(name, vers, namespace, rspb.Status_DEPLOYED) + + sqlDriver, mock := newTestFixtureSQL(t) + body, _ := encodeRelease(rel) + + // Insert fails (primary key already exists) + mock.ExpectBegin() + mock. + ExpectExec(regexp.QuoteMeta("INSERT INTO releases (key, body, name, version, status, owner, created_at) VALUES (?, ?, ?, ?, ?, ?, ?)")). + WithArgs(key, body, rel.Name, int(rel.Version), rspb.Status_Code_name[int32(rel.Info.Status.Code)], "TILLER", int(time.Now().Unix())). + WillReturnError(fmt.Errorf("dialect dependent SQL error")) + + // Let's check that we do make sure the error is due to a release already existing + mock. + ExpectQuery(regexp.QuoteMeta("SELECT key FROM releases WHERE key = ?")). + WithArgs(key). + WillReturnRows( + mock.NewRows([]string{ + "body", + }).AddRow( + body, + ), + ).RowsWillBeClosed() + mock.ExpectRollback() + + if err := sqlDriver.Create(key, rel); err == nil { + t.Fatalf("failed to create release with key %q: %v", key, err) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSqlUpdate(t *testing.T) { + vers := int32(1) + name := "smug-pigeon" + namespace := "default" + key := testKey(name, vers) + rel := releaseStub(name, vers, namespace, rspb.Status_DEPLOYED) + + sqlDriver, mock := newTestFixtureSQL(t) + body, _ := encodeRelease(rel) + + mock. + ExpectExec(regexp.QuoteMeta("UPDATE releases SET body=?, name=?, version=?, status=?, owner=?, modified_at=? WHERE key=?")). + WithArgs(body, rel.Name, int(rel.Version), rspb.Status_Code_name[int32(rel.Info.Status.Code)], "TILLER", int(time.Now().Unix()), key). + WillReturnResult(sqlmock.NewResult(0, 1)) + + if err := sqlDriver.Update(key, rel); err != nil { + t.Fatalf("failed to update release with key %q: %v", key, err) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSqlQuery(t *testing.T) { + // Reflect actual use cases in ../storage.go + labelSetDeployed := map[string]string{ + "NAME": "smug-pigeon", + "OWNER": "TILLER", + "STATUS": "DEPLOYED", + } + labelSetAll := map[string]string{ + "NAME": "smug-pigeon", + "OWNER": "TILLER", + } + + supersededRelease := releaseStub("smug-pigeon", 1, "default", rspb.Status_SUPERSEDED) + supersededReleaseBody, _ := encodeRelease(supersededRelease) + deployedRelease := releaseStub("smug-pigeon", 2, "default", rspb.Status_DEPLOYED) + deployedReleaseBody, _ := encodeRelease(deployedRelease) + + // Let's actually start our test + sqlDriver, mock := newTestFixtureSQL(t) + + mock. + ExpectQuery(regexp.QuoteMeta("SELECT body FROM releases WHERE name=? AND owner=? AND status=?")). + WithArgs("smug-pigeon", "TILLER", "DEPLOYED"). 
+ WillReturnRows( + mock.NewRows([]string{ + "body", + }).AddRow( + deployedReleaseBody, + ), + ).RowsWillBeClosed() + + mock. + ExpectQuery(regexp.QuoteMeta("SELECT body FROM releases WHERE name=? AND owner=?")). + WithArgs("smug-pigeon", "TILLER"). + WillReturnRows( + mock.NewRows([]string{ + "body", + }).AddRow( + supersededReleaseBody, + ).AddRow( + deployedReleaseBody, + ), + ).RowsWillBeClosed() + + results, err := sqlDriver.Query(labelSetDeployed) + if err != nil { + t.Fatalf("failed to query for deployed smug-pigeon release: %v", err) + } + + for _, res := range results { + if !shallowReleaseEqual(res, deployedRelease) { + t.Errorf("Expected release {%q}, got {%q}", deployedRelease, res) + } + } + + results, err = sqlDriver.Query(labelSetAll) + if err != nil { + t.Fatalf("failed to query release history for smug-pigeon: %v", err) + } + + if len(results) != 2 { + t.Errorf("expected a resultset of size 2, got %d", len(results)) + } + + for _, res := range results { + if !shallowReleaseEqual(res, deployedRelease) && !shallowReleaseEqual(res, supersededRelease) { + t.Errorf("Expected release {%q} or {%q}, got {%q}", deployedRelease, supersededRelease, res) + } + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} + +func TestSqlDelete(t *testing.T) { + vers := int32(1) + name := "smug-pigeon" + namespace := "default" + key := testKey(name, vers) + rel := releaseStub(name, vers, namespace, rspb.Status_DEPLOYED) + + body, _ := encodeRelease(rel) + + sqlDriver, mock := newTestFixtureSQL(t) + + mock.ExpectBegin() + mock. + ExpectQuery("SELECT body FROM releases WHERE key = ?"). + WithArgs(key). + WillReturnRows( + mock.NewRows([]string{ + "body", + }).AddRow( + body, + ), + ).RowsWillBeClosed() + + mock. + ExpectExec(regexp.QuoteMeta("DELETE FROM releases WHERE key = $1")). + WithArgs(key). + WillReturnResult(sqlmock.NewResult(0, 1)) + mock.ExpectCommit() + + deletedRelease, err := sqlDriver.Delete(key) + if err != nil { + t.Fatalf("failed to delete release with key %q: %v", key, err) + } + + if !shallowReleaseEqual(rel, deletedRelease) { + t.Errorf("Expected release {%q}, got {%q}", rel, deletedRelease) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("sql expectations weren't met: %v", err) + } +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 9520db08b..e79cacc8d 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -50,7 +50,7 @@ func (s *Storage) Get(name string, version int32) (*rspb.Release, error) { // Create creates a new storage entry holding the release. An // error is returned if the storage driver failed to store the -// release, or a release with identical an key already exists. +// release, or a release with identical key already exists. 
 func (s *Storage) Create(rls *rspb.Release) error {
 	s.Log("creating release %q", makeKey(rls.Name, rls.Version))
 	if s.MaxHistory > 0 {
diff --git a/pkg/strvals/parser.go b/pkg/strvals/parser.go
index 9d52f34c0..d0a647c67 100644
--- a/pkg/strvals/parser.go
+++ b/pkg/strvals/parser.go
@@ -393,6 +393,10 @@ func typedVal(v []rune, st bool) interface{} {
 		return nil
 	}
 
+	if strings.EqualFold(val, "0") {
+		return int64(0)
+	}
+
 	// If this value does not start with zero, try parsing it to an int
 	if len(val) != 0 && val[0] != '0' {
 		if iv, err := strconv.ParseInt(val, 10, 64); err == nil {
diff --git a/pkg/strvals/parser_test.go b/pkg/strvals/parser_test.go
index a096f16d2..5d77aed18 100644
--- a/pkg/strvals/parser_test.go
+++ b/pkg/strvals/parser_test.go
@@ -85,6 +85,11 @@ func TestParseSet(t *testing.T) {
 			expect: map[string]interface{}{"is_null": "null"},
 			err:    false,
 		},
+		{
+			str:    "zero=0",
+			expect: map[string]interface{}{"zero": "0"},
+			err:    false,
+		},
 	}
 	tests := []struct {
 		str    string
@@ -123,6 +128,10 @@ func TestParseSet(t *testing.T) {
 			str:    "leading_zeros=00009",
 			expect: map[string]interface{}{"leading_zeros": "00009"},
 		},
+		{
+			str:    "zero_int=0",
+			expect: map[string]interface{}{"zero_int": 0},
+		},
 		{
 			str:    "long_int=1234567890",
 			expect: map[string]interface{}{"long_int": 1234567890},
diff --git a/pkg/tiller/environment/environment.go b/pkg/tiller/environment/environment.go
index 86d077b89..d3a478ea0 100644
--- a/pkg/tiller/environment/environment.go
+++ b/pkg/tiller/environment/environment.go
@@ -26,8 +26,8 @@ import (
 	"io"
 	"time"
 
-	"k8s.io/api/core/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions/resource"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/cli-runtime/pkg/resource"
 
 	"k8s.io/helm/pkg/chartutil"
 	"k8s.io/helm/pkg/engine"
@@ -37,11 +37,19 @@ import (
 	"k8s.io/helm/pkg/storage/driver"
 )
 
-// DefaultTillerNamespace is the default namespace for Tiller.
-const DefaultTillerNamespace = "kube-system"
+const (
+	// DefaultTillerNamespace is the default namespace for Tiller.
+	DefaultTillerNamespace = "kube-system"
 
-// GoTplEngine is the name of the Go template engine, as registered in the EngineYard.
-const GoTplEngine = "gotpl"
+	// DefaultTillerPort defines the default port tiller listens on for client traffic
+	DefaultTillerPort = 44134
+
+	// DefaultTillerProbePort defines the default port to listen on for probes
+	DefaultTillerProbePort = 44135
+
+	// GoTplEngine is the name of the Go template engine, as registered in the EngineYard.
+	GoTplEngine = "gotpl"
+)
 
 // DefaultEngine points to the engine that the EngineYard should treat as the
 // default. A chart that does not specify an engine may be run through the
@@ -119,28 +127,55 @@ type KubeClient interface {
 	// by "\n---\n").
 	Delete(namespace string, reader io.Reader) error
 
-	// Watch the resource in reader until it is "ready".
+	// DeleteWithTimeout destroys one or more resources. If shouldWait is true, the function
+	// will not return until all the resources have been fully deleted or the provided
+	// timeout has expired.
+	//
+	// namespace must contain a valid existing namespace.
+	//
+	// reader must contain a YAML stream (one or more YAML documents separated
+	// by "\n---\n").
+	DeleteWithTimeout(namespace string, reader io.Reader, timeout int64, shouldWait bool) error
+
+	// WatchUntilReady watches the resource in reader until it is "ready".
 	//
 	// For Jobs, "ready" means the job ran to completion (exited without error).
 	// For all other kinds, it means the kind was created or modified without
 	// error.
 	WatchUntilReady(namespace string, reader io.Reader, timeout int64, shouldWait bool) error
 
-	// Update updates one or more resources or creates the resource
+	// Deprecated: use UpdateWithOptions instead
+	Update(namespace string, originalReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error
+
+	// UpdateWithOptions updates one or more resources or creates the resource
 	// if it doesn't exist.
 	//
 	// namespace must contain a valid existing namespace.
 	//
 	// reader must contain a YAML stream (one or more YAML documents separated
 	// by "\n---\n").
-	Update(namespace string, originalReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error
+	UpdateWithOptions(namespace string, originalReader, modifiedReader io.Reader, opts kube.UpdateOptions) error
 
 	Build(namespace string, reader io.Reader) (kube.Result, error)
+
+	// BuildUnstructured reads a stream of manifests from a reader and turns them into
+	// info objects. Manifests are not validated against the schema, but it will fail if
+	// any resource types are not known by the apiserver.
+	//
+	// reader must contain a YAML stream (one or more YAML documents separated by "\n---\n").
 	BuildUnstructured(namespace string, reader io.Reader) (kube.Result, error)
 
+	// Validate reads a stream of manifests from a reader and validates them against
+	// the schema from the apiserver. It returns an error if any of the manifests does not validate.
+	//
+	// reader must contain a YAML stream (one or more YAML documents separated by "\n---\n").
+	Validate(namespace string, reader io.Reader) error
+
 	// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
 	// and returns said phase (PodSucceeded or PodFailed qualify).
 	WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error)
+
+	WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error
 }
 
 // PrintingKubeClient implements KubeClient, but simply prints the reader to
@@ -169,6 +204,14 @@ func (p *PrintingKubeClient) Delete(ns string, r io.Reader) error {
 	return err
 }
 
+// DeleteWithTimeout implements KubeClient DeleteWithTimeout.
+//
+// It only prints out the content to be deleted.
+func (p *PrintingKubeClient) DeleteWithTimeout(ns string, r io.Reader, timeout int64, shouldWait bool) error {
+	_, err := io.Copy(p.Out, r)
+	return err
+}
+
 // WatchUntilReady implements KubeClient WatchUntilReady.
 func (p *PrintingKubeClient) WatchUntilReady(ns string, r io.Reader, timeout int64, shouldWait bool) error {
 	_, err := io.Copy(p.Out, r)
@@ -177,6 +220,16 @@ func (p *PrintingKubeClient) WatchUntilReady(ns string, r io.Reader, timeout int
 
 // Update implements KubeClient Update.
 func (p *PrintingKubeClient) Update(ns string, currentReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error {
+	return p.UpdateWithOptions(ns, currentReader, modifiedReader, kube.UpdateOptions{
+		Force:      force,
+		Recreate:   recreate,
+		Timeout:    timeout,
+		ShouldWait: shouldWait,
+	})
+}
+
+// UpdateWithOptions implements KubeClient UpdateWithOptions.
+func (p *PrintingKubeClient) UpdateWithOptions(ns string, currentReader, modifiedReader io.Reader, opts kube.UpdateOptions) error { _, err := io.Copy(p.Out, modifiedReader) return err } @@ -191,12 +244,23 @@ func (p *PrintingKubeClient) BuildUnstructured(ns string, reader io.Reader) (kub return []*resource.Info{}, nil } +// Validate implements KubeClient Validate +func (p *PrintingKubeClient) Validate(ns string, reader io.Reader) error { + return nil +} + // WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase. func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) { _, err := io.Copy(p.Out, reader) return v1.PodUnknown, err } +// WaitUntilCRDEstablished implements KubeClient WaitUntilCRDEstablished. +func (p *PrintingKubeClient) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { + _, err := io.Copy(p.Out, reader) + return err +} + // Environment provides the context for executing a client request. // // All services in a context are concurrency safe. diff --git a/pkg/tiller/environment/environment_test.go b/pkg/tiller/environment/environment_test.go index 5c19a9b21..962ff4d93 100644 --- a/pkg/tiller/environment/environment_test.go +++ b/pkg/tiller/environment/environment_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "k8s.io/api/core/v1" - "k8s.io/cli-runtime/pkg/genericclioptions/resource" + v1 "k8s.io/api/core/v1" + "k8s.io/cli-runtime/pkg/resource" "k8s.io/helm/pkg/chartutil" "k8s.io/helm/pkg/kube" @@ -49,9 +49,15 @@ func (k *mockKubeClient) Get(ns string, r io.Reader) (string, error) { func (k *mockKubeClient) Delete(ns string, r io.Reader) error { return nil } +func (k *mockKubeClient) DeleteWithTimeout(ns string, r io.Reader, timeout int64, shouldWait bool) error { + return nil +} func (k *mockKubeClient) Update(ns string, currentReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error { return nil } +func (k *mockKubeClient) UpdateWithOptions(ns string, currentReader, modifiedReader io.Reader, opts kube.UpdateOptions) error { + return nil +} func (k *mockKubeClient) WatchUntilReady(ns string, r io.Reader, timeout int64, shouldWait bool) error { return nil } @@ -61,6 +67,9 @@ func (k *mockKubeClient) Build(ns string, reader io.Reader) (kube.Result, error) func (k *mockKubeClient) BuildUnstructured(ns string, reader io.Reader) (kube.Result, error) { return []*resource.Info{}, nil } +func (k *mockKubeClient) Validate(ns string, reader io.Reader) error { + return nil +} func (k *mockKubeClient) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) { return v1.PodUnknown, nil } @@ -69,6 +78,10 @@ func (k *mockKubeClient) WaitAndGetCompletedPodStatus(namespace string, reader i return "", nil } +func (k *mockKubeClient) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { + return nil +} + var _ Engine = &mockEngine{} var _ KubeClient = &mockKubeClient{} var _ KubeClient = &PrintingKubeClient{} diff --git a/pkg/tiller/hooks.go b/pkg/tiller/hooks.go index 472301022..0eae3c475 100644 --- a/pkg/tiller/hooks.go +++ b/pkg/tiller/hooks.go @@ -53,6 +53,9 @@ var deletePolices = map[string]release.Hook_DeletePolicy{ hooks.BeforeHookCreation: release.Hook_BEFORE_HOOK_CREATION, } +// Timeout used when deleting resources with a hook-delete-policy. 
+const defaultHookDeleteTimeoutInSeconds = int64(60)
+
 // Manifest represents a manifest file, which has a name and some content.
 type Manifest = manifest.Manifest
 
@@ -174,13 +177,6 @@ func (file *manifestFile) sort(result *result) error {
 				isUnknownHook = true
 				break
 			}
-			if e == release.Hook_CRD_INSTALL {
-				result.generic = append(result.generic, Manifest{
-					Name:    file.path,
-					Content: m,
-					Head:    &entry,
-				})
-			}
 			h.Events = append(h.Events, e)
 		}
 
@@ -199,6 +195,18 @@ func (file *manifestFile) sort(result *result) error {
 				log.Printf("info: skipping unknown hook delete policy: %q", value)
 			}
 		})
+
+		// Only check for delete timeout annotation if there is a deletion policy.
+		if len(h.DeletePolicies) > 0 {
+			h.DeleteTimeout = defaultHookDeleteTimeoutInSeconds
+			operateAnnotationValues(entry, hooks.HookDeleteTimeoutAnno, func(value string) {
+				if timeout, err := strconv.ParseInt(value, 10, 64); err == nil && timeout >= 0 {
+					h.DeleteTimeout = timeout
+				} else {
+					log.Printf("info: ignoring invalid hook delete timeout value: %q", value)
+				}
+			})
+		}
 	}
 	return nil
 }
diff --git a/pkg/tiller/hooks_test.go b/pkg/tiller/hooks_test.go
index 86c89b8f3..daf07252e 100644
--- a/pkg/tiller/hooks_test.go
+++ b/pkg/tiller/hooks_test.go
@@ -17,8 +17,10 @@ limitations under the License.
 package tiller
 
 import (
+	"bytes"
 	"reflect"
 	"testing"
+	"text/template"
 
 	"github.com/ghodss/yaml"
 
@@ -131,21 +133,6 @@ metadata:
   name: example-test
   annotations:
     "helm.sh/hook": test-success
-`,
-	},
-	{
-		name:  []string{"ninth"},
-		path:  "nine",
-		kind:  []string{"CustomResourceDefinition"},
-		hooks: map[string][]release.Hook_Event{"ninth": {release.Hook_CRD_INSTALL}},
-		manifest: `apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: ninth
-  labels:
-    doesnot: matter
-  annotations:
-    "helm.sh/hook": crd-install
 `,
 	},
 }
@@ -161,22 +148,22 @@ metadata:
 	}
 
 	// This test will fail if 'six' or 'seven' was added.
-	// changed to account for CustomResourceDefinition with crd-install hook being added to generic list of manifests
-	if len(generic) != 3 {
-		t.Errorf("Expected 3 generic manifests, got %d", len(generic))
+	if len(generic) != 2 {
+		t.Errorf("Expected 2 generic manifests, got %d", len(generic))
 	}
-	// changed to account for 5 hooks now that there is a crd-install hook added as member 9 of the data list. It was 4 before.
- if len(hs) != 5 { - t.Errorf("Expected 5 hooks, got %d", len(hs)) + if len(hs) != 4 { + t.Errorf("Expected 4 hooks, got %d", len(hs)) } for _, out := range hs { - t.Logf("Checking name %s path %s and kind %s", out.Name, out.Path, out.Kind) found := false for _, expect := range data { if out.Path == expect.path { found = true + if out.Path != expect.path { + t.Errorf("Expected path %s, got %s", expect.path, out.Path) + } nameFound := false for _, expectedName := range expect.name { if out.Name == expectedName { @@ -224,8 +211,8 @@ metadata: name := sh.Metadata.Name - //only keep track of non-hook manifests, that are not CustomResourceDefinitions with crd-install - if err == nil && (s.hooks[name] == nil || s.hooks[name][0] == release.Hook_CRD_INSTALL) { + //only keep track of non-hook manifests + if err == nil && s.hooks[name] == nil { another := Manifest{ Content: m, Name: name, @@ -244,6 +231,110 @@ metadata: } } +var manifestTemplate = ` +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: example.com + labels: + app: example-crd + annotations: + helm.sh/hook: crd-install +{{- if .HookDeletePolicy}} + {{ .HookDeletePolicy }} +{{- end }} +{{- if .HookDeleteTimeout}} + {{ .HookDeleteTimeout }} +{{- end }} +spec: + group: example.com + version: v1alpha1 + names: + kind: example + plural: examples + scope: Cluster +` + +type manifestTemplateData struct { + HookDeletePolicy, HookDeleteTimeout string +} + +func TestSortManifestsHookDeletion(t *testing.T) { + testCases := map[string]struct { + templateData manifestTemplateData + hasDeletePolicy bool + deletePolicy release.Hook_DeletePolicy + deleteTimeout int64 + }{ + "No delete policy": { + templateData: manifestTemplateData{}, + hasDeletePolicy: false, + deletePolicy: release.Hook_BEFORE_HOOK_CREATION, + deleteTimeout: 0, + }, + "Delete policy, no delete timeout": { + templateData: manifestTemplateData{ + HookDeletePolicy: "helm.sh/hook-delete-policy: before-hook-creation", + }, + hasDeletePolicy: true, + deletePolicy: release.Hook_BEFORE_HOOK_CREATION, + deleteTimeout: defaultHookDeleteTimeoutInSeconds, + }, + "Delete policy and delete timeout": { + templateData: manifestTemplateData{ + HookDeletePolicy: "helm.sh/hook-delete-policy: hook-succeeded", + HookDeleteTimeout: `helm.sh/hook-delete-timeout: "420"`, + }, + hasDeletePolicy: true, + deletePolicy: release.Hook_SUCCEEDED, + deleteTimeout: 420, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + tmpl := template.Must(template.New("manifest").Parse(manifestTemplate)) + var buf bytes.Buffer + err := tmpl.Execute(&buf, tc.templateData) + if err != nil { + t.Error(err) + } + + manifests := map[string]string{ + "exampleManifest": buf.String(), + } + + hs, _, err := sortManifests(manifests, chartutil.NewVersionSet("v1", "v1beta1"), InstallOrder) + if err != nil { + t.Error(err) + } + + if got, want := len(hs), 1; got != want { + t.Errorf("expected %d hooks, but got %d", want, got) + } + hook := hs[0] + + if len(hook.DeletePolicies) == 0 { + if tc.hasDeletePolicy { + t.Errorf("expected a policy, but got zero") + } + } else { + if !tc.hasDeletePolicy { + t.Errorf("expected no delete policies, but got one") + } + policy := hook.DeletePolicies[0] + if got, want := policy, tc.deletePolicy; got != want { + t.Errorf("expected delete policy %q, but got %q", want, got) + } + } + + if got, want := hook.DeleteTimeout, tc.deleteTimeout; got != want { + t.Errorf("expected timeout %d, but got %d", want, got) + } + }) + } +} + func 
TestVersionSet(t *testing.T) { vs := chartutil.NewVersionSet("v1", "v1beta1", "extensions/alpha5", "batch/v1") diff --git a/pkg/tiller/kind_sorter.go b/pkg/tiller/kind_sorter.go index 8aff4e6c1..90678cce5 100644 --- a/pkg/tiller/kind_sorter.go +++ b/pkg/tiller/kind_sorter.go @@ -28,6 +28,7 @@ type SortOrder []string // Those occurring earlier in the list get installed before those occurring later in the list. var InstallOrder SortOrder = []string{ "Namespace", + "NetworkPolicy", "ResourceQuota", "LimitRange", "PodSecurityPolicy", @@ -40,15 +41,20 @@ var InstallOrder SortOrder = []string{ "ServiceAccount", "CustomResourceDefinition", "ClusterRole", + "ClusterRoleList", "ClusterRoleBinding", + "ClusterRoleBindingList", "Role", + "RoleList", "RoleBinding", + "RoleBindingList", "Service", "DaemonSet", "Pod", "ReplicationController", "ReplicaSet", "Deployment", + "HorizontalPodAutoscaler", "StatefulSet", "Job", "CronJob", @@ -66,14 +72,19 @@ var UninstallOrder SortOrder = []string{ "CronJob", "Job", "StatefulSet", + "HorizontalPodAutoscaler", "Deployment", "ReplicaSet", "ReplicationController", "Pod", "DaemonSet", + "RoleBindingList", "RoleBinding", + "RoleList", "Role", + "ClusterRoleBindingList", "ClusterRoleBinding", + "ClusterRoleList", "ClusterRole", "CustomResourceDefinition", "ServiceAccount", @@ -86,6 +97,7 @@ var UninstallOrder SortOrder = []string{ "PodSecurityPolicy", "LimitRange", "ResourceQuota", + "NetworkPolicy", "Namespace", } @@ -124,14 +136,15 @@ func (k *kindSorter) Less(i, j int) bool { b := k.manifests[j] first, aok := k.ordering[a.Head.Kind] second, bok := k.ordering[b.Head.Kind] - // if same kind (including unknown) sub sort alphanumeric - if first == second { - // if both are unknown and of different kind sort by kind alphabetically - if !aok && !bok && a.Head.Kind != b.Head.Kind { + + if !aok && !bok { + // if both are unknown then sort alphabetically by kind and name + if a.Head.Kind != b.Head.Kind { return a.Head.Kind < b.Head.Kind } return a.Name < b.Name } + // unknown kind is last if !aok { return false @@ -139,6 +152,11 @@ func (k *kindSorter) Less(i, j int) bool { if !bok { return true } + + // if same kind sub sort alphanumeric + if first == second { + return a.Name < b.Name + } // sort different kinds return first < second } diff --git a/pkg/tiller/kind_sorter_test.go b/pkg/tiller/kind_sorter_test.go index 1c187e90d..f4106f62e 100644 --- a/pkg/tiller/kind_sorter_test.go +++ b/pkg/tiller/kind_sorter_test.go @@ -29,10 +29,18 @@ func TestKindSorter(t *testing.T) { Name: "i", Head: &util.SimpleHead{Kind: "ClusterRole"}, }, + { + Name: "I", + Head: &util.SimpleHead{Kind: "ClusterRoleList"}, + }, { Name: "j", Head: &util.SimpleHead{Kind: "ClusterRoleBinding"}, }, + { + Name: "J", + Head: &util.SimpleHead{Kind: "ClusterRoleBindingList"}, + }, { Name: "e", Head: &util.SimpleHead{Kind: "ConfigMap"}, @@ -105,10 +113,18 @@ func TestKindSorter(t *testing.T) { Name: "k", Head: &util.SimpleHead{Kind: "Role"}, }, + { + Name: "K", + Head: &util.SimpleHead{Kind: "RoleList"}, + }, { Name: "l", Head: &util.SimpleHead{Kind: "RoleBinding"}, }, + { + Name: "L", + Head: &util.SimpleHead{Kind: "RoleBindingList"}, + }, { Name: "d", Head: &util.SimpleHead{Kind: "Secret"}, @@ -137,6 +153,14 @@ func TestKindSorter(t *testing.T) { Name: "z", Head: &util.SimpleHead{Kind: "PodDisruptionBudget"}, }, + { + Name: "x", + Head: &util.SimpleHead{Kind: "HorizontalPodAutoscaler"}, + }, + { + Name: "B", + Head: &util.SimpleHead{Kind: "NetworkPolicy"}, + }, } for _, test := range []struct { @@ -144,8 
+168,8 @@ func TestKindSorter(t *testing.T) { order SortOrder expected string }{ - {"install", InstallOrder, "abc3zde1fgh2ijklmnopqrstuvw!"}, - {"uninstall", UninstallOrder, "wvmutsrqponlkji2hgf1edz3cba!"}, + {"install", InstallOrder, "aBbc3zde1fgh2iIjJkKlLmnopqrxstuvw!"}, + {"uninstall", UninstallOrder, "wvmutsxrqponLlKkJjIi2hgf1edz3cbBa!"}, } { var buf bytes.Buffer t.Run(test.description, func(t *testing.T) { @@ -223,3 +247,24 @@ func TestKindSorterSubSort(t *testing.T) { }) } } + +func TestKindSorterNamespaceAgainstUnknown(t *testing.T) { + unknown := Manifest{ + Name: "a", + Head: &util.SimpleHead{Kind: "Unknown"}, + } + namespace := Manifest{ + Name: "b", + Head: &util.SimpleHead{Kind: "Namespace"}, + } + + manifests := []Manifest{unknown, namespace} + sortByKind(manifests, InstallOrder) + + expectedOrder := []Manifest{namespace, unknown} + for i, manifest := range manifests { + if expectedOrder[i].Name != manifest.Name { + t.Errorf("Expected %s, got %s", expectedOrder[i].Name, manifest.Name) + } + } +} diff --git a/pkg/tiller/release_install.go b/pkg/tiller/release_install.go index 973da3581..3cfbcf30e 100644 --- a/pkg/tiller/release_install.go +++ b/pkg/tiller/release_install.go @@ -84,7 +84,7 @@ func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*re return nil, err } - hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions) + hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, req.SubNotes, caps.APIVersions) if err != nil { // Return a release with partial data so that client can show debugging // information. diff --git a/pkg/tiller/release_install_test.go b/pkg/tiller/release_install_test.go index f5e84d870..0d985b2e5 100644 --- a/pkg/tiller/release_install_test.go +++ b/pkg/tiller/release_install_test.go @@ -268,7 +268,7 @@ func TestInstallRelease_WrongTillerVersion(t *testing.T) { } } -func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) { +func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) { c := helm.NewContext() rs := rsFixture() @@ -300,6 +300,39 @@ func TestInstallRelease_WithChartAndDependencyNotes(t *testing.T) { } } +func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) { + c := helm.NewContext() + rs := rsFixture() + + req := installRequest(withSubNotes(), + withChart( + withNotes(notesText), + withDependency(withNotes(notesText+" child")), + )) + res, err := rs.InstallRelease(c, req) + if err != nil { + t.Fatalf("Failed install: %s", err) + } + if res.Release.Name == "" { + t.Errorf("Expected release name.") + } + + rel, err := rs.env.Releases.Get(res.Release.Name, res.Release.Version) + if err != nil { + t.Errorf("Expected release for %s (%v).", res.Release.Name, rs.env.Releases) + } + + t.Logf("rel: %v", rel) + + if !strings.Contains(rel.Info.Status.Notes, notesText) || !strings.Contains(rel.Info.Status.Notes, notesText+" child") { + t.Fatalf("Expected '%s', got '%s'", notesText+"\n"+notesText+" child", rel.Info.Status.Notes) + } + + if rel.Info.Description != "Install complete" { + t.Errorf("unexpected description: %s", rel.Info.Description) + } +} + func TestInstallRelease_DryRun(t *testing.T) { c := helm.NewContext() rs := rsFixture() @@ -469,7 +502,7 @@ func TestInstallRelease_KubeVersion(t *testing.T) { rs := rsFixture() req := installRequest( - withChart(withKube(">=0.0.0")), + withChart(withKube(">=0.0.0-0")), ) _, err := rs.InstallRelease(c, req) if err != nil { diff --git a/pkg/tiller/release_list.go 
b/pkg/tiller/release_list.go index 3299d3ef2..cd3b63856 100644 --- a/pkg/tiller/release_list.go +++ b/pkg/tiller/release_list.go @@ -126,7 +126,7 @@ func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream s return nil } -// partition packs releases into slices upto the capacity cap in bytes. +// partition packs releases into slices up to the capacity cap in bytes. func (s *ReleaseServer) partition(rels []*release.Release, cap int) <-chan []*release.Release { chunks := make(chan []*release.Release, 1) go func() { @@ -140,7 +140,7 @@ func (s *ReleaseServer) partition(rels []*release.Release, cap int) <-chan []*re // Over-cap, push chunk onto channel to send over gRPC stream s.Log("partitioned at %d with %d releases (cap=%d)", fill, len(chunk), cap) chunks <- chunk - // reset paritioning state + // reset partitioning state chunk = nil fill = 0 } diff --git a/pkg/tiller/release_modules.go b/pkg/tiller/release_modules.go index 85995480c..360794481 100644 --- a/pkg/tiller/release_modules.go +++ b/pkg/tiller/release_modules.go @@ -58,14 +58,26 @@ func (m *LocalReleaseModule) Create(r *release.Release, req *services.InstallRel func (m *LocalReleaseModule) Update(current, target *release.Release, req *services.UpdateReleaseRequest, env *environment.Environment) error { c := bytes.NewBufferString(current.Manifest) t := bytes.NewBufferString(target.Manifest) - return env.KubeClient.Update(target.Namespace, c, t, req.Force, req.Recreate, req.Timeout, req.Wait) + return env.KubeClient.UpdateWithOptions(target.Namespace, c, t, kube.UpdateOptions{ + Force: req.Force, + Recreate: req.Recreate, + Timeout: req.Timeout, + ShouldWait: req.Wait, + CleanupOnFail: req.CleanupOnFail, + }) } // Rollback performs a rollback from current to target release func (m *LocalReleaseModule) Rollback(current, target *release.Release, req *services.RollbackReleaseRequest, env *environment.Environment) error { c := bytes.NewBufferString(current.Manifest) t := bytes.NewBufferString(target.Manifest) - return env.KubeClient.Update(target.Namespace, c, t, req.Force, req.Recreate, req.Timeout, req.Wait) + return env.KubeClient.UpdateWithOptions(target.Namespace, c, t, kube.UpdateOptions{ + Force: req.Force, + Recreate: req.Recreate, + Timeout: req.Timeout, + ShouldWait: req.Wait, + CleanupOnFail: req.CleanupOnFail, + }) } // Status returns kubectl-like formatted status of release objects diff --git a/pkg/tiller/release_server.go b/pkg/tiller/release_server.go index cf7748f44..b174a32cf 100644 --- a/pkg/tiller/release_server.go +++ b/pkg/tiller/release_server.go @@ -23,6 +23,7 @@ import ( "path" "regexp" "strings" + "time" "github.com/technosophos/moniker" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,18 +41,20 @@ import ( "k8s.io/helm/pkg/version" ) -// releaseNameMaxLen is the maximum length of a release name. -// -// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for -// charts to add data. Effectively, that gives us 53 chars. -// See https://github.com/kubernetes/helm/issues/1528 -const releaseNameMaxLen = 53 - -// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine -// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually -// wants to see this file after rendering in the status command. However, it must be a suffix -// since there can be filepath in front of it. -const notesFileSuffix = "NOTES.txt" +const ( + // releaseNameMaxLen is the maximum length of a release name. 
+ // + // As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for + // charts to add data. Effectively, that gives us 53 chars. + // See https://github.com/kubernetes/helm/issues/1528 + releaseNameMaxLen = 53 + + // NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine + // but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually + // wants to see this file after rendering in the status command. However, it must be a suffix + // since there can be filepath in front of it. + notesFileSuffix = "NOTES.txt" +) var ( // errMissingChart indicates that a chart was not provided. @@ -61,7 +64,7 @@ var ( // errInvalidRevision indicates that an invalid release revision number was provided. errInvalidRevision = errors.New("invalid release revision") //errInvalidName indicates that an invalid release name was provided - errInvalidName = errors.New("invalid release name, must match regex ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$ and the length must not longer than 53") + errInvalidName = errors.New("invalid release name, must match regex ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$ and the length must not be longer than 53") ) // ListDefaultLimit is the default limit for number of items returned in a list. @@ -115,7 +118,7 @@ func NewReleaseServer(env *environment.Environment, clientset kubernetes.Interfa // request values are not altered. func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current *release.Release) error { if req.ResetValues { - // If ResetValues is set, we comletely ignore current.Config. + // If ResetValues is set, we completely ignore current.Config. s.Log("resetting values to the chart's original version") return nil } @@ -191,11 +194,11 @@ func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) { rel := h[0] if st := rel.Info.Status.Code; reuse && (st == release.Status_DELETED || st == release.Status_FAILED) { - // Allowe re-use of names if the previous release is marked deleted. + // Allow re-use of names if the previous release is marked deleted. s.Log("name %s exists but is not in use, reusing name", start) return start, nil } else if reuse { - return "", fmt.Errorf("a released named %s is in use, cannot re-use a name that is still in use", start) + return "", fmt.Errorf("a release named %s is in use, cannot re-use a name that is still in use", start) } return "", fmt.Errorf("a release named %s already exists.\nRun: helm ls --all %s; to check the status of the release\nOr run: helm del --purge %s; to delete it", start, start, start) @@ -248,7 +251,7 @@ func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, e if err != nil { return nil, err } - vs, err := GetVersionSet(disc) + vs, err := GetAllVersionSet(disc) if err != nil { return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err) } @@ -259,6 +262,59 @@ func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, e }, nil } +// GetAllVersionSet retrieves a set of available k8s API versions and objects +// +// This is a different function from GetVersionSet because the signature changed. 
+// To keep compatibility through the public functions this needed to be a new +// function.GetAllVersionSet +// TODO(mattfarina): In Helm v3 merge with GetVersionSet +func GetAllVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) { + groups, resources, err := client.ServerGroupsAndResources() + if err != nil { + return chartutil.DefaultVersionSet, err + } + + // FIXME: The Kubernetes test fixture for cli appears to always return nil + // for calls to Discovery().ServerGroupsAndResources(). So in this case, we + // return the default API list. This is also a safe value to return in any + // other odd-ball case. + if len(groups) == 0 && len(resources) == 0 { + return chartutil.DefaultVersionSet, nil + } + + versionMap := make(map[string]interface{}) + versions := []string{} + + // Extract the groups + for _, g := range groups { + for _, gv := range g.Versions { + versionMap[gv.GroupVersion] = struct{}{} + } + } + + // Extract the resources + var id string + var ok bool + for _, r := range resources { + for _, rl := range r.APIResources { + + // A Kind at a GroupVersion can show up more than once. We only want + // it displayed once in the final output. + id = path.Join(r.GroupVersion, rl.Kind) + if _, ok = versionMap[id]; !ok { + versionMap[id] = struct{}{} + } + } + } + + // Convert to a form that NewVersionSet can use + for k := range versionMap { + versions = append(versions, k) + } + + return chartutil.NewVersionSet(versions...), nil +} + // GetVersionSet retrieves a set of available k8s API versions func GetVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet, error) { groups, err := client.ServerGroups() @@ -278,7 +334,7 @@ func GetVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet return chartutil.NewVersionSet(versions...), nil } -func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) { +func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, subNotes bool, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) { // Guard to make sure Tiller is at the right version to handle this chart. sver := version.GetVersion() if ch.Metadata.TillerVersion != "" && @@ -307,18 +363,23 @@ func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values // text file. We have to spin through this map because the file contains path information, so we // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip // it in the sortHooks. - notes := "" + var notesBuffer bytes.Buffer for k, v := range files { if strings.HasSuffix(k, notesFileSuffix) { - // Only apply the notes if it belongs to the parent chart - // Note: Do not use filePath.Join since it creates a path with \ which is not expected - if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) { - notes = v + if subNotes || (k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix)) { + + // If buffer contains data, add newline before adding more + if notesBuffer.Len() > 0 { + notesBuffer.WriteString("\n") + } + notesBuffer.WriteString(v) } delete(files, k) } } + notes := notesBuffer.String() + // Sort hooks, manifests, and partials. Only hooks and manifests are returned, // as partials are not used after renderer.Render. Empty manifests are also // removed here. 
@@ -394,7 +455,7 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
 		b.Reset()
 		b.WriteString(h.Manifest)
 
-		// We can't watch CRDs
+		// We can't watch CRDs, but need to wait until they reach the established state before continuing
 		if hook != hooks.CRDInstall {
 			if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
 				s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
@@ -405,6 +466,11 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
 				}
 				return err
 			}
+		} else {
+			if err := kubeCli.WaitUntilCRDEstablished(b, time.Duration(timeout)*time.Second); err != nil {
+				s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
+				return err
+			}
 		}
 	}
 
@@ -423,8 +489,7 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
 
 func validateManifest(c environment.KubeClient, ns string, manifest []byte) error {
 	r := bytes.NewReader(manifest)
-	_, err := c.BuildUnstructured(ns, r)
-	return err
+	return c.Validate(ns, r)
 }
 
 func validateReleaseName(releaseName string) error {
@@ -443,7 +508,8 @@ func (s *ReleaseServer) deleteHookByPolicy(h *release.Hook, policy string, name,
 	b := bytes.NewBufferString(h.Manifest)
 	if hookHasDeletePolicy(h, policy) {
 		s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, policy)
-		if errHookDelete := kubeCli.Delete(namespace, b); errHookDelete != nil {
+		waitForDelete := h.DeleteTimeout > 0
+		if errHookDelete := kubeCli.DeleteWithTimeout(namespace, b, h.DeleteTimeout, waitForDelete); errHookDelete != nil {
 			s.Log("warning: Release %s %s %s could not be deleted: %s", name, hook, h.Path, errHookDelete)
 			return errHookDelete
 		}
@@ -451,7 +517,7 @@
 	return nil
 }
 
-// hookShouldBeDeleted determines whether the defined hook deletion policy matches the hook deletion polices
+// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion policies
 // supported by helm. If so, mark the hook as one should be deleted.
func hookHasDeletePolicy(h *release.Hook, policy string) bool { if dp, ok := deletePolices[policy]; ok { diff --git a/pkg/tiller/release_server_test.go b/pkg/tiller/release_server_test.go index d94ea2eeb..a383add91 100644 --- a/pkg/tiller/release_server_test.go +++ b/pkg/tiller/release_server_test.go @@ -31,8 +31,8 @@ import ( "github.com/technosophos/moniker" "golang.org/x/net/context" "google.golang.org/grpc/metadata" - "k8s.io/api/core/v1" - "k8s.io/cli-runtime/pkg/genericclioptions/resource" + v1 "k8s.io/api/core/v1" + "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes/fake" "k8s.io/helm/pkg/helm" @@ -89,13 +89,22 @@ spec: var manifestWithKeep = `kind: ConfigMap metadata: - name: test-cm-keep + name: test-cm-keep-a annotations: "helm.sh/resource-policy": keep data: name: value ` +var manifestWithKeepEmpty = `kind: ConfigMap +metadata: + name: test-cm-keep-b + annotations: + "helm.sh/resource-policy": "" +data: + name: value +` + var manifestWithUpgradeHooks = `kind: ConfigMap metadata: name: test-cm @@ -221,6 +230,12 @@ func withChart(chartOpts ...chartOption) installOption { } } +func withSubNotes() installOption { + return func(opts *installOptions) { + opts.SubNotes = true + } +} + func installRequest(opts ...installOption) *services.InstallReleaseRequest { reqOpts := &installOptions{ &services.InstallReleaseRequest{ @@ -323,6 +338,20 @@ func TestValidName(t *testing.T) { } } +func TestGetAllVersionSet(t *testing.T) { + rs := rsFixture() + vs, err := GetAllVersionSet(rs.clientset.Discovery()) + if err != nil { + t.Error(err) + } + if !vs.Has("v1") { + t.Errorf("Expected supported versions to at least include v1.") + } + if vs.Has("nosuchversion/v1") { + t.Error("Non-existent version is reported found.") + } +} + func TestGetVersionSet(t *testing.T) { rs := rsFixture() vs, err := GetVersionSet(rs.clientset.Discovery()) @@ -443,23 +472,27 @@ func releaseWithKeepStub(rlsName string) *release.Release { Name: "bunnychart", }, Templates: []*chart.Template{ - {Name: "templates/configmap", Data: []byte(manifestWithKeep)}, + {Name: "templates/configmap-keep-a", Data: []byte(manifestWithKeep)}, + {Name: "templates/configmap-keep-b", Data: []byte(manifestWithKeepEmpty)}, }, } date := timestamp.Timestamp{Seconds: 242085845, Nanos: 0} - return &release.Release{ + rl := &release.Release{ Name: rlsName, Info: &release.Info{ FirstDeployed: &date, LastDeployed: &date, Status: &release.Status{Code: release.Status_DEPLOYED}, }, - Chart: ch, - Config: &chart.Config{Raw: `name: value`}, - Version: 1, - Manifest: manifestWithKeep, + Chart: ch, + Config: &chart.Config{Raw: `name: value`}, + Version: 1, } + + helm.RenderReleaseMock(rl, false) + + return rl } func MockEnvironment() *environment.Environment { @@ -481,6 +514,15 @@ type updateFailingKubeClient struct { } func (u *updateFailingKubeClient) Update(namespace string, originalReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error { + return u.UpdateWithOptions(namespace, originalReader, modifiedReader, kube.UpdateOptions{ + Force: force, + Recreate: recreate, + Timeout: timeout, + ShouldWait: shouldWait, + }) +} + +func (u *updateFailingKubeClient) UpdateWithOptions(namespace string, originalReader, modifiedReader io.Reader, opts kube.UpdateOptions) error { return errors.New("Failed update in kube client") } @@ -512,6 +554,10 @@ func (d *deleteFailingKubeClient) Delete(ns string, r io.Reader) error { return kube.ErrNoObjectsVisited } +func (d *deleteFailingKubeClient) DeleteWithTimeout(ns 
string, r io.Reader, timeout int64, shouldWait bool) error { + return kube.ErrNoObjectsVisited +} + type mockListServer struct { val *services.ListReleasesResponse } @@ -584,6 +630,9 @@ func (kc *mockHooksKubeClient) Get(ns string, r io.Reader) (string, error) { return "", nil } func (kc *mockHooksKubeClient) Delete(ns string, r io.Reader) error { + return kc.DeleteWithTimeout(ns, r, 0, false) +} +func (kc *mockHooksKubeClient) DeleteWithTimeout(ns string, r io.Reader, timeout int64, shouldWait bool) error { manifest, err := kc.makeManifest(r) if err != nil { return err @@ -613,16 +662,26 @@ func (kc *mockHooksKubeClient) WatchUntilReady(ns string, r io.Reader, timeout i func (kc *mockHooksKubeClient) Update(ns string, currentReader, modifiedReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error { return nil } +func (kc *mockHooksKubeClient) UpdateWithOptions(ns string, currentReader, modifiedReader io.Reader, opts kube.UpdateOptions) error { + return nil +} func (kc *mockHooksKubeClient) Build(ns string, reader io.Reader) (kube.Result, error) { return []*resource.Info{}, nil } func (kc *mockHooksKubeClient) BuildUnstructured(ns string, reader io.Reader) (kube.Result, error) { return []*resource.Info{}, nil } +func (kc *mockHooksKubeClient) Validate(ns string, reader io.Reader) error { + return nil +} func (kc *mockHooksKubeClient) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) { return v1.PodUnknown, nil } +func (kc *mockHooksKubeClient) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { + return nil +} + func deletePolicyStub(kubeClient *mockHooksKubeClient) *ReleaseServer { e := environment.New() e.Releases = storage.Init(driver.NewMemory()) diff --git a/pkg/tiller/release_uninstall_test.go b/pkg/tiller/release_uninstall_test.go index cb59b6bf5..d95a52c4d 100644 --- a/pkg/tiller/release_uninstall_test.go +++ b/pkg/tiller/release_uninstall_test.go @@ -150,7 +150,10 @@ func TestUninstallReleaseWithKeepPolicy(t *testing.T) { if res.Info == "" { t.Errorf("Expected response info to not be empty") } else { - if !strings.Contains(res.Info, "[ConfigMap] test-cm-keep") { + if !strings.Contains(res.Info, "[ConfigMap] test-cm-keep-a") { + t.Errorf("unexpected output: %s", res.Info) + } + if !strings.Contains(res.Info, "[ConfigMap] test-cm-keep-b") { t.Errorf("unexpected output: %s", res.Info) } } diff --git a/pkg/tiller/release_update.go b/pkg/tiller/release_update.go index 8f3cc4e8e..5fb1552bf 100644 --- a/pkg/tiller/release_update.go +++ b/pkg/tiller/release_update.go @@ -38,8 +38,10 @@ func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateRelease s.Log("preparing update for %s", req.Name) currentRelease, updatedRelease, err := s.prepareUpdate(req) if err != nil { + s.Log("failed to prepare update: %s", err) if req.Force { // Use the --force, Luke. 
+ s.Log("performing force update for %s", req.Name) return s.performUpdateForce(req) } return nil, err @@ -113,7 +115,7 @@ func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*rele return nil, nil, err } - hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions) + hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, req.SubNotes, caps.APIVersions) if err != nil { return nil, nil, err } diff --git a/pkg/tiller/release_update_test.go b/pkg/tiller/release_update_test.go index ea1c88f62..e47e526d6 100644 --- a/pkg/tiller/release_update_test.go +++ b/pkg/tiller/release_update_test.go @@ -604,7 +604,7 @@ func TestUpdateReleasePendingInstall_Force(t *testing.T) { t.Error("Expected failed update") } - expectedError := "a released named forceful-luke is in use, cannot re-use a name that is still in use" + expectedError := "a release named forceful-luke is in use, cannot re-use a name that is still in use" got := err.Error() if err.Error() != expectedError { t.Errorf("Expected error %q, got %q", expectedError, got) diff --git a/pkg/tiller/resource_policy.go b/pkg/tiller/resource_policy.go index cca2391d8..c97621fcd 100644 --- a/pkg/tiller/resource_policy.go +++ b/pkg/tiller/resource_policy.go @@ -24,15 +24,6 @@ import ( "k8s.io/helm/pkg/tiller/environment" ) -// resourcePolicyAnno is the annotation name for a resource policy -const resourcePolicyAnno = "helm.sh/resource-policy" - -// keepPolicy is the resource policy type for keep -// -// This resource policy type allows resources to skip being deleted -// during an uninstallRelease action. -const keepPolicy = "keep" - func filterManifestsToKeep(manifests []Manifest) ([]Manifest, []Manifest) { remaining := []Manifest{} keep := []Manifest{} @@ -43,17 +34,11 @@ func filterManifestsToKeep(manifests []Manifest) ([]Manifest, []Manifest) { continue } - resourcePolicyType, ok := m.Head.Metadata.Annotations[resourcePolicyAnno] - if !ok { - remaining = append(remaining, m) - continue - } - - resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType)) - if resourcePolicyType == keepPolicy { + if kube.ResourcePolicyIsKeep(m.Head.Metadata.Annotations) { keep = append(keep, m) + } else { + remaining = append(remaining, m) } - } return keep, remaining } diff --git a/pkg/tlsutil/cfg.go b/pkg/tlsutil/cfg.go index 2c1dfd340..6c2a829df 100644 --- a/pkg/tlsutil/cfg.go +++ b/pkg/tlsutil/cfg.go @@ -40,7 +40,7 @@ type Options struct { ClientAuth tls.ClientAuthType } -// ClientConfig retusn a TLS configuration for use by a Helm client. +// ClientConfig returns a TLS configuration for use by a Helm client. 
func ClientConfig(opts Options) (cfg *tls.Config, err error) { var cert *tls.Certificate var pool *x509.CertPool diff --git a/pkg/tlsutil/tlsutil_test.go b/pkg/tlsutil/tlsutil_test.go index a4b3c9c22..e4df1e7e7 100644 --- a/pkg/tlsutil/tlsutil_test.go +++ b/pkg/tlsutil/tlsutil_test.go @@ -47,7 +47,7 @@ func TestClientConfig(t *testing.T) { t.Fatalf("expecting 1 client certificates, got %d", got) } if cfg.InsecureSkipVerify { - t.Fatalf("insecure skip verify mistmatch, expecting false") + t.Fatalf("insecure skip verify mismatch, expecting false") } if cfg.RootCAs == nil { t.Fatalf("mismatch tls RootCAs, expecting non-nil") diff --git a/pkg/urlutil/urlutil.go b/pkg/urlutil/urlutil.go index 272907de0..96b691c92 100644 --- a/pkg/urlutil/urlutil.go +++ b/pkg/urlutil/urlutil.go @@ -73,7 +73,7 @@ func ExtractHostname(addr string) (string, error) { return stripPort(u.Host), nil } -// Backported from Go 1.8 because Circle is still on 1.7 +// stripPort from Go 1.8 because Circle is still on 1.7 func stripPort(hostport string) string { colon := strings.IndexByte(hostport, ':') if colon == -1 { diff --git a/pkg/version/version.go b/pkg/version/version.go index dae739500..d32f09c4a 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -26,7 +26,7 @@ var ( // Increment major number for new feature additions and behavioral changes. // Increment minor number for bug fixes and performance enhancements. // Increment patch number for critical fixes to existing releases. - Version = "v2.11" + Version = "v2.14" // BuildMetadata is extra build time data BuildMetadata = "unreleased" diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go index eba573533..315e3c9fe 100644 --- a/pkg/version/version_test.go +++ b/pkg/version/version_test.go @@ -39,9 +39,17 @@ func TestGetVersionProto(t *testing.T) { BuildMetadata = tt.buildMetadata GitCommit = tt.gitCommit GitTreeState = tt.gitTreeState - if versionProto := GetVersionProto(); *versionProto != tt.expected { - t.Errorf("expected Semver(%s), GitCommit(%s) and GitTreeState(%s) to be %v", tt.expected, tt.gitCommit, tt.gitTreeState, *versionProto) + if versionProto := GetVersionProto(); !versionEqual(*versionProto, tt.expected) { + t.Errorf("expected Semver(%s+%s), GitCommit(%s) and GitTreeState(%s) to be %v", tt.version, tt.buildMetadata, tt.gitCommit, tt.gitTreeState, *versionProto) } } +} +func versionEqual(v1 version.Version, v2 version.Version) bool { + if v1.SemVer != v2.SemVer || + v1.GitCommit != v2.GitCommit || + v1.GitTreeState != v2.GitTreeState { + return false + } + return true } diff --git a/rootfs/Dockerfile b/rootfs/Dockerfile index 82dfa0d4c..59ead977a 100644 --- a/rootfs/Dockerfile +++ b/rootfs/Dockerfile @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM alpine:3.7 +FROM alpine:3.10 -RUN apk update && apk add ca-certificates socat && rm -rf /var/cache/apk/* +RUN apk add --no-cache ca-certificates socat ENV HOME /tmp @@ -22,6 +22,5 @@ COPY helm /helm COPY tiller /tiller EXPOSE 44134 -USER nobody +USER 65534 ENTRYPOINT ["/tiller"] - diff --git a/rootfs/Dockerfile.experimental b/rootfs/Dockerfile.experimental index ca0c87f30..0fa9254ee 100644 --- a/rootfs/Dockerfile.experimental +++ b/rootfs/Dockerfile.experimental @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM alpine:3.7 +FROM alpine:3.10 -RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* +RUN apk add --no-cache ca-certificates ENV HOME /tmp COPY tiller /tiller EXPOSE 44134 -USER nobody +USER 65534 ENTRYPOINT ["/tiller", "--experimental-release"] - diff --git a/rootfs/Dockerfile.rudder b/rootfs/Dockerfile.rudder index 61afb8af8..ed153ee67 100644 --- a/rootfs/Dockerfile.rudder +++ b/rootfs/Dockerfile.rudder @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM alpine:3.3 +FROM alpine:3.10 -RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* +RUN apk add --no-cache ca-certificates ENV HOME /tmp diff --git a/scripts/completions.bash b/scripts/completions.bash index c24f3d257..36cb01f15 100644 --- a/scripts/completions.bash +++ b/scripts/completions.bash @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # bash completion for helm -*- shell-script -*- __debug() diff --git a/scripts/get b/scripts/get index bf13d25bc..3f645f807 100755 --- a/scripts/get +++ b/scripts/get @@ -29,7 +29,7 @@ initArch() { case $ARCH in armv5*) ARCH="armv5";; armv6*) ARCH="armv6";; - armv7*) ARCH="armv7";; + armv7*) ARCH="arm";; aarch64) ARCH="arm64";; x86) ARCH="386";; x86_64) ARCH="amd64";; @@ -77,16 +77,16 @@ verifySupported() { # checkDesiredVersion checks if the desired version is available. checkDesiredVersion() { - # Use the GitHub releases webpage for the project to find the desired version for this project. - local release_url="https://github.com/helm/helm/releases/${DESIRED_VERSION:-latest}" - if type "curl" > /dev/null; then - TAG=$(curl -SsL $release_url | awk '/\/tag\//' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}') - elif type "wget" > /dev/null; then - TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}') - fi - if [ "x$TAG" == "x" ]; then - echo "Cannot determine ${DESIRED_VERSION} tag." - exit 1 + if [ "x$DESIRED_VERSION" == "x" ]; then + # Get tag from release URL + local latest_release_url="https://github.com/helm/helm/releases/latest" + if type "curl" > /dev/null; then + TAG=$(curl -Ls -o /dev/null -w %{url_effective} $latest_release_url | grep -oE "[^/]+$" ) + elif type "wget" > /dev/null; then + TAG=$(wget $latest_release_url --server-response -O /dev/null 2>&1 | awk '/^ Location: /{DEST=$2} END{ print DEST}' | grep -oE "[^/]+$") + fi + else + TAG=$DESIRED_VERSION fi } @@ -94,7 +94,7 @@ checkDesiredVersion() { # if it needs to be changed. checkHelmInstalledVersion() { if [[ -f "${HELM_INSTALL_DIR}/${PROJECT_NAME}" ]]; then - local version=$(helm version -c | grep '^Client' | cut -d'"' -f2) + local version=$("${HELM_INSTALL_DIR}/${PROJECT_NAME}" version -c | grep '^Client' | cut -d'"' -f2) if [[ "$version" == "$TAG" ]]; then echo "Helm ${version} is already ${DESIRED_VERSION:-latest}" return 0 @@ -111,7 +111,7 @@ checkHelmInstalledVersion() { # for that binary. downloadFile() { HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz" - DOWNLOAD_URL="https://kubernetes-helm.storage.googleapis.com/$HELM_DIST" + DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST" CHECKSUM_URL="$DOWNLOAD_URL.sha256" HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)" HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"