Moving Tiller to a new location

The Tiller image's primary location is now GitHub Container Registry (ghcr.io).
It is also replicated to GCR, Docker Hub, and Quay.

Signed-off-by: Matt Farina <matt@mattfarina.com>
pull/8884/head
Matt Farina 5 years ago
parent 0d9779c4f1
commit 01dc62ecfd
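
After this change, the same Tiller image can be pulled from the primary registry or from any mirror. A quick sanity check, using the v2.17.0 tag referenced elsewhere in this commit (any released tag should behave the same way):

```console
$ docker pull ghcr.io/helm/tiller:v2.17.0            # GitHub Container Registry (primary)
$ docker pull gcr.io/kubernetes-helm/tiller:v2.17.0  # GCR mirror (the old primary)
$ docker pull helmpack/tiller:v2.17.0                # Docker Hub mirror
$ docker pull quay.io/helmpack/tiller:v2.17.0        # Quay mirror
```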

@@ -29,8 +29,9 @@ VERSION=
 if [[ -n "${CIRCLE_TAG:-}" ]]; then
   VERSION="${CIRCLE_TAG}"
 else
-  echo "Skipping deploy step; this is not a tag"
-  exit
+  # Canary version is used with helm init --canary-image flag.
+  # Does not push canary binary which is Helm v3.
+  VERSION="canary"
 fi
 
 echo "Install docker client"
@@ -44,35 +45,62 @@ export CLOUDSDK_CORE_DISABLE_PROMPTS=1
 curl https://sdk.cloud.google.com | bash
 ${HOME}/google-cloud-sdk/bin/gcloud --quiet components update
 
+echo "Configuring GitHub Container Repository configuration"
+echo ${GH_TOKEN_PUSH_TILLER} | docker login ghcr.io -u helm-bot --password-stdin
+
 echo "Configuring gcloud authentication"
 echo "${GCLOUD_SERVICE_KEY}" | base64 --decode > "${HOME}/gcloud-service-key.json"
 ${HOME}/google-cloud-sdk/bin/gcloud auth activate-service-account --key-file "${HOME}/gcloud-service-key.json"
 ${HOME}/google-cloud-sdk/bin/gcloud config set project "${PROJECT_NAME}"
 docker login -u _json_key -p "$(cat ${HOME}/gcloud-service-key.json)" https://gcr.io
 
-echo "Installing Azure CLI"
-apt update
-apt install -y apt-transport-https
-echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ stretch main" | tee /etc/apt/sources.list.d/azure-cli.list
-curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add
-apt update
-apt install -y azure-cli
+echo "Configuring Docker Hub configuration"
+echo ${DOCKER_PASS} | docker login -u ${DOCKER_USER} --password-stdin
+
+echo "Configuring Quay configuration"
+echo ${QUAY_PASS} | docker login quay.io -u ${QUAY_USER} --password-stdin
 
 echo "Building the tiller image"
 make docker-build VERSION="${VERSION}"
 
+# Image is pushed to GitHub container repository (ghcr.io),
+# GCR, Docker Hub, and Quay.
+echo "Pushing image to ghcr.io"
+docker push "ghcr.io/helm/tiller:${VERSION}"
+
 echo "Pushing image to gcr.io"
+docker tag "ghcr.io/helm/tiller:${VERSION}" "gcr.io/kubernetes-helm/tiller:${VERSION}"
 docker push "gcr.io/kubernetes-helm/tiller:${VERSION}"
 
-echo "Building helm binaries"
-make build-cross
-make dist checksum VERSION="${VERSION}"
-
-echo "Pushing binaries to gs bucket"
-${HOME}/google-cloud-sdk/bin/gsutil cp ./_dist/* "gs://${PROJECT_NAME}"
-
-echo "Pushing binaries to Azure"
-az storage blob upload-batch -s _dist/ -d "$AZURE_STORAGE_CONTAINER_NAME" --pattern 'helm-*' --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
-
-echo "Pushing KEYS file to Azure"
-az storage blob upload -f "KEYS" -n "KEYS" -c "$AZURE_STORAGE_CONTAINER_NAME" --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
+echo "Pushing image to Docker Hub"
+docker tag "ghcr.io/helm/tiller:${VERSION}" "helmpack/tiller:${VERSION}"
+docker push "helmpack/tiller:${VERSION}"
+
+echo "Pushing image to Quay"
+docker tag "ghcr.io/helm/tiller:${VERSION}" "quay.io/helmpack/tiller:${VERSION}"
+docker push "quay.io/helmpack/tiller:${VERSION}"
+
+# Canary version is used with helm init --canary-image flag.
+# Does not push canary binary which is Helm v3.
+if [ "$VERSION" != "canary" ]; then
+  echo "Installing Azure CLI"
+  apt update
+  apt install -y apt-transport-https
+  echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ stretch main" | tee /etc/apt/sources.list.d/azure-cli.list
+  curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add
+  apt update
+  apt install -y azure-cli
+
+  echo "Building helm binaries"
+  make build-cross
+  make dist checksum VERSION="${VERSION}"
+
+  echo "Pushing binaries to gs bucket"
+  ${HOME}/google-cloud-sdk/bin/gsutil cp ./_dist/* "gs://${PROJECT_NAME}"
+
+  echo "Pushing binaries to Azure"
+  az storage blob upload-batch -s _dist/ -d "$AZURE_STORAGE_CONTAINER_NAME" --pattern 'helm-*' --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
+
+  echo "Pushing KEYS file to Azure"
+  az storage blob upload -f "KEYS" -n "KEYS" -c "$AZURE_STORAGE_CONTAINER_NAME" --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
+fi
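
The push section follows a single pattern: build once against the primary registry, then retag and push to each mirror. The same pattern condensed into a loop (a sketch equivalent to the script above, not a proposed change):

```console
$ SRC="ghcr.io/helm/tiller:${VERSION}"
$ for DST in gcr.io/kubernetes-helm/tiller helmpack/tiller quay.io/helmpack/tiller; do
>   docker tag "${SRC}" "${DST}:${VERSION}"   # mirrors reuse the same tag
>   docker push "${DST}:${VERSION}"
> done
```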

@@ -1,5 +1,5 @@
-DOCKER_REGISTRY ?= gcr.io
-IMAGE_PREFIX ?= kubernetes-helm
+DOCKER_REGISTRY ?= ghcr.io
+IMAGE_PREFIX ?= helm
 DEV_IMAGE ?= golang:1.14
 SHORT_NAME ?= tiller
 SHORT_NAME_RUDDER ?= rudder
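
These two defaults are what repoint docker-build at the new home. Assuming the image reference is composed as ${DOCKER_REGISTRY}/${IMAGE_PREFIX}/${SHORT_NAME}:${VERSION} (the composing rule lives outside this hunk), the old location remains one override away:

```console
$ make docker-build VERSION=v2.17.0
$ # builds ghcr.io/helm/tiller:v2.17.0 with the new defaults
$ make docker-build DOCKER_REGISTRY=gcr.io IMAGE_PREFIX=kubernetes-helm VERSION=v2.17.0
$ # builds gcr.io/kubernetes-helm/tiller:v2.17.0, the previous default
```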

@@ -316,7 +316,7 @@ func (i *initCmd) run() error {
 		fmt.Fprintf(i.out, "\nWarning: You appear to be using an unreleased version of Helm. Please either use the\n"+
 			"--canary-image flag, or specify your desired tiller version with --tiller-image.\n\n"+
 			"Ex:\n"+
-			"$ helm init --tiller-image gcr.io/kubernetes-helm/tiller:v2.8.2\n\n")
+			"$ helm init --tiller-image ghcr.io/helm/tiller:v2.17.0\n\n")
 	}
 	return nil

@@ -24,7 +24,7 @@ import (
 	"github.com/ghodss/yaml"
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
@@ -42,8 +42,8 @@ func TestDeployment(t *testing.T) {
 		expect          string
 		imagePullPolicy v1.PullPolicy
 	}{
-		{"default", "", false, "gcr.io/kubernetes-helm/tiller:" + version.Version, "IfNotPresent"},
-		{"canary", "example.com/tiller", true, "gcr.io/kubernetes-helm/tiller:canary", "Always"},
+		{"default", "", false, "ghcr.io/helm/tiller:" + version.Version, "IfNotPresent"},
+		{"canary", "example.com/tiller", true, "ghcr.io/helm/tiller:canary", "Always"},
 		{"custom", "example.com/tiller:latest", false, "example.com/tiller:latest", "IfNotPresent"},
 	}
@@ -55,7 +55,7 @@ func TestDeployment(t *testing.T) {
 		// Unreleased versions of helm don't have a release image. See issue 3370
 		if tt.name == "default" && version.BuildMetadata == "unreleased" {
-			tt.expect = "gcr.io/kubernetes-helm/tiller:canary"
+			tt.expect = "ghcr.io/helm/tiller:canary"
 		}
 		if got := dep.Spec.Template.Spec.Containers[0].Image; got != tt.expect {
 			t.Errorf("%s: expected image %q, got %q", tt.name, tt.expect, got)
@@ -80,8 +80,8 @@ func TestDeploymentForServiceAccount(t *testing.T) {
 		imagePullPolicy v1.PullPolicy
 		serviceAccount  string
 	}{
-		{"withSA", "", false, "gcr.io/kubernetes-helm/tiller:latest", "IfNotPresent", "service-account"},
-		{"withoutSA", "", false, "gcr.io/kubernetes-helm/tiller:latest", "IfNotPresent", ""},
+		{"withSA", "", false, "ghcr.io/helm/tiller:latest", "IfNotPresent", "service-account"},
+		{"withoutSA", "", false, "ghcr.io/helm/tiller:latest", "IfNotPresent", ""},
 	}
 	for _, tt := range tests {
 		opts := &Options{Namespace: v1.NamespaceDefault, ImageSpec: tt.image, UseCanary: tt.canary, ServiceAccount: tt.serviceAccount}
@@ -187,7 +187,7 @@ func TestSecretManifest(t *testing.T) {
 }
 
 func TestInstall(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 
 	fc := &fake.Clientset{}
 	fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
@@ -234,7 +234,7 @@ func TestInstall(t *testing.T) {
 }
 
 func TestInstallHA(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 
 	fc := &fake.Clientset{}
 	fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
@@ -257,7 +257,7 @@ func TestInstallHA(t *testing.T) {
 }
 
 func TestInstall_WithTLS(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 	name := "tiller-secret"
 
 	fc := &fake.Clientset{}
@@ -332,7 +332,7 @@ func TestInstall_canary(t *testing.T) {
 	fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
 		obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
 		i := obj.Spec.Template.Spec.Containers[0].Image
-		if i != "gcr.io/kubernetes-helm/tiller:canary" {
+		if i != "ghcr.io/helm/tiller:canary" {
 			t.Errorf("expected canary image, got '%s'", i)
 		}
 		return true, obj, nil
@@ -353,7 +353,7 @@ func TestInstall_canary(t *testing.T) {
 }
 
 func TestUpgrade(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 	serviceAccount := "newServiceAccount"
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace:      v1.NamespaceDefault,
@@ -394,7 +394,7 @@ func TestUpgrade(t *testing.T) {
 }
 
 func TestUpgrade_serviceNotFound(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace: v1.NamespaceDefault,
@@ -437,7 +437,7 @@ func TestUpgrade_serviceNotFound(t *testing.T) {
 }
 
 func TestUgrade_newerVersion(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 	serviceAccount := "newServiceAccount"
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace:      v1.NamespaceDefault,
@@ -497,7 +497,7 @@ func TestUgrade_newerVersion(t *testing.T) {
 }
 
 func TestUpgrade_identical(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 	serviceAccount := "newServiceAccount"
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace:      v1.NamespaceDefault,
@@ -538,7 +538,7 @@ func TestUpgrade_identical(t *testing.T) {
 }
 
 func TestUpgrade_canaryClient(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:canary"
+	image := "ghcr.io/helm/tiller:canary"
 	serviceAccount := "newServiceAccount"
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace:      v1.NamespaceDefault,
@@ -579,7 +579,7 @@ func TestUpgrade_canaryClient(t *testing.T) {
 }
 
 func TestUpgrade_canaryServer(t *testing.T) {
-	image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
+	image := "ghcr.io/helm/tiller:v2.0.0"
 	serviceAccount := "newServiceAccount"
 	existingDeployment, _ := generateDeployment(&Options{
 		Namespace:      v1.NamespaceDefault,

@@ -19,13 +19,13 @@ package installer // import "k8s.io/helm/cmd/helm/installer"
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 
 	"k8s.io/helm/pkg/strvals"
 	"k8s.io/helm/pkg/version"
 )
 
 const (
-	defaultImage = "gcr.io/kubernetes-helm/tiller"
+	defaultImage = "ghcr.io/helm/tiller"
 
 	fmtJSON OutputFormat = "json"
 	fmtYAML OutputFormat = "yaml"
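
defaultImage is the repository helm init falls back to when --tiller-image is omitted; the client appends its own version as the tag. One way to verify the rendered reference without touching a cluster (helm init supports --dry-run with --debug in Helm 2; the exact output below is illustrative):

```console
$ helm init --dry-run --debug | grep 'image:'
        image: ghcr.io/helm/tiller:v2.17.0
```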

@@ -92,7 +92,7 @@ development may not be available in Google Cloud Container Registry. If you're getting
 image pull errors, you can override the version of Tiller. Example:
 
 ```console
-helm init --tiller-image=gcr.io/kubernetes-helm/tiller:2.7.2
+helm init --tiller-image=ghcr.io/helm/tiller:2.17.0
 ```
 
 Or use the latest version:

@@ -242,7 +242,7 @@ the Tiller image:
 ```console
 $ export TILLER_TAG=v2.0.0-beta.1 # Or whatever version you want
-$ kubectl --namespace=kube-system set image deployments/tiller-deploy tiller=gcr.io/kubernetes-helm/tiller:$TILLER_TAG
+$ kubectl --namespace=kube-system set image deployments/tiller-deploy tiller=ghcr.io/helm/tiller:$TILLER_TAG
 deployment "tiller-deploy" image updated
 ```

@@ -19,7 +19,7 @@ package portforwarder
 import (
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
 )
@@ -99,11 +99,11 @@ func TestGetTillerPodImage(t *testing.T) {
 				Containers: []v1.Container{
 					{
 						Name:  "tiller",
-						Image: "gcr.io/kubernetes-helm/tiller:v2.0.0",
+						Image: "ghcr.io/helm/tiller:v2.0.0",
 					},
 				},
 			},
-			expected: "gcr.io/kubernetes-helm/tiller:v2.0.0",
+			expected: "ghcr.io/helm/tiller:v2.0.0",
 			err:      false,
 		},
 		{
