Merge pull request #6462 from keleustes/kube16

Helm init compatible with Kubernetes 1.16
Matthew Fisher 5 years ago committed by GitHub
commit 77a7bbb013
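Context for the change: Kubernetes 1.16 stops serving the extensions/v1beta1 Deployment API, so helm init has to create and look up the Tiller Deployment through apps/v1. Unlike extensions/v1beta1, apps/v1 requires an explicit spec.selector, which is why the generateDeployment hunk below adds a MatchLabels selector. A minimal sketch of the apps/v1 shape involved (namespace, labels and image are illustrative, not taken from this diff):

package main

import (
    "fmt"

    "github.com/ghodss/yaml"
    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    labels := map[string]string{"app": "helm", "name": "tiller"}
    d := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "tiller-deploy", Labels: labels},
        Spec: appsv1.DeploymentSpec{
            // Required in apps/v1; extensions/v1beta1 defaulted it from the pod template labels.
            Selector: &metav1.LabelSelector{MatchLabels: labels},
            Template: v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{Labels: labels},
                Spec: v1.PodSpec{
                    Containers: []v1.Container{{Name: "tiller", Image: "gcr.io/kubernetes-helm/tiller:canary"}},
                },
            },
        },
    }
    out, err := yaml.Marshal(d)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}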

@@ -282,7 +282,7 @@ func (i *initCmd) run() error {
if err := i.ping(i.opts.SelectImage()); err != nil {
return err
}
fmt.Fprintln(i.out, "\nTiller (the Helm server-side component) has been upgraded to the current version.")
fmt.Fprintln(i.out, "\nTiller (the Helm server-side component) has been updated to", i.opts.SelectImage(), ".")
} else {
debug("The error received while trying to init: %s", err)
fmt.Fprintln(i.out, "Warning: Tiller is already installed in the cluster.\n"+
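With the message change above, a successful upgrade now reports which image was selected instead of a generic "current version" notice. Since fmt.Fprintln separates its operands with spaces, the output reads roughly as follows (tag illustrative):

Tiller (the Helm server-side component) has been updated to gcr.io/kubernetes-helm/tiller:v2.16.0 .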

@@ -28,8 +28,8 @@ import (
"github.com/ghodss/yaml"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -83,7 +83,7 @@ func TestInitCmd_exists(t *testing.T) {
defer os.RemoveAll(home)
var buf bytes.Buffer
fc := fake.NewSimpleClientset(&v1beta1.Deployment{
fc := fake.NewSimpleClientset(&appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.NamespaceDefault,
Name: "tiller-deploy",
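A self-contained sketch of the pattern the updated test relies on: seed the fake clientset with an apps/v1 Deployment and read it back through the apps/v1 typed client (the Get call uses the pre-context client-go signature that this vendored client-go provides; the assertion is illustrative):

package main

import (
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    fc := fake.NewSimpleClientset(&appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: v1.NamespaceDefault,
            Name:      "tiller-deploy",
        },
    })
    // The lookup now goes through AppsV1, not ExtensionsV1beta1.
    d, err := fc.AppsV1().Deployments(v1.NamespaceDefault).Get("tiller-deploy", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("found deployment:", d.Name)
}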

@@ -17,23 +17,22 @@ limitations under the License.
package installer // import "k8s.io/helm/cmd/helm/installer"
import (
"errors"
"fmt"
"io/ioutil"
"strings"
"github.com/Masterminds/semver"
"github.com/ghodss/yaml"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
"k8s.io/helm/pkg/version"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/tiller/environment"
@@ -43,7 +42,7 @@ import (
//
// Returns an error if the command failed.
func Install(client kubernetes.Interface, opts *Options) error {
if err := createDeployment(client.ExtensionsV1beta1(), opts); err != nil {
if err := createDeployment(client.AppsV1(), opts); err != nil {
return err
}
if err := createService(client.CoreV1(), opts.Namespace); err != nil {
@@ -61,51 +60,108 @@ func Install(client kubernetes.Interface, opts *Options) error {
//
// Returns an error if the command failed.
func Upgrade(client kubernetes.Interface, opts *Options) error {
obj, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
appsobj, err := client.AppsV1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{})
if err == nil {
// Can happen in two cases:
// 1. helm init inserted an apps/v1 Deployment up front in Kubernetes
// 2. helm init inserted an extensions/v1beta1 Deployment against a K8s cluster already
// supporting apps/v1 Deployment. In such a case K8s is returning the apps/v1 object anyway.
// (for the same reason "kubectl convert" is being deprecated)
return upgradeAppsTillerDeployment(client, opts, appsobj)
}
extensionsobj, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Get(deploymentName, metav1.GetOptions{})
if err == nil {
// User performed helm init against older version of kubernetes (Previous to 1.9)
return upgradeExtensionsTillerDeployment(client, opts, extensionsobj)
}
return err
}
func upgradeAppsTillerDeployment(client kubernetes.Interface, opts *Options, obj *appsv1.Deployment) error {
// Update the PodTemplateSpec section of the deployment
if err := updatePodTemplate(&obj.Spec.Template.Spec, opts); err != nil {
return err
}
tillerImage := obj.Spec.Template.Spec.Containers[0].Image
if semverCompare(tillerImage) == -1 && !opts.ForceUpgrade {
return errors.New("current Tiller version is newer, use --force-upgrade to downgrade")
if _, err := client.AppsV1().Deployments(opts.Namespace).Update(obj); err != nil {
return err
}
obj.Spec.Template.Spec.Containers[0].Image = opts.SelectImage()
obj.Spec.Template.Spec.Containers[0].ImagePullPolicy = opts.pullPolicy()
obj.Spec.Template.Spec.ServiceAccountName = opts.ServiceAccount
// If the service does not exist that would mean we are upgrading from a Tiller version
// that didn't deploy the service, so install it.
_, err := client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return createService(client.CoreV1(), opts.Namespace)
}
return err
}
func upgradeExtensionsTillerDeployment(client kubernetes.Interface, opts *Options, obj *extensionsv1beta1.Deployment) error {
// Update the PodTemplateSpec section of the deployment
if err := updatePodTemplate(&obj.Spec.Template.Spec, opts); err != nil {
return err
}
if _, err := client.ExtensionsV1beta1().Deployments(opts.Namespace).Update(obj); err != nil {
return err
}
// If the service does not exist that would mean we are upgrading from a Tiller version
// that didn't deploy the service, so install it.
_, err = client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
_, err := client.CoreV1().Services(opts.Namespace).Get(serviceName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return createService(client.CoreV1(), opts.Namespace)
}
return err
}
// semverCompare returns whether the client's version is older, equal or newer than the given image's version.
func semverCompare(image string) int {
split := strings.Split(image, ":")
if len(split) < 2 {
// If we don't know the version, we consider the client version newer.
return 1
func updatePodTemplate(podSpec *v1.PodSpec, opts *Options) error {
tillerImage := podSpec.Containers[0].Image
clientImage := opts.SelectImage()
if semverCompare(tillerImage, clientImage) == -1 && !opts.ForceUpgrade {
return fmt.Errorf("current Tiller version %s is newer than client version %s, use --force-upgrade to downgrade", tillerImage, clientImage)
}
tillerVersion, err := semver.NewVersion(split[1])
podSpec.Containers[0].Image = clientImage
podSpec.Containers[0].ImagePullPolicy = opts.pullPolicy()
podSpec.ServiceAccountName = opts.ServiceAccount
return nil
}
// semverCompare returns whether the client's version is older, equal or newer than the given image's version.
func semverCompare(tillerImage, clientImage string) int {
tillerVersion, err := string2semver(tillerImage)
if err != nil {
// same thing with unparsable tiller versions (e.g. canary releases).
return 1
}
clientVersion, err := semver.NewVersion(version.Version)
// clientVersion, err := semver.NewVersion(currentVersion)
clientVersion, err := string2semver(clientImage)
if err != nil {
// aaaaaand same thing with unparsable helm versions (e.g. canary releases).
return 1
}
return clientVersion.Compare(tillerVersion)
}
func string2semver(image string) (*semver.Version, error) {
split := strings.Split(image, ":")
if len(split) < 2 {
// If we don't know the version, we consider the client version newer.
return nil, fmt.Errorf("no repository in image %s", image)
}
return semver.NewVersion(split[1])
}
// createDeployment creates the Tiller Deployment resource.
func createDeployment(client extensionsclient.DeploymentsGetter, opts *Options) error {
func createDeployment(client appsv1client.DeploymentsGetter, opts *Options) error {
obj, err := generateDeployment(opts)
if err != nil {
return err
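The comparison logic above can be exercised on its own. The following is a standalone re-derivation of the same tag-parsing and semver comparison (not the package-private semverCompare/string2semver helpers themselves; image strings are illustrative), assuming the Masterminds/semver v1 API that Helm 2 vendors:

package main

import (
    "fmt"
    "strings"

    "github.com/Masterminds/semver"
)

// compareImages returns Compare(client, tiller): -1 if the client tag is older,
// 0 if equal, 1 if newer; unparsable tags are treated as "client is newer".
func compareImages(tillerImage, clientImage string) int {
    parse := func(image string) (*semver.Version, error) {
        parts := strings.Split(image, ":")
        if len(parts) < 2 {
            return nil, fmt.Errorf("no tag in image %s", image)
        }
        return semver.NewVersion(parts[1])
    }
    tiller, err := parse(tillerImage)
    if err != nil {
        return 1
    }
    client, err := parse(clientImage)
    if err != nil {
        return 1
    }
    return client.Compare(tiller)
}

func main() {
    fmt.Println(compareImages("gcr.io/kubernetes-helm/tiller:v2.14.3", "gcr.io/kubernetes-helm/tiller:v2.16.0")) // 1: upgrade allowed
    fmt.Println(compareImages("gcr.io/kubernetes-helm/tiller:v2.16.0", "gcr.io/kubernetes-helm/tiller:v2.14.3")) // -1: needs --force-upgrade
    fmt.Println(compareImages("gcr.io/kubernetes-helm/tiller:canary", "gcr.io/kubernetes-helm/tiller:v2.16.0"))  // 1: canary treated as newer client
}

Unparsable tags such as :canary are deliberately treated as "client is newer", so canary builds can be rolled out without --force-upgrade.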
@@ -118,7 +174,7 @@ func createDeployment(client extensionsclient.DeploymentsGetter, opts *Options)
// Deployment gets a deployment object that can be used to generate a manifest
// as a string. This object should not be submitted directly to the Kubernetes
// api
func Deployment(opts *Options) (*v1beta1.Deployment, error) {
func Deployment(opts *Options) (*appsv1.Deployment, error) {
dep, err := generateDeployment(opts)
if err != nil {
return nil, err
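Because Deployment now returns an *appsv1.Deployment, the client-side manifest-generation path of helm init also emits apps/v1. A sketch of rendering the manifest without talking to a cluster, assuming the installer package is importable by the path shown in its import comment and using only the Namespace option (other Options fields are left at their zero values here):

package main

import (
    "fmt"

    "github.com/ghodss/yaml"
    "k8s.io/helm/cmd/helm/installer"
)

func main() {
    opts := &installer.Options{Namespace: "kube-system"}
    dep, err := installer.Deployment(opts) // *appsv1.Deployment, selector included
    if err != nil {
        panic(err)
    }
    out, err := yaml.Marshal(dep)
    if err != nil {
        panic(err)
    }
    // For inspection/templating only; as the comment above notes, this object
    // is not meant to be submitted directly to the Kubernetes API.
    fmt.Println(string(out))
}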
@@ -197,7 +253,7 @@ func parseNodeSelectorsInto(labels string, m map[string]string) error {
}
return nil
}
func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
func generateDeployment(opts *Options) (*appsv1.Deployment, error) {
labels := generateLabels(map[string]string{"name": "tiller"})
nodeSelectors := map[string]string{}
if len(opts.NodeSelectors) > 0 {
@@ -206,14 +262,17 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
return nil, err
}
}
d := &v1beta1.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: opts.Namespace,
Name: deploymentName,
Labels: labels,
},
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: opts.getReplicas(),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
@@ -297,7 +356,7 @@ func generateDeployment(opts *Options) (*v1beta1.Deployment, error) {
// merge them and convert back to Deployment
if len(opts.Values) > 0 {
// base deployment struct
var dd v1beta1.Deployment
var dd appsv1.Deployment
// get YAML from original deployment
dy, err := yaml.Marshal(d)
if err != nil {
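Taken together, the Upgrade path above boils down to: prefer apps/v1, and fall back to extensions/v1beta1 only for clusters older than 1.9 that still hold the old object. A self-contained sketch of that lookup order against any kubernetes.Interface (the function name and the fake-clientset demo are illustrative; Get uses the pre-context client-go signature matching this vendored client-go):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
)

// tillerDeploymentAPI reports which API group/version the tiller-deploy
// Deployment is reachable through, trying apps/v1 first.
func tillerDeploymentAPI(client kubernetes.Interface, namespace, name string) (string, error) {
    if _, err := client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err == nil {
        return "apps/v1", nil
    }
    if _, err := client.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}); err == nil {
        return "extensions/v1beta1", nil
    }
    return "", fmt.Errorf("deployment %s/%s not found via apps/v1 or extensions/v1beta1", namespace, name)
}

func main() {
    // Empty fake clientset: both lookups miss, so this prints the error.
    // Against a 1.9+ cluster the apps/v1 branch wins even for Deployments that
    // were originally created through extensions/v1beta1.
    if _, err := tillerDeploymentAPI(fake.NewSimpleClientset(), "kube-system", "tiller-deploy"); err != nil {
        fmt.Println(err)
    }
}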

@@ -24,8 +24,8 @@ import (
"testing"
"github.com/ghodss/yaml"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -192,7 +192,7 @@ func TestInstall(t *testing.T) {
fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
l := obj.GetLabels()
if reflect.DeepEqual(l, map[string]string{"app": "helm"}) {
t.Errorf("expected labels = '', got '%s'", l)
@@ -239,7 +239,7 @@ func TestInstallHA(t *testing.T) {
fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
replicas := obj.Spec.Replicas
if int(*replicas) != 2 {
t.Errorf("expected replicas = 2, got '%d'", replicas)
@@ -263,7 +263,7 @@ func TestInstall_WithTLS(t *testing.T) {
fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
l := obj.GetLabels()
if reflect.DeepEqual(l, map[string]string{"app": "helm"}) {
t.Errorf("expected labels = '', got '%s'", l)
@@ -331,7 +331,7 @@ func TestInstall_WithTLS(t *testing.T) {
func TestInstall_canary(t *testing.T) {
fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.CreateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != "gcr.io/kubernetes-helm/tiller:canary" {
t.Errorf("expected canary image, got '%s'", i)
@@ -369,7 +369,7 @@ func TestUpgrade(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
@@ -408,7 +408,7 @@ func TestUpgrade_serviceNotFound(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
@@ -453,7 +453,7 @@ func TestUgrade_newerVersion(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
@@ -513,7 +513,7 @@ func TestUpgrade_identical(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
@@ -554,7 +554,7 @@ func TestUpgrade_canaryClient(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
@@ -595,7 +595,7 @@ func TestUpgrade_canaryServer(t *testing.T) {
return true, existingDeployment, nil
})
fc.AddReactor("update", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.UpdateAction).GetObject().(*v1beta1.Deployment)
obj := action.(testcore.UpdateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != image {
t.Errorf("expected image = '%s', got '%s'", image, i)
