From 0377ca29e4320fc29a01de16002fab1b9b20c918 Mon Sep 17 00:00:00 2001
From: Aishwarya Thangappa
Date: Sat, 1 Jul 2017 21:31:10 -0700
Subject: [PATCH] Added resource limit/request flags to helm init

This PR adds the following 4 flags to the helm init command so that
appropriate CPU/memory limits and requests can be set for the Tiller
container:

--tiller-cpu-limit
--tiller-cpu-request
--tiller-memory-limit
--tiller-memory-request

To maintain backwards compatibility, when none of these flags are set, no
resource limits or requests are added to the Tiller container. When
--tiller-cpu-limit is specified but --tiller-cpu-request is not, the CPU
request is set to the same value as the CPU limit, but not vice versa. The
same applies to --tiller-memory-limit and --tiller-memory-request.

Fixes: https://github.com/kubernetes/helm/issues/2135
---
 cmd/helm/init.go                   | 36 +++++++++++++++-
 cmd/helm/init_test.go              | 29 +++++++++++++
 cmd/helm/installer/install.go      | 25 ++++++++++-
 cmd/helm/installer/install_test.go | 66 ++++++++++++++++++++++++++++++
 cmd/helm/installer/options.go      | 13 ++++++
 5 files changed, 166 insertions(+), 3 deletions(-)

diff --git a/cmd/helm/init.go b/cmd/helm/init.go
index 868a5bf0d..993f76bba 100644
--- a/cmd/helm/init.go
+++ b/cmd/helm/init.go
@@ -26,6 +26,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/helm/cmd/helm/installer"
 	"k8s.io/helm/pkg/getter"
 	"k8s.io/helm/pkg/helm/helmpath"
@@ -79,6 +80,10 @@ type initCmd struct {
 	opts           installer.Options
 	kubeClient     kubernetes.Interface
 	serviceAccount string
+	cpuLimit       string
+	memoryLimit    string
+	cpuRequest     string
+	memoryRequest  string
 }
 
 func newInitCmd(out io.Writer) *cobra.Command {
@@ -118,6 +123,10 @@ func newInitCmd(out io.Writer) *cobra.Command {
 	f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host")
 	f.StringVar(&i.serviceAccount, "service-account", "", "name of service account")
+	f.StringVar(&i.cpuLimit, "tiller-cpu-limit", "", "override Tiller CPU limit")
+	f.StringVar(&i.memoryLimit, "tiller-memory-limit", "", "override Tiller memory limit")
+	f.StringVar(&i.cpuRequest, "tiller-cpu-request", "", "override Tiller CPU request")
+	f.StringVar(&i.memoryRequest, "tiller-memory-request", "", "override Tiller memory request")
 
 	return cmd
 }
@@ -147,16 +156,39 @@ func (i *initCmd) tlsOptions() error {
 	return nil
 }
 
-// run initializes local config and installs Tiller to Kubernetes cluster.
+func (i *initCmd) generateResourceRequirements() error {
+	var err error
+	for _, opt := range []struct {
+		spec     string
+		quantity *resource.Quantity
+	}{
+		{i.cpuLimit, &i.opts.CPULimit},
+		{i.memoryLimit, &i.opts.MemoryLimit},
+		{i.cpuRequest, &i.opts.CPURequest},
+		{i.memoryRequest, &i.opts.MemoryRequest},
+	} {
+		if opt.spec != "" {
+			*opt.quantity, err = resource.ParseQuantity(opt.spec)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// run initializes local config and installs Tiller to the Kubernetes cluster.
 func (i *initCmd) run() error {
 	if err := i.tlsOptions(); err != nil {
 		return err
 	}
+	if err := i.generateResourceRequirements(); err != nil {
+		return err
+	}
 	i.opts.Namespace = i.namespace
 	i.opts.UseCanary = i.canary
 	i.opts.ImageSpec = i.image
 	i.opts.ServiceAccount = i.serviceAccount
-
 	if settings.Debug {
 		writeYAMLManifest := func(apiVersion, kind, body string, first, last bool) error {
 			w := i.out
diff --git a/cmd/helm/init_test.go b/cmd/helm/init_test.go
index 55b62d284..a4a4bebf0 100644
--- a/cmd/helm/init_test.go
+++ b/cmd/helm/init_test.go
@@ -302,3 +302,32 @@ func TestInitCmd_tlsOptions(t *testing.T) {
 		}
 	}
 }
+
+func TestInitCmd_resourceRequirements(t *testing.T) {
+	home, err := ioutil.TempDir("", "helm_home")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(home)
+	var buf bytes.Buffer
+	fc := fake.NewSimpleClientset()
+
+	cmd := &initCmd{
+		out:           &buf,
+		home:          helmpath.Home(home),
+		kubeClient:    fc,
+		clientOnly:    true,
+		dryRun:        true,
+		namespace:     v1.NamespaceDefault,
+		cpuLimit:      "2",
+		memoryLimit:   "1Gi",
+		cpuRequest:    "100m",
+		memoryRequest: "500Mi",
+	}
+	if err := cmd.run(); err != nil {
+		t.Fatal(err)
+	}
+	if len(fc.Actions()) != 0 {
+		t.Error("expected no client calls")
+	}
+}
diff --git a/cmd/helm/installer/install.go b/cmd/helm/installer/install.go
index 7e8707fc8..72d2bafce 100644
--- a/cmd/helm/installer/install.go
+++ b/cmd/helm/installer/install.go
@@ -21,6 +21,7 @@ import (
 	"github.com/ghodss/yaml"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
@@ -116,8 +117,30 @@ func generateLabels(labels map[string]string) map[string]string {
 	return labels
 }
 
+func generateResourceRequirements(opts *Options) v1.ResourceRequirements {
+	limits := make(map[v1.ResourceName]resource.Quantity, 2)
+	requests := make(map[v1.ResourceName]resource.Quantity, 2)
+	resources := v1.ResourceRequirements{}
+	if !opts.CPULimit.IsZero() {
+		limits[v1.ResourceCPU] = opts.CPULimit
+	}
+	if !opts.MemoryLimit.IsZero() {
+		limits[v1.ResourceMemory] = opts.MemoryLimit
+	}
+	if !opts.CPURequest.IsZero() {
+		requests[v1.ResourceCPU] = opts.CPURequest
+	}
+	if !opts.MemoryRequest.IsZero() {
+		requests[v1.ResourceMemory] = opts.MemoryRequest
+	}
+	resources.Requests = requests
+	resources.Limits = limits
+	return resources
+}
+
 func generateDeployment(opts *Options) *v1beta1.Deployment {
 	labels := generateLabels(map[string]string{"name": "tiller"})
+	resources := generateResourceRequirements(opts)
 	d := &v1beta1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: opts.Namespace,
@@ -162,6 +185,7 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
 							InitialDelaySeconds: 1,
 							TimeoutSeconds:      1,
 						},
+						Resources: resources,
 					},
 				},
 				HostNetwork: opts.EnableHostNetwork,
@@ -172,7 +196,6 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
 			},
 		},
 	}
-
 	if opts.tls() {
 		const certsDir = "/etc/certs"
 
diff --git a/cmd/helm/installer/install_test.go b/cmd/helm/installer/install_test.go
index e1e94d7e5..a3eab2045 100644
--- a/cmd/helm/installer/install_test.go
+++ b/cmd/helm/installer/install_test.go
@@ -30,6 +30,8 @@ import (
 	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
 	testcore "k8s.io/client-go/testing"
 
+	"fmt"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/helm/pkg/version"
 )
 
@@ -144,6 +146,70 @@ func TestDeploymentManifest_WithTLS(t *testing.T) {
 	}
 }
 
+func TestDeploymentManifest_WithResourceLimits(t *testing.T) {
+	tests := []struct {
+		cpuLimit      string
+		memoryLimit   string
+		cpuRequest    string
+		memoryRequest string
+	}{
+		{cpuLimit: "2", memoryLimit: "1Gi", cpuRequest: "100m", memoryRequest: "500Mi"},
+		{cpuLimit: "2", memoryLimit: "1Gi"},
+		{cpuRequest: "100m", memoryRequest: "500Mi"},
+		{},
+	}
+	for _, tt := range tests {
+		opts := &Options{Namespace: v1.NamespaceDefault}
+		var err error
+		if tt.cpuLimit != "" {
+			opts.CPULimit, err = resource.ParseQuantity(tt.cpuLimit)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.memoryLimit != "" {
+			opts.MemoryLimit, err = resource.ParseQuantity(tt.memoryLimit)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.cpuRequest != "" {
+			opts.CPURequest, err = resource.ParseQuantity(tt.cpuRequest)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.memoryRequest != "" {
+			opts.MemoryRequest, err = resource.ParseQuantity(tt.memoryRequest)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		o, err := DeploymentManifest(opts)
+		if err != nil {
+			t.Fatalf("error %q", err)
+		}
+		var d v1beta1.Deployment
+		if err := yaml.Unmarshal([]byte(o), &d); err != nil {
+			t.Fatalf("error %q", err)
+		}
+		fmt.Println(o)
+		// Verify that the Deployment's resources reflect the configured CPU/memory limits and requests.
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU]; got != opts.CPULimit {
+			t.Errorf("Expected CPU limit %q, got %q", opts.CPULimit, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory]; got != opts.MemoryLimit {
+			t.Errorf("Expected memory limit %q, got %q", opts.MemoryLimit, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]; got != opts.CPURequest {
+			t.Errorf("Expected CPU request %q, got %q", opts.CPURequest, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]; got != opts.MemoryRequest {
+			t.Errorf("Expected memory request %q, got %q", opts.MemoryRequest, got)
+		}
+	}
+}
+
 func TestServiceManifest(t *testing.T) {
 	o, err := ServiceManifest(v1.NamespaceDefault)
 	if err != nil {
diff --git a/cmd/helm/installer/options.go b/cmd/helm/installer/options.go
index ddb7706f8..0fc3696b8 100644
--- a/cmd/helm/installer/options.go
+++ b/cmd/helm/installer/options.go
@@ -19,6 +19,7 @@ package installer // import "k8s.io/helm/cmd/helm/installer"
 import (
 	"fmt"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/client-go/pkg/api/v1"
 	"k8s.io/helm/pkg/version"
 )
@@ -71,6 +72,18 @@ type Options struct {
 
 	// EnableHostNetwork installs Tiller with net=host.
 	EnableHostNetwork bool
+
+	// CPULimit is the CPU limit used when deploying Tiller.
+	CPULimit resource.Quantity
+
+	// MemoryLimit is the memory limit used when deploying Tiller.
+	MemoryLimit resource.Quantity
+
+	// CPURequest is the CPU request used when deploying Tiller.
+	CPURequest resource.Quantity
+
+	// MemoryRequest is the memory request used when deploying Tiller.
+	MemoryRequest resource.Quantity
 }
 
 func (opts *Options) selectImage() string {
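
Example usage (illustrative only; the values below are the same ones exercised
by the new tests, not defaults):

    helm init \
      --tiller-cpu-limit 2 \
      --tiller-memory-limit 1Gi \
      --tiller-cpu-request 100m \
      --tiller-memory-request 500Mi

Assuming all four quantities parse successfully, the generated Tiller
Deployment should include a container resources section roughly like:

    resources:
      limits:
        cpu: "2"
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 500Mi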