From b2ed3407432dc5478c6545c70473842f9e87f07d Mon Sep 17 00:00:00 2001
From: Aishwarya Thangappa
Date: Sat, 1 Jul 2017 21:31:10 -0700
Subject: [PATCH] Added resource limit/request flags to helm init

This PR adds the following four flags to the helm init command so that
appropriate CPU/memory requests and limits can be set for the Tiller
container:

--tiller-cpu-limit
--tiller-cpu-request
--tiller-memory-limit
--tiller-memory-request

To maintain backwards compatibility, when none of these flags is set, no
resource requirements are added to the Tiller deployment. When
--tiller-cpu-limit is specified but --tiller-cpu-request is not, the CPU
request is set to the same value as the CPU limit, but not vice versa.
The same applies to --tiller-memory-limit and --tiller-memory-request.
A usage sketch follows the diff below.

Fixes: https://github.com/kubernetes/helm/issues/2135
---
 cmd/helm/init.go                   | 36 +++++++++++++++-
 cmd/helm/init_test.go              | 29 +++++++++++++
 cmd/helm/installer/install.go      | 31 +++++++++++++-
 cmd/helm/installer/install_test.go | 69 ++++++++++++++++++++++++++++++
 cmd/helm/installer/options.go      | 13 ++++++
 5 files changed, 175 insertions(+), 3 deletions(-)

diff --git a/cmd/helm/init.go b/cmd/helm/init.go
index af7fc5b20..51b02127f 100644
--- a/cmd/helm/init.go
+++ b/cmd/helm/init.go
@@ -26,6 +26,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/helm/cmd/helm/installer"
 	"k8s.io/helm/pkg/getter"
 	"k8s.io/helm/pkg/helm/helmpath"
@@ -79,6 +80,10 @@ type initCmd struct {
 	opts           installer.Options
 	kubeClient     kubernetes.Interface
 	serviceAccount string
+	cpuLimit       string
+	cpuRequest     string
+	memoryLimit    string
+	memoryRequest  string
 }
 
 func newInitCmd(out io.Writer) *cobra.Command {
@@ -120,6 +125,10 @@
 	f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host")
 	f.StringVar(&i.serviceAccount, "service-account", "", "name of service account")
 
+	f.StringVar(&i.cpuRequest, "tiller-cpu-request", "", "override Tiller CPU request")
+	f.StringVar(&i.cpuLimit, "tiller-cpu-limit", "", "override Tiller CPU limit")
+	f.StringVar(&i.memoryRequest, "tiller-memory-request", "", "override Tiller memory request")
+	f.StringVar(&i.memoryLimit, "tiller-memory-limit", "", "override Tiller memory limit")
 	return cmd
 }
 
@@ -149,16 +158,39 @@
 	return nil
 }
 
-// run initializes local config and installs Tiller to Kubernetes cluster.
+func (i *initCmd) generateLimits() error {
+	var err error
+	for _, opt := range []struct {
+		spec     string
+		quantity *resource.Quantity
+	}{
+		{i.cpuLimit, &i.opts.CPULimit},
+		{i.cpuRequest, &i.opts.CPURequest},
+		{i.memoryRequest, &i.opts.MemoryRequest},
+		{i.memoryLimit, &i.opts.MemoryLimit},
+	} {
+		if opt.spec != "" {
+			*opt.quantity, err = resource.ParseQuantity(opt.spec)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// run initializes local config and installs Tiller to the Kubernetes cluster.
 func (i *initCmd) run() error {
 	if err := i.tlsOptions(); err != nil {
 		return err
 	}
+	if err := i.generateLimits(); err != nil {
+		return err
+	}
 	i.opts.Namespace = i.namespace
 	i.opts.UseCanary = i.canary
 	i.opts.ImageSpec = i.image
 	i.opts.ServiceAccount = i.serviceAccount
-
 	if settings.Debug {
 		writeYAMLManifest := func(apiVersion, kind, body string, first, last bool) error {
 			w := i.out
diff --git a/cmd/helm/init_test.go b/cmd/helm/init_test.go
index 55b62d284..b715e85ee 100644
--- a/cmd/helm/init_test.go
+++ b/cmd/helm/init_test.go
@@ -302,3 +302,32 @@ func TestInitCmd_tlsOptions(t *testing.T) {
 		}
 	}
 }
+
+func TestInitCmd_resourceLimits(t *testing.T) {
+	home, err := ioutil.TempDir("", "helm_home")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(home)
+	var buf bytes.Buffer
+	fc := fake.NewSimpleClientset()
+
+	cmd := &initCmd{
+		out:           &buf,
+		home:          helmpath.Home(home),
+		kubeClient:    fc,
+		clientOnly:    true,
+		dryRun:        true,
+		namespace:     v1.NamespaceDefault,
+		cpuLimit:      "2",
+		cpuRequest:    "100m",
+		memoryLimit:   "1Gi",
+		memoryRequest: "500Mi",
+	}
+	if err := cmd.run(); err != nil {
+		t.Fatal(err)
+	}
+	if len(fc.Actions()) != 0 {
+		t.Error("expected no client calls in dry-run mode")
+	}
+}
diff --git a/cmd/helm/installer/install.go b/cmd/helm/installer/install.go
index 7e8707fc8..b70938aec 100644
--- a/cmd/helm/installer/install.go
+++ b/cmd/helm/installer/install.go
@@ -21,6 +21,7 @@ import (
 	"github.com/ghodss/yaml"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
@@ -116,8 +117,36 @@ func generateLabels(labels map[string]string) map[string]string {
 	return labels
 }
 
+func generateResourceRequirements(opts *Options) v1.ResourceRequirements {
+	limits := make(map[v1.ResourceName]resource.Quantity, 2)
+	requests := make(map[v1.ResourceName]resource.Quantity, 2)
+	resources := v1.ResourceRequirements{}
+	if opts.CPURequest != (resource.Quantity{}) {
+		requests[v1.ResourceCPU] = opts.CPURequest
+	}
+	if opts.CPULimit != (resource.Quantity{}) {
+		limits[v1.ResourceCPU] = opts.CPULimit
+		if opts.CPURequest == (resource.Quantity{}) {
+			requests[v1.ResourceCPU] = opts.CPULimit
+		}
+	}
+	if opts.MemoryRequest != (resource.Quantity{}) {
+		requests[v1.ResourceMemory] = opts.MemoryRequest
+	}
+	if opts.MemoryLimit != (resource.Quantity{}) {
+		limits[v1.ResourceMemory] = opts.MemoryLimit
+		if opts.MemoryRequest == (resource.Quantity{}) {
+			requests[v1.ResourceMemory] = opts.MemoryLimit
+		}
+	}
+	resources.Requests = requests
+	resources.Limits = limits
+	return resources
+}
+
 func generateDeployment(opts *Options) *v1beta1.Deployment {
 	labels := generateLabels(map[string]string{"name": "tiller"})
+	resources := generateResourceRequirements(opts)
 	d := &v1beta1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: opts.Namespace,
@@ -162,6 +191,7 @@
 							InitialDelaySeconds: 1,
 							TimeoutSeconds:      1,
 						},
+						Resources: resources,
 					},
 				},
 				HostNetwork: opts.EnableHostNetwork,
@@ -172,7 +202,6 @@
 			},
 		},
 	}
-
 	if opts.tls() {
 		const certsDir = "/etc/certs"
 
diff --git a/cmd/helm/installer/install_test.go b/cmd/helm/installer/install_test.go
index e1e94d7e5..3dc456e9b 100644
--- a/cmd/helm/installer/install_test.go
+++ b/cmd/helm/installer/install_test.go
@@ -30,6 +30,8 @@ import (
 	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
 	testcore "k8s.io/client-go/testing"
 
+	"fmt"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/helm/pkg/version" ) @@ -144,6 +146,73 @@ func TestDeploymentManifest_WithTLS(t *testing.T) { } } +func TestDeploymentManifest_WithResourceLimits(t *testing.T) { + type test struct { + cpuLimit string + cpuRequest string + memLimit string + memRequest string + } + + var tests = []test{ + {cpuLimit: "2", cpuRequest: "100m", memLimit: "1Gi", memRequest: "500Mi"}, + {cpuLimit: "2", memLimit: "1Gi"}, + {cpuRequest: "100m", memRequest: "500Mi"}, + {}, + } + + for _, tt := range tests { + opts := &Options{Namespace: v1.NamespaceDefault} + var err error + if tt.cpuLimit != "" { + opts.CPULimit, err = resource.ParseQuantity(tt.cpuLimit) + if err != nil { + t.Errorf("Error %q", err) + } + } + if tt.cpuRequest != "" { + opts.CPURequest, err = resource.ParseQuantity(tt.cpuRequest) + if err != nil { + t.Errorf("Error %q", err) + } + } + if tt.memLimit != "" { + opts.MemoryLimit, err = resource.ParseQuantity(tt.memLimit) + if err != nil { + t.Errorf("Error %q", err) + } + } + if tt.memRequest != "" { + opts.MemoryRequest, err = resource.ParseQuantity(tt.memRequest) + if err != nil { + t.Errorf("Error %q", err) + } + } + o, err := DeploymentManifest(opts) + if err != nil { + t.Fatalf("error %q", err) + } + var d v1beta1.Deployment + if err := yaml.Unmarshal([]byte(o), &d); err != nil { + t.Fatalf(" error %q", err) + } + fmt.Println(o) + // verify Resources in deployment reflect the use of cpu/memory limits. + if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU]; got != opts.CPULimit { + t.Errorf("Expected cpu limits %q, got %q", opts.CPULimit, got) + } + if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory]; got != opts.MemoryLimit { + t.Errorf("Expected memory limits %q, got %q", opts.MemoryLimit, got) + } + if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]; got != opts.CPURequest && got != opts.CPULimit { + t.Errorf("Expected cpu requests %q, got %q", opts.CPURequest, got) + } + if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]; got != opts.MemoryRequest && got != opts.MemoryLimit { + t.Errorf("Expected memory requests %q, got %q", opts.MemoryRequest, got) + } + } +} + func TestServiceManifest(t *testing.T) { o, err := ServiceManifest(v1.NamespaceDefault) if err != nil { diff --git a/cmd/helm/installer/options.go b/cmd/helm/installer/options.go index ddb7706f8..6d2876f10 100644 --- a/cmd/helm/installer/options.go +++ b/cmd/helm/installer/options.go @@ -19,6 +19,7 @@ package installer // import "k8s.io/helm/cmd/helm/installer" import ( "fmt" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/pkg/api/v1" "k8s.io/helm/pkg/version" ) @@ -71,6 +72,18 @@ type Options struct { // EnableHostNetwork installs Tiller with net=host. EnableHostNetwork bool + + // CPURequest is the CPU request used when deploying tiller. + CPURequest resource.Quantity + + // CPULimit is the CPU limit used when deploying tiller. + CPULimit resource.Quantity + + // MemoryRequest is the memory request used when deploying tiller. + MemoryRequest resource.Quantity + + // MemoryLimit is the memory limit used when deploying tiller. + MemoryLimit resource.Quantity } func (opts *Options) selectImage() string {