Added resource limit/request flags to helm init

This PR adds the following four flags to the helm init command so that
appropriate CPU/memory limits and requests can be set for the Tiller
container:

--tiller-cpu-limit
--tiller-cpu-request
--tiller-memory-limit
--tiller-memory-request

To maintain backwards compatibility, when none of these flags are set,
no resource limits or requests are added to the Tiller container. When
--tiller-cpu-limit is specified but --tiller-cpu-request is not,
Kubernetes defaults the CPU request to the same value as the limit, but
not vice versa. The same applies to --tiller-memory-limit and
--tiller-memory-request.
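
For illustration, a hypothetical invocation (the values below are
examples, not defaults):

    helm init --tiller-cpu-limit 500m --tiller-memory-request 256Mi

Here Tiller would get a CPU limit of 500m (and, per the defaulting
described above, a CPU request of 500m as well) plus a memory request
of 256Mi, while the memory limit remains unset.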

Fixes: https://github.com/kubernetes/helm/issues/2135
reviewable/pr2632/r3
Aishwarya Thangappa 8 years ago
parent 6eaeadf228
commit ffa06a8d8c

@@ -26,6 +26,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/client-go/kubernetes"
+    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/helm/cmd/helm/installer"
     "k8s.io/helm/pkg/getter"
     "k8s.io/helm/pkg/helm/helmpath"
@@ -79,6 +80,10 @@ type initCmd struct {
     opts           installer.Options
     kubeClient     kubernetes.Interface
     serviceAccount string
+    cpuLimit       string
+    memoryLimit    string
+    cpuRequest     string
+    memoryRequest  string
 }

 func newInitCmd(out io.Writer) *cobra.Command {
@@ -120,6 +125,10 @@ func newInitCmd(out io.Writer) *cobra.Command {
     f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host")
     f.StringVar(&i.serviceAccount, "service-account", "", "name of service account")
+    f.StringVar(&i.cpuLimit, "tiller-cpu-limit", "", "override Tiller CPU limit")
+    f.StringVar(&i.memoryLimit, "tiller-memory-limit", "", "override Tiller memory limit")
+    f.StringVar(&i.cpuRequest, "tiller-cpu-request", "", "override Tiller CPU request")
+    f.StringVar(&i.memoryRequest, "tiller-memory-request", "", "override Tiller memory request")

     return cmd
 }
@@ -149,16 +158,39 @@ func (i *initCmd) tlsOptions() error {
     return nil
 }

-// run initializes local config and installs Tiller to Kubernetes cluster.
+func (i *initCmd) generateResourceRequirements() error {
+    var err error
+    for _, opt := range []struct {
+        spec     string
+        quantity *resource.Quantity
+    }{
+        {i.cpuLimit, &i.opts.CPULimit},
+        {i.memoryLimit, &i.opts.MemoryLimit},
+        {i.cpuRequest, &i.opts.CPURequest},
+        {i.memoryRequest, &i.opts.MemoryRequest},
+    } {
+        if opt.spec != "" {
+            *opt.quantity, err = resource.ParseQuantity(opt.spec)
+            if err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+// run initializes local config and installs tiller to Kubernetes Cluster.
 func (i *initCmd) run() error {
     if err := i.tlsOptions(); err != nil {
         return err
     }
+    if err := i.generateResourceRequirements(); err != nil {
+        return err
+    }
+
     i.opts.Namespace = i.namespace
     i.opts.UseCanary = i.canary
     i.opts.ImageSpec = i.image
     i.opts.ServiceAccount = i.serviceAccount

     if settings.Debug {
         writeYAMLManifest := func(apiVersion, kind, body string, first, last bool) error {
             w := i.out

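The flag values added above are parsed with resource.ParseQuantity from
k8s.io/apimachinery, so they accept the usual Kubernetes quantity syntax
(plain integers, milli-units like 100m, binary suffixes like 1Gi). A
minimal standalone sketch of that parsing step, with illustrative sample
values:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Sample quantity strings in the syntax the new flags accept.
    for _, spec := range []string{"2", "100m", "1Gi", "500Mi"} {
        q, err := resource.ParseQuantity(spec)
        if err != nil {
            // Invalid specs surface here; this is the error path
            // that generateResourceRequirements returns on.
            fmt.Println(spec, "->", err)
            continue
        }
        fmt.Println(spec, "->", q.String())
    }
}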
@@ -302,3 +302,32 @@ func TestInitCmd_tlsOptions(t *testing.T) {
         }
     }
 }
+
+func TestInitCmd_resourceRequirements(t *testing.T) {
+    home, err := ioutil.TempDir("", "helm_home")
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer os.Remove(home)
+
+    var buf bytes.Buffer
+    fc := fake.NewSimpleClientset()
+    cmd := &initCmd{
+        out:           &buf,
+        home:          helmpath.Home(home),
+        kubeClient:    fc,
+        clientOnly:    true,
+        dryRun:        true,
+        namespace:     v1.NamespaceDefault,
+        cpuLimit:      "2",
+        memoryLimit:   "1Gi",
+        cpuRequest:    "100m",
+        memoryRequest: "500Mi",
+    }
+    if err := cmd.run(); err != nil {
+        t.Fatal(err)
+    }
+    if len(fc.Actions()) != 0 {
+        t.Error("expected no client calls in client-only dry-run mode")
+    }
+}
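
The new test can be run on its own from the repository root (package
path assumed from the standard Helm layout):

    go test ./cmd/helm -run TestInitCmd_resourceRequirements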

@@ -21,6 +21,7 @@ import (
     "github.com/ghodss/yaml"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/kubernetes"
@@ -116,8 +117,30 @@ func generateLabels(labels map[string]string) map[string]string {
     return labels
 }

+func generateResourceRequirements(opts *Options) v1.ResourceRequirements {
+    limits := make(map[v1.ResourceName]resource.Quantity, 2)
+    requests := make(map[v1.ResourceName]resource.Quantity, 2)
+    resources := v1.ResourceRequirements{}
+    if !(opts.CPULimit).IsZero() {
+        limits[v1.ResourceCPU] = opts.CPULimit
+    }
+    if !(opts.MemoryLimit).IsZero() {
+        limits[v1.ResourceMemory] = opts.MemoryLimit
+    }
+    if !(opts.CPURequest).IsZero() {
+        requests[v1.ResourceCPU] = opts.CPURequest
+    }
+    if !(opts.MemoryRequest).IsZero() {
+        requests[v1.ResourceMemory] = opts.MemoryRequest
+    }
+    resources.Requests = requests
+    resources.Limits = limits
+    return resources
+}
+
 func generateDeployment(opts *Options) *v1beta1.Deployment {
     labels := generateLabels(map[string]string{"name": "tiller"})
+    resources := generateResourceRequirements(opts)
     d := &v1beta1.Deployment{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: opts.Namespace,
@@ -162,6 +185,7 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
                         InitialDelaySeconds: 1,
                         TimeoutSeconds:      1,
                     },
+                    Resources: resources,
                 },
             },
             HostNetwork: opts.EnableHostNetwork,
@@ -172,7 +196,6 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
             },
         },
     }
-
     if opts.tls() {
         const certsDir = "/etc/certs"
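
For a sense of what generateResourceRequirements contributes to the
rendered manifest, here is a small sketch that marshals a
v1.ResourceRequirements the same way the installer renders manifests
(via ghodss/yaml); the quantities are illustrative, not defaults:

package main

import (
    "fmt"

    "github.com/ghodss/yaml"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/client-go/pkg/api/v1"
)

func main() {
    // Roughly what generateResourceRequirements builds when only
    // --tiller-cpu-limit=500m and --tiller-memory-request=256Mi are set.
    res := v1.ResourceRequirements{
        Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
        Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("256Mi")},
    }
    out, err := yaml.Marshal(res)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // Output:
    // limits:
    //   cpu: 500m
    // requests:
    //   memory: 256Mi
}

Because Limits and Requests are tagged omitempty, whichever map is left
empty is dropped from the output, which is how unset flags stay out of
the Tiller deployment.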

@@ -30,6 +30,8 @@ import (
     "k8s.io/client-go/pkg/apis/extensions/v1beta1"
     testcore "k8s.io/client-go/testing"

+    "fmt"
+    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/helm/pkg/version"
 )
@@ -144,6 +146,70 @@ func TestDeploymentManifest_WithTLS(t *testing.T) {
     }
 }

+func TestDeploymentManifest_WithResourceLimits(t *testing.T) {
+    tests := []struct {
+        cpuLimit      string
+        memoryLimit   string
+        cpuRequest    string
+        memoryRequest string
+    }{
+        {cpuLimit: "2", memoryLimit: "1Gi", cpuRequest: "100m", memoryRequest: "500Mi"},
+        {cpuLimit: "2", memoryLimit: "1Gi"},
+        {cpuRequest: "100m", memoryRequest: "500Mi"},
+        {},
+    }
+    for _, tt := range tests {
+        opts := &Options{Namespace: v1.NamespaceDefault}
+        var err error
+        if tt.cpuLimit != "" {
+            opts.CPULimit, err = resource.ParseQuantity(tt.cpuLimit)
+            if err != nil {
+                t.Errorf("error %q", err)
+            }
+        }
+        if tt.memoryLimit != "" {
+            opts.MemoryLimit, err = resource.ParseQuantity(tt.memoryLimit)
+            if err != nil {
+                t.Errorf("error %q", err)
+            }
+        }
+        if tt.cpuRequest != "" {
+            opts.CPURequest, err = resource.ParseQuantity(tt.cpuRequest)
+            if err != nil {
+                t.Errorf("error %q", err)
+            }
+        }
+        if tt.memoryRequest != "" {
+            opts.MemoryRequest, err = resource.ParseQuantity(tt.memoryRequest)
+            if err != nil {
+                t.Errorf("error %q", err)
+            }
+        }
+        o, err := DeploymentManifest(opts)
+        if err != nil {
+            t.Fatalf("error %q", err)
+        }
+        var d v1beta1.Deployment
+        if err := yaml.Unmarshal([]byte(o), &d); err != nil {
+            t.Fatalf("error %q", err)
+        }
+        fmt.Println(o)
+        // Verify that Resources in the deployment reflect the CPU/memory
+        // limits and requests that were set.
+        if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU]; got != opts.CPULimit {
+            t.Errorf("expected CPU limit %q, got %q", opts.CPULimit, got)
+        }
+        if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory]; got != opts.MemoryLimit {
+            t.Errorf("expected memory limit %q, got %q", opts.MemoryLimit, got)
+        }
+        if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]; got != opts.CPURequest {
+            t.Errorf("expected CPU request %q, got %q", opts.CPURequest, got)
+        }
+        if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]; got != opts.MemoryRequest {
+            t.Errorf("expected memory request %q, got %q", opts.MemoryRequest, got)
+        }
+    }
+}
+
 func TestServiceManifest(t *testing.T) {
     o, err := ServiceManifest(v1.NamespaceDefault)
     if err != nil {

@@ -19,6 +19,7 @@ package installer // import "k8s.io/helm/cmd/helm/installer"
 import (
     "fmt"
+    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/client-go/pkg/api/v1"
     "k8s.io/helm/pkg/version"
 )
@@ -71,6 +72,18 @@ type Options struct {
     // EnableHostNetwork installs Tiller with net=host.
     EnableHostNetwork bool
+
+    // CPULimit is the CPU limit used when deploying Tiller.
+    CPULimit resource.Quantity
+
+    // MemoryLimit is the memory limit used when deploying Tiller.
+    MemoryLimit resource.Quantity
+
+    // CPURequest is the CPU request used when deploying Tiller.
+    CPURequest resource.Quantity
+
+    // MemoryRequest is the memory request used when deploying Tiller.
+    MemoryRequest resource.Quantity
 }

 func (opts *Options) selectImage() string {
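
A design note on these fields: because resource.Quantity is stored by
value rather than as a pointer, the zero Quantity doubles as the "flag
not set" marker, which is what the IsZero checks in
generateResourceRequirements rely on (an explicit "0" is therefore
treated the same as an omitted flag). A tiny illustration:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    var unset resource.Quantity       // zero value: flag not provided
    set := resource.MustParse("100m") // parsed from a flag
    fmt.Println(unset.IsZero(), set.IsZero()) // true false
}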
