pull/2632/merge
commit 92935cba0d — Aishwarya Thangappa, committed by GitHub

@@ -26,6 +26,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/helm/cmd/helm/installer"
 	"k8s.io/helm/pkg/getter"
 	"k8s.io/helm/pkg/helm/helmpath"
@@ -79,6 +80,10 @@ type initCmd struct {
 	opts           installer.Options
 	kubeClient     kubernetes.Interface
 	serviceAccount string
+	cpuLimit       string
+	memoryLimit    string
+	cpuRequest     string
+	memoryRequest  string
 }

 func newInitCmd(out io.Writer) *cobra.Command {
@@ -118,6 +123,10 @@ func newInitCmd(out io.Writer) *cobra.Command {
 	f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host")
 	f.StringVar(&i.serviceAccount, "service-account", "", "name of service account")
+	f.StringVar(&i.cpuLimit, "tiller-cpu-limit", "", "override Tiller CPU limit")
+	f.StringVar(&i.memoryLimit, "tiller-memory-limit", "", "override Tiller memory limit")
+	f.StringVar(&i.cpuRequest, "tiller-cpu-request", "", "override Tiller CPU request")
+	f.StringVar(&i.memoryRequest, "tiller-memory-request", "", "override Tiller memory request")

 	return cmd
 }
@@ -147,16 +156,39 @@ func (i *initCmd) tlsOptions() error {
 	return nil
 }

+func (i *initCmd) generateResourceRequirements() error {
+	var err error
+	for _, opt := range []struct {
+		spec     string
+		quantity *resource.Quantity
+	}{
+		{i.cpuLimit, &i.opts.CPULimit},
+		{i.memoryLimit, &i.opts.MemoryLimit},
+		{i.cpuRequest, &i.opts.CPURequest},
+		{i.memoryRequest, &i.opts.MemoryRequest},
+	} {
+		if opt.spec != "" {
+			*opt.quantity, err = resource.ParseQuantity(opt.spec)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 // run initializes local config and installs Tiller to Kubernetes cluster.
 func (i *initCmd) run() error {
 	if err := i.tlsOptions(); err != nil {
 		return err
 	}
+	if err := i.generateResourceRequirements(); err != nil {
+		return err
+	}
+
 	i.opts.Namespace = i.namespace
 	i.opts.UseCanary = i.canary
 	i.opts.ImageSpec = i.image
 	i.opts.ServiceAccount = i.serviceAccount

 	if settings.Debug {
 		writeYAMLManifest := func(apiVersion, kind, body string, first, last bool) error {
 			w := i.out
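The parsing above delegates entirely to `resource.ParseQuantity`, so the new flags accept any standard Kubernetes quantity string and reject malformed input before anything is installed. A standalone sketch of that behavior (not part of the change; the sample values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Same parser the new --tiller-* flags go through.
	for _, spec := range []string{"2", "100m", "1Gi", "500Mi", "bogus"} {
		q, err := resource.ParseQuantity(spec)
		if err != nil {
			fmt.Printf("%-6s -> rejected: %v\n", spec, err)
			continue
		}
		fmt.Printf("%-6s -> %s (milli-units: %d)\n", spec, q.String(), q.MilliValue())
	}
}
```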

@@ -303,3 +303,32 @@ func TestInitCmd_tlsOptions(t *testing.T) {
 		}
 	}
 }
+
+func TestInitCmd_resourceRequirements(t *testing.T) {
+	home, err := ioutil.TempDir("", "helm_home")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(home)
+
+	var buf bytes.Buffer
+	fc := fake.NewSimpleClientset()
+	cmd := &initCmd{
+		out:           &buf,
+		home:          helmpath.Home(home),
+		kubeClient:    fc,
+		clientOnly:    true,
+		dryRun:        true,
+		namespace:     v1.NamespaceDefault,
+		cpuLimit:      "2",
+		memoryLimit:   "1Gi",
+		cpuRequest:    "100m",
+		memoryRequest: "500Mi",
+	}
+	if err := cmd.run(); err != nil {
+		t.Fatal(err)
+	}
+	if len(fc.Actions()) != 0 {
+		t.Error("expected no server calls")
+	}
+}
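For context, the fake clientset records every API call it receives, which is why zero recorded actions proves that `clientOnly` plus `dryRun` never touched the cluster. A minimal illustration, assuming the pre-context client-go API that Helm vendored at the time:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	fc := fake.NewSimpleClientset()
	fmt.Println(len(fc.Actions())) // 0: nothing has called the API yet

	// Any call through the clientset is recorded as an action.
	fc.CoreV1().ConfigMaps("default").List(metav1.ListOptions{})
	fmt.Println(len(fc.Actions())) // 1
}
```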

@@ -21,6 +21,7 @@ import (
 	"github.com/ghodss/yaml"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
@@ -116,8 +117,30 @@ func generateLabels(labels map[string]string) map[string]string {
 	return labels
 }

+func generateResourceRequirements(opts *Options) v1.ResourceRequirements {
+	limits := make(map[v1.ResourceName]resource.Quantity, 2)
+	requests := make(map[v1.ResourceName]resource.Quantity, 2)
+	resources := v1.ResourceRequirements{}
+	if !opts.CPULimit.IsZero() {
+		limits[v1.ResourceCPU] = opts.CPULimit
+	}
+	if !opts.MemoryLimit.IsZero() {
+		limits[v1.ResourceMemory] = opts.MemoryLimit
+	}
+	if !opts.CPURequest.IsZero() {
+		requests[v1.ResourceCPU] = opts.CPURequest
+	}
+	if !opts.MemoryRequest.IsZero() {
+		requests[v1.ResourceMemory] = opts.MemoryRequest
+	}
+	resources.Requests = requests
+	resources.Limits = limits
+	return resources
+}
+
 func generateDeployment(opts *Options) *v1beta1.Deployment {
 	labels := generateLabels(map[string]string{"name": "tiller"})
+	resources := generateResourceRequirements(opts)
 	d := &v1beta1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: opts.Namespace,
@@ -162,6 +185,7 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
 						InitialDelaySeconds: 1,
 						TimeoutSeconds:      1,
 					},
+					Resources: resources,
 				},
 			},
 			HostNetwork: opts.EnableHostNetwork,
@@ -172,7 +196,6 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
 			},
 		},
 	}
-
 	if opts.tls() {
 		const certsDir = "/etc/certs"
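The `IsZero` checks are what make the flags optional: a `resource.Quantity` left at its zero value is never added to the map, so the rendered container spec only pins the resources the user actually set. A minimal sketch of the same skip-zero pattern, using the `k8s.io/client-go/pkg/api/v1` types vendored in this diff (the values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	var cpu resource.Quantity        // zero value: --tiller-cpu-limit not set
	mem := resource.MustParse("1Gi") // --tiller-memory-limit=1Gi

	limits := v1.ResourceList{}
	if !cpu.IsZero() {
		limits[v1.ResourceCPU] = cpu
	}
	if !mem.IsZero() {
		limits[v1.ResourceMemory] = mem
	}
	// Only memory appears; CPU is left unconstrained by the Deployment.
	for name, q := range limits {
		fmt.Printf("%s: %s\n", name, q.String())
	}
}
```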

@@ -30,6 +30,7 @@ import (
 	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
 	testcore "k8s.io/client-go/testing"

+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/helm/pkg/version"
 )
@@ -144,6 +145,69 @@ func TestDeploymentManifest_WithTLS(t *testing.T) {
 	}
 }
+
+func TestDeploymentManifest_WithResourceLimits(t *testing.T) {
+	tests := []struct {
+		cpuLimit      string
+		memoryLimit   string
+		cpuRequest    string
+		memoryRequest string
+	}{
+		{cpuLimit: "2", memoryLimit: "1Gi", cpuRequest: "100m", memoryRequest: "500Mi"},
+		{cpuLimit: "2", memoryLimit: "1Gi"},
+		{cpuRequest: "100m", memoryRequest: "500Mi"},
+		{},
+	}
+
+	for _, tt := range tests {
+		opts := &Options{Namespace: v1.NamespaceDefault}
+		var err error
+		if tt.cpuLimit != "" {
+			opts.CPULimit, err = resource.ParseQuantity(tt.cpuLimit)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.memoryLimit != "" {
+			opts.MemoryLimit, err = resource.ParseQuantity(tt.memoryLimit)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.cpuRequest != "" {
+			opts.CPURequest, err = resource.ParseQuantity(tt.cpuRequest)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+		if tt.memoryRequest != "" {
+			opts.MemoryRequest, err = resource.ParseQuantity(tt.memoryRequest)
+			if err != nil {
+				t.Errorf("Error %q", err)
+			}
+		}
+
+		o, err := DeploymentManifest(opts)
+		if err != nil {
+			t.Fatalf("error %q", err)
+		}
+		var d v1beta1.Deployment
+		if err := yaml.Unmarshal([]byte(o), &d); err != nil {
+			t.Fatalf("error %q", err)
+		}
+
+		// Verify that the Resources in the Deployment reflect the configured
+		// CPU/memory limits and requests.
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU]; got != opts.CPULimit {
+			t.Errorf("Expected CPU limit %q, got %q", opts.CPULimit, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory]; got != opts.MemoryLimit {
+			t.Errorf("Expected memory limit %q, got %q", opts.MemoryLimit, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]; got != opts.CPURequest {
+			t.Errorf("Expected CPU request %q, got %q", opts.CPURequest, got)
+		}
+		if got := d.Spec.Template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]; got != opts.MemoryRequest {
+			t.Errorf("Expected memory request %q, got %q", opts.MemoryRequest, got)
+		}
+	}
+}
 func TestServiceManifest(t *testing.T) {
 	o, err := ServiceManifest(v1.NamespaceDefault)
 	if err != nil {

@@ -19,6 +19,7 @@ package installer // import "k8s.io/helm/cmd/helm/installer"
 import (
 	"fmt"

+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/client-go/pkg/api/v1"
 	"k8s.io/helm/pkg/version"
 )
@@ -71,6 +72,18 @@ type Options struct {
 	// EnableHostNetwork installs Tiller with net=host.
 	EnableHostNetwork bool

+	// CPULimit is the CPU limit used when deploying Tiller.
+	CPULimit resource.Quantity
+
+	// MemoryLimit is the memory limit used when deploying Tiller.
+	MemoryLimit resource.Quantity
+
+	// CPURequest is the CPU request used when deploying Tiller.
+	CPURequest resource.Quantity
+
+	// MemoryRequest is the memory request used when deploying Tiller.
+	MemoryRequest resource.Quantity
 }

 func (opts *Options) selectImage() string {
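Because the new fields are plain `resource.Quantity` values, anything driving the installer programmatically can set them the same way the CLI does after parsing. A hedged sketch of that wiring (the surrounding `main` is illustrative; `DeploymentManifest` is the entry point the tests in this change exercise):

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/helm/cmd/helm/installer"
)

func main() {
	opts := &installer.Options{
		Namespace:     "kube-system",
		CPULimit:      resource.MustParse("2"),
		MemoryLimit:   resource.MustParse("1Gi"),
		CPURequest:    resource.MustParse("100m"),
		MemoryRequest: resource.MustParse("500Mi"),
	}
	manifest, err := installer.DeploymentManifest(opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(manifest) // Tiller Deployment YAML with the resources block populated
}
```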

@@ -33,21 +33,25 @@ helm init
 ### Options

 ```
       --canary-image                   use the canary Tiller image
   -c, --client-only                    if set does not install Tiller
       --dry-run                        do not install local or remote
       --local-repo-url string          URL for local repository (default "http://127.0.0.1:8879/charts")
       --net-host                       install Tiller with net=host
       --service-account string         name of service account
       --skip-refresh                   do not refresh (download) the local repository cache
       --stable-repo-url string         URL for stable repository (default "https://kubernetes-charts.storage.googleapis.com")
+      --tiller-cpu-limit string        override Tiller CPU limit
+      --tiller-cpu-request string      override Tiller CPU request
   -i, --tiller-image string            override Tiller image
+      --tiller-memory-limit string     override Tiller memory limit
+      --tiller-memory-request string   override Tiller memory request
       --tiller-tls                     install Tiller with TLS enabled
       --tiller-tls-cert string         path to TLS certificate file to install with Tiller
       --tiller-tls-key string          path to TLS key file to install with Tiller
       --tiller-tls-verify              install Tiller with TLS enabled and to verify remote certificates
       --tls-ca-cert string             path to CA root certificate
       --upgrade                        upgrade if Tiller is already installed
 ```
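For example, capping Tiller at 2 CPUs and 1Gi of memory while requesting a smaller baseline (values are illustrative; each flag takes a standard Kubernetes quantity string):

```
helm init --tiller-cpu-limit 2 --tiller-memory-limit 1Gi \
          --tiller-cpu-request 100m --tiller-memory-request 500Mi
```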
 ### Options inherited from parent commands
@@ -63,4 +67,4 @@ helm init
 ### SEE ALSO
 * [helm](helm.md) - The Helm package manager for Kubernetes.

-###### Auto generated by spf13/cobra on 26-Jun-2017
+###### Auto generated by spf13/cobra on 12-Jul-2017
