feat(*): allow tiller to be installed in an alternate namespace

Adds `--tiller-namespace` flag and `TILLER_NAMESPACE` envvar
to use tiller in an alternate namespace.

closes #1418
pull/1691/head
Adam Reese 8 years ago
parent 7f4ea91028
commit 84928e2751
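The client-side resolution order introduced here is: an explicit `--tiller-namespace` flag wins, otherwise the `TILLER_NAMESPACE` environment variable is used (it seeds the flag's default), and `kube-system` is the final fallback. Below is a minimal standalone sketch of that wiring, assuming illustrative names (`defaultNamespace`, the `demo` command) rather than the exact code in this commit:

```go
// Standalone sketch (not the commit's code): how a --tiller-namespace flag
// can default to $TILLER_NAMESPACE and finally to "kube-system".
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

const defaultTillerNamespace = "kube-system"

// defaultNamespace mirrors the pattern of the commit's defaultTillerNamespace
// helper: the env var only seeds the flag default, so an explicit flag wins.
func defaultNamespace() string {
	if ns := os.Getenv("TILLER_NAMESPACE"); ns != "" {
		return ns
	}
	return defaultTillerNamespace
}

func main() {
	var tillerNamespace string

	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("using tiller namespace:", tillerNamespace)
		},
	}
	cmd.PersistentFlags().StringVar(&tillerNamespace, "tiller-namespace", defaultNamespace(), "namespace of tiller")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```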

@ -23,6 +23,7 @@ import (
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
@ -40,11 +41,13 @@ const (
localRepoIndexFilePath = "index.yaml"
homeEnvVar = "HELM_HOME"
hostEnvVar = "HELM_HOST"
tillerNamespaceEnvVar = "TILLER_NAMESPACE"
)
var (
helmHome string
tillerHost string
tillerNamespace string
kubeContext string
)
@ -70,7 +73,8 @@ Common actions from this point include:
Environment:
$HELM_HOME set an alternative location for Helm files. By default, these are stored in ~/.helm
$HELM_HOST set an alternative Tiller host. The format is host:port
$KUBECONFIG set an alternate Kubernetes configuration file (default "~/.kube/config")
$TILLER_NAMESPACE set an alternative Tiller namespace (default "kube-system")
$KUBECONFIG set an alternative Kubernetes configuration file (default "~/.kube/config")
`
func newRootCmd(out io.Writer) *cobra.Command {
@ -83,16 +87,12 @@ func newRootCmd(out io.Writer) *cobra.Command {
teardown()
},
}
home := os.Getenv(homeEnvVar)
if home == "" {
home = "$HOME/.helm"
}
thost := os.Getenv(hostEnvVar)
p := cmd.PersistentFlags()
p.StringVar(&helmHome, "home", home, "location of your Helm config. Overrides $HELM_HOME")
p.StringVar(&tillerHost, "host", thost, "address of tiller. Overrides $HELM_HOST")
p.StringVar(&helmHome, "home", defaultHelmHome(), "location of your Helm config. Overrides $HELM_HOME")
p.StringVar(&tillerHost, "host", defaultHelmHost(), "address of tiller. Overrides $HELM_HOST")
p.StringVar(&kubeContext, "kube-context", "", "name of the kubeconfig context to use")
p.BoolVar(&flagDebug, "debug", false, "enable verbose output")
p.StringVar(&tillerNamespace, "tiller-namespace", defaultTillerNamespace(), "namespace of tiller")
// Tell gRPC not to log to console.
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
@ -146,7 +146,7 @@ func main() {
func setupConnection(c *cobra.Command, args []string) error {
if tillerHost == "" {
tunnel, err := newTillerPortForwarder(environment.TillerNamespace, kubeContext)
tunnel, err := newTillerPortForwarder(tillerNamespace, kubeContext)
if err != nil {
return err
}
@ -194,10 +194,28 @@ func prettyError(err error) error {
return errors.New(grpc.ErrorDesc(err))
}
func defaultHelmHome() string {
if home := os.Getenv(homeEnvVar); home != "" {
return home
}
return filepath.Join(os.Getenv("HOME"), ".helm")
}
func homePath() string {
return os.ExpandEnv(helmHome)
}
func defaultHelmHost() string {
return os.Getenv(hostEnvVar)
}
func defaultTillerNamespace() string {
if ns := os.Getenv(tillerNamespaceEnvVar); ns != "" {
return ns
}
return environment.DefaultTillerNamespace
}
// getKubeClient is a convenience method for creating kubernetes config and client
// for a given kubeconfig context
func getKubeClient(context string) (*restclient.Config, *internalclientset.Clientset, error) {

@ -29,7 +29,6 @@ import (
"k8s.io/helm/cmd/helm/helmpath"
"k8s.io/helm/cmd/helm/installer"
"k8s.io/helm/pkg/repo"
"k8s.io/helm/pkg/tiller/environment"
)
const initDesc = `
@ -76,7 +75,6 @@ type initCmd struct {
func newInitCmd(out io.Writer) *cobra.Command {
i := &initCmd{
out: out,
namespace: environment.TillerNamespace,
}
cmd := &cobra.Command{
@ -87,6 +85,7 @@ func newInitCmd(out io.Writer) *cobra.Command {
if len(args) != 0 {
return errors.New("This command does not accept arguments")
}
i.namespace = tillerNamespace
i.home = helmpath.Home(homePath())
return i.run()
},

@ -88,7 +88,12 @@ func generateDeployment(namespace, image string) *extensions.Deployment {
Name: "tiller",
Image: image,
ImagePullPolicy: "IfNotPresent",
Ports: []api.ContainerPort{{ContainerPort: 44134, Name: "tiller"}},
Ports: []api.ContainerPort{
{ContainerPort: 44134, Name: "tiller"},
},
Env: []api.EnvVar{
{Name: "TILLER_NAMESPACE", Value: namespace},
},
LivenessProbe: &api.Probe{
Handler: api.Handler{
HTTPGet: &api.HTTPGetAction{

@ -57,6 +57,10 @@ func TestDeploymentManifest(t *testing.T) {
if got := dep.Spec.Template.Spec.Containers[0].Image; got != tt.expect {
t.Errorf("%s: expected image %q, got %q", tt.name, tt.expect, got)
}
if got := dep.Spec.Template.Spec.Containers[0].Env[0].Value; got != api.NamespaceDefault {
t.Errorf("%s: expected namespace %q, got %q", tt.name, api.NamespaceDefault, got)
}
}
}

@ -18,10 +18,12 @@ package main // import "k8s.io/helm/cmd/tiller"
import (
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strings"
"github.com/spf13/cobra"
@ -70,9 +72,11 @@ var rootCommand = &cobra.Command{
Run: start,
}
func main() {
func init() {
log.SetFlags(log.Flags() | log.Lshortfile)
}
func main() {
p := rootCommand.PersistentFlags()
p.StringVarP(&grpcAddr, "listen", "l", ":44134", "address:port to listen on")
p.StringVar(&store, "storage", storageConfigMap, "storage driver to use. One of 'configmap' or 'memory'")
@ -90,7 +94,7 @@ func start(c *cobra.Command, args []string) {
case storageMemory:
env.Releases = storage.Init(driver.NewMemory())
case storageConfigMap:
env.Releases = storage.Init(driver.NewConfigMaps(clientset.Core().ConfigMaps(environment.TillerNamespace)))
env.Releases = storage.Init(driver.NewConfigMaps(clientset.Core().ConfigMaps(namespace())))
}
lstn, err := net.Listen("tcp", grpcAddr)
@ -132,3 +136,19 @@ func start(c *cobra.Command, args []string) {
fmt.Fprintf(os.Stderr, "Probes server died: %s\n", err)
}
}
// namespace returns the namespace of tiller
func namespace() string {
if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
return ns
}
// Fall back to the namespace associated with the service account token, if available
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return environment.DefaultTillerNamespace
}

@ -110,12 +110,16 @@ You can explicitly tell `helm init` to...
- Install the canary build with the `--canary-image` flag
- Install a particular image (version) with `--tiller-image`
- Install to a particular cluster with `--kube-context`
- Install into a particular namespace with `--tiller-namespace`
Once Tiller is installed, running `helm version` should show you both
the client and server version. (If it shows only the client version,
`helm` cannot yet connect to the server. Use `kubectl` to see if any
`tiller` pods are running.)
Helm will look for Tiller in the `kube-system` namespace unless
`--tiller-namespace` or `TILLER_NAMESPACE` is set.
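If you prefer to check for Tiller programmatically rather than with `kubectl`, a small client-go sketch like the one below lists Tiller pods in a given namespace. The `app=helm,name=tiller` label selector and the recent client-go API used here are assumptions of this sketch, not part of this commit:

```go
// Sketch only: check for Tiller pods in a namespace with client-go rather
// than kubectl. Label selector and client-go version are assumptions.
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// Same lookup order the docs describe: $TILLER_NAMESPACE, else kube-system.
	ns := os.Getenv("TILLER_NAMESPACE")
	if ns == "" {
		ns = "kube-system"
	}

	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// Assumed labels on the Tiller deployment: app=helm, name=tiller.
	pods, err := client.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
		LabelSelector: "app=helm,name=tiller",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d tiller pod(s) in namespace %q\n", len(pods.Items), ns)
}
```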
### Installing Tiller Canary Builds
Canary images are built from the `master` branch. They may not be

@ -33,8 +33,8 @@ import (
"k8s.io/helm/pkg/storage/driver"
)
// TillerNamespace is the namespace tiller is running in.
const TillerNamespace = "kube-system"
// DefaultTillerNamespace is the default namespace for tiller.
const DefaultTillerNamespace = "kube-system"
// GoTplEngine is the name of the Go template engine, as registered in the EngineYard.
const GoTplEngine = "gotpl"
