|
|
|
/*
|
|
|
|
Copyright The Helm Authors.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package main // import "k8s.io/helm/cmd/tiller"
|
|
|
|
|
|
|
|
import (
|
|
|
|
"crypto/tls"
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"log"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
goprom "github.com/grpc-ecosystem/go-grpc-prometheus"
|
|
|
|
"google.golang.org/grpc"
|
|
|
|
"google.golang.org/grpc/credentials"
|
|
|
|
"google.golang.org/grpc/health"
|
|
|
|
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
|
|
|
"google.golang.org/grpc/keepalive"
|
|
|
|
"k8s.io/klog"
|
|
|
|
|
|
|
|
// Import to initialize client auth plugins.
|
|
|
|
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
|
|
|
|
|
|
|
"k8s.io/helm/pkg/kube"
|
|
|
|
"k8s.io/helm/pkg/proto/hapi/services"
|
|
|
|
"k8s.io/helm/pkg/storage"
|
|
|
|
"k8s.io/helm/pkg/storage/driver"
|
|
|
|
"k8s.io/helm/pkg/tiller"
|
|
|
|
"k8s.io/helm/pkg/tiller/environment"
|
|
|
|
"k8s.io/helm/pkg/tlsutil"
|
|
|
|
"k8s.io/helm/pkg/version"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
// tlsEnableEnvVar names the environment variable that enables TLS.
tlsEnableEnvVar = "TILLER_TLS_ENABLE"
// tlsVerifyEnvVar names the environment variable that enables
// TLS, as well as certificate verification of the remote.
tlsVerifyEnvVar = "TILLER_TLS_VERIFY"
// tlsCertsEnvVar names the environment variable that points to
// the directory where Tiller's TLS certificates are located.
tlsCertsEnvVar = "TILLER_TLS_CERTS"
// historyMaxEnvVar is the name of the env var for setting max history.
historyMaxEnvVar = "TILLER_HISTORY_MAX"

// Accepted values for the --storage flag, selecting the release
// storage backend used in start().
storageMemory = "memory"
storageConfigMap = "configmap"
storageSecret = "secret"
storageSQL = "sql"

// traceAddr is the address:port the rpc trace HTTP endpoint listens
// on when --trace is enabled.
traceAddr = ":44136"

// defaultMaxHistory sets the maximum number of releases to 0: unlimited
defaultMaxHistory = 0
)
|
|
|
|
|
|
|
|
var (
|
|
|
|
grpcAddr = flag.String("listen", fmt.Sprintf(":%v", environment.DefaultTillerPort), "address:port to listen on")
|
|
|
|
probeAddr = flag.String("probe-listen", fmt.Sprintf(":%v", environment.DefaultTillerProbePort), "address:port to listen on for probes")
|
|
|
|
enableProbing = flag.Bool("probe", true, "enable probing over http")
|
|
|
|
enableTracing = flag.Bool("trace", false, "enable rpc tracing")
|
|
|
|
store = flag.String("storage", storageConfigMap, "storage driver to use. One of 'configmap', 'memory', 'sql' or 'secret'")
|
|
|
|
|
|
|
|
sqlDialect = flag.String("sql-dialect", "postgres", "SQL dialect to use (only postgres is supported for now")
|
|
|
|
sqlConnectionString = flag.String("sql-connection-string", "", "SQL connection string to use")
|
|
|
|
|
|
|
|
remoteReleaseModules = flag.Bool("experimental-release", false, "enable experimental release modules")
|
|
|
|
|
|
|
|
tlsEnable = flag.Bool("tls", tlsEnableEnvVarDefault(), "enable TLS")
|
|
|
|
tlsVerify = flag.Bool("tls-verify", tlsVerifyEnvVarDefault(), "enable TLS and verify remote certificate")
|
|
|
|
keyFile = flag.String("tls-key", tlsDefaultsFromEnv("tls-key"), "path to TLS private key file")
|
|
|
|
certFile = flag.String("tls-cert", tlsDefaultsFromEnv("tls-cert"), "path to TLS certificate file")
|
|
|
|
caCertFile = flag.String("tls-ca-cert", tlsDefaultsFromEnv("tls-ca-cert"), "trust certificates signed by this CA")
|
|
|
|
maxHistory = flag.Int("history-max", historyMaxFromEnv(), "maximum number of releases kept in release history, with 0 meaning no limit")
|
|
|
|
printVersion = flag.Bool("version", false, "print the version number")
|
|
|
|
|
|
|
|
// rootServer is the root gRPC server.
|
|
|
|
//
|
|
|
|
// Each gRPC service registers itself to this server during start().
|
|
|
|
rootServer *grpc.Server
|
|
|
|
|
|
|
|
// env is the default environment.
|
|
|
|
//
|
|
|
|
// Any changes to env should be done before rootServer.Serve() is called.
|
|
|
|
env = environment.New()
|
|
|
|
|
|
|
|
logger *log.Logger
|
|
|
|
)
|
|
|
|
|
|
|
|
func main() {
|
|
|
|
klog.InitFlags(nil)
|
|
|
|
// TODO: use spf13/cobra for tiller instead of flags
|
|
|
|
flag.Parse()
|
|
|
|
|
|
|
|
if *printVersion {
|
|
|
|
fmt.Println(version.GetVersion())
|
|
|
|
os.Exit(0)
|
|
|
|
}
|
|
|
|
|
|
|
|
if *enableTracing {
|
|
|
|
log.SetFlags(log.Lshortfile)
|
|
|
|
}
|
|
|
|
logger = newLogger("main")
|
|
|
|
|
|
|
|
start()
|
|
|
|
}
|
|
|
|
|
|
|
|
func start() {
|
|
|
|
|
|
|
|
healthSrv := health.NewServer()
|
|
|
|
healthSrv.SetServingStatus("Tiller", healthpb.HealthCheckResponse_NOT_SERVING)
|
|
|
|
|
|
|
|
clientset, err := kube.New(nil).KubernetesClientSet()
|
|
|
|
if err != nil {
|
|
|
|
logger.Fatalf("Cannot initialize Kubernetes connection: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
switch *store {
|
|
|
|
case storageMemory:
|
|
|
|
env.Releases = storage.Init(driver.NewMemory())
|
|
|
|
case storageConfigMap:
|
|
|
|
cfgmaps := driver.NewConfigMaps(clientset.CoreV1().ConfigMaps(namespace()))
|
|
|
|
cfgmaps.Log = newLogger("storage/driver").Printf
|
|
|
|
|
|
|
|
env.Releases = storage.Init(cfgmaps)
|
|
|
|
env.Releases.Log = newLogger("storage").Printf
|
|
|
|
case storageSecret:
|
|
|
|
secrets := driver.NewSecrets(clientset.CoreV1().Secrets(namespace()))
|
|
|
|
secrets.Log = newLogger("storage/driver").Printf
|
|
|
|
|
|
|
|
env.Releases = storage.Init(secrets)
|
|
|
|
env.Releases.Log = newLogger("storage").Printf
|
|
|
|
case storageSQL:
|
|
|
|
sqlDriver, err := driver.NewSQL(
|
|
|
|
*sqlDialect,
|
|
|
|
*sqlConnectionString,
|
|
|
|
newLogger("storage/driver").Printf,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
logger.Fatalf("Cannot initialize SQL storage driver: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
env.Releases = storage.Init(sqlDriver)
|
|
|
|
env.Releases.Log = newLogger("storage").Printf
|
|
|
|
}
|
|
|
|
|
|
|
|
if *maxHistory > 0 {
|
|
|
|
env.Releases.MaxHistory = *maxHistory
|
|
|
|
}
|
|
|
|
|
|
|
|
kubeClient := kube.New(nil)
|
|
|
|
kubeClient.Log = newLogger("kube").Printf
|
|
|
|
env.KubeClient = kubeClient
|
|
|
|
|
|
|
|
if *tlsEnable || *tlsVerify {
|
|
|
|
opts := tlsutil.Options{CertFile: *certFile, KeyFile: *keyFile}
|
|
|
|
if *tlsVerify {
|
|
|
|
opts.CaCertFile = *caCertFile
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var opts []grpc.ServerOption
|
|
|
|
if *tlsEnable || *tlsVerify {
|
|
|
|
cfg, err := tlsutil.ServerConfig(tlsOptions())
|
|
|
|
if err != nil {
|
|
|
|
logger.Fatalf("Could not create server TLS configuration: %v", err)
|
|
|
|
}
|
|
|
|
opts = append(opts, grpc.Creds(credentials.NewTLS(cfg)))
|
|
|
|
}
|
|
|
|
|
|
|
|
opts = append(opts, grpc.KeepaliveParams(keepalive.ServerParameters{
|
|
|
|
MaxConnectionIdle: 10 * time.Minute,
|
|
|
|
// If needed, we can configure the max connection age
|
|
|
|
}))
|
|
|
|
opts = append(opts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
|
|
|
|
MinTime: time.Duration(20) * time.Second, // For compatibility with the client keepalive.ClientParameters
|
|
|
|
}))
|
|
|
|
|
|
|
|
rootServer = tiller.NewServer(opts...)
|
|
|
|
healthpb.RegisterHealthServer(rootServer, healthSrv)
|
|
|
|
|
|
|
|
lstn, err := net.Listen("tcp", *grpcAddr)
|
|
|
|
if err != nil {
|
|
|
|
logger.Fatalf("Server died: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Printf("Starting Tiller %s (tls=%t)", version.GetVersion(), *tlsEnable || *tlsVerify)
|
|
|
|
logger.Printf("GRPC listening on %s", *grpcAddr)
|
feat: tiller probing endpoints can be disabled
This adds the `--probe=[true|false]` flag to `tiller`, so that you can selectively disable the following probing HTTP endpoints:
- `/readiness`
- `/liveness`
- `/metrics`
One of expected use-cases of this feature would be to avoid consuming an extra port per `tiller`, which becomes more problematic in the [tillerless](https://github.com/rimusz/helm-tiller) setup.
The default is `--probe=true`, which starts the probing endpoints as before.
Implementation-wise, I intentionally made it so that the number of changed lines is as small as possible.
That is, I opted not to factor out the probes server starting logic into its own function, like `startProbesServer`.
Instead, I just added conditionals to the logging part and the server starting part.
As it isn't easily E2E testable, I've verified it to work by running the following commands manually.
With probing enabled(default):
```
$ ./tiller
[main] 2019/04/06 09:20:15 Starting Tiller v2.12+unreleased (tls=false)
[main] 2019/04/06 09:20:15 GRPC listening on :44134
[main] 2019/04/06 09:20:15 Probes listening on :44135
[main] 2019/04/06 09:20:15 Storage driver is ConfigMap
[main] 2019/04/06 09:20:15 Max history per release is 0
```
With probing disabled, you'll see no tiller is no longer listening on 44135:
```
$ ./tiller --probe=false
[main] 2019/04/06 09:20:07 Starting Tiller v2.12+unreleased (tls=false)
[main] 2019/04/06 09:20:07 GRPC listening on :44134
[main] 2019/04/06 09:20:07 Storage driver is ConfigMap
[main] 2019/04/06 09:20:07 Max history per release is 0
```
To ensure that tiller can disable the probing endpoints, I ran multiple tillers at once, with/without `--probe=false`:
The first test runs three tillers without `--probe=false`.
As expected, it results in two tillers failes due to the conflicting port, as you can see in the message `Probes server died: listen tcp :44135: bind: address already in use`.
```
$ bash -c 'for i in {0..2}; do (./tiller --listen=:$((44136+$i)) 2>&1 | sed "s/^/tiller $i: /" )& done; sleep 3 ; pkill tiller'
tiller 1: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 1: [main] 2019/04/06 09:57:49 GRPC listening on :44137
tiller 1: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 1: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 1: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 0: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 0: [main] 2019/04/06 09:57:49 GRPC listening on :44136
tiller 0: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 0: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 0: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 0: [main] 2019/04/06 09:57:49 Probes server died: listen tcp :44135: bind: address already in use
tiller 2: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 2: [main] 2019/04/06 09:57:49 GRPC listening on :44138
tiller 2: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 2: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 2: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 2: [main] 2019/04/06 09:57:49 Probes server died: listen tcp :44135: bind: address already in use
```
The second test runs three tillers with `--probe=false`.
It results in all tillers running without errors, that indicates this feature is working as expected:
```
$ bash -c 'for i in {0..2}; do (./tiller --listen=:$((44136+$i)) --probe=false 2>&1 | sed "s/^/tiller $i: /" )& done; sleep 3 ; pkill tiller'
tiller 1: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 1: [main] 2019/04/06 09:58:18 GRPC listening on :44137
tiller 1: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 1: [main] 2019/04/06 09:58:18 Max history per release is 0
tiller 2: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 2: [main] 2019/04/06 09:58:18 GRPC listening on :44138
tiller 2: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 2: [main] 2019/04/06 09:58:18 Max history per release is 0
tiller 0: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 0: [main] 2019/04/06 09:58:18 GRPC listening on :44136
tiller 0: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 0: [main] 2019/04/06 09:58:18 Max history per release is 0
```
Resolves #3159
Signed-off-by: Yusuke KUOKA <ykuoka@gmail.com>
6 years ago
|
|
|
if *enableProbing {
|
|
|
|
logger.Printf("Probes listening on %s", *probeAddr)
|
|
|
|
}
|
|
|
|
logger.Printf("Storage driver is %s", env.Releases.Name())
|
|
|
|
logger.Printf("Max history per release is %d", *maxHistory)
|
|
|
|
|
|
|
|
if *enableTracing {
|
|
|
|
startTracing(traceAddr)
|
|
|
|
}
|
|
|
|
|
|
|
|
srvErrCh := make(chan error)
|
|
|
|
probeErrCh := make(chan error)
|
|
|
|
go func() {
|
|
|
|
svc := tiller.NewReleaseServer(env, clientset, *remoteReleaseModules)
|
|
|
|
svc.Log = newLogger("tiller").Printf
|
|
|
|
services.RegisterReleaseServiceServer(rootServer, svc)
|
|
|
|
if err := rootServer.Serve(lstn); err != nil {
|
|
|
|
srvErrCh <- err
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
go func() {
|
feat: tiller probing endpoints can be disabled
This adds the `--probe=[true|false]` flag to `tiller`, so that you can selectively disable the following probing HTTP endpoints:
- `/readiness`
- `/liveness`
- `/metrics`
One of expected use-cases of this feature would be to avoid consuming an extra port per `tiller`, which becomes more problematic in the [tillerless](https://github.com/rimusz/helm-tiller) setup.
The default is `--probe=true`, which starts the probing endpoints as before.
Implementation-wise, I intentionally made it so that the number of changed lines is as small as possible.
That is, I opted not to factor out the probes server starting logic into its own function, like `startProbesServer`.
Instead, I just added conditionals to the logging part and the server starting part.
As it isn't easily E2E testable, I've verified it to work by running the following commands manually.
With probing enabled(default):
```
$ ./tiller
[main] 2019/04/06 09:20:15 Starting Tiller v2.12+unreleased (tls=false)
[main] 2019/04/06 09:20:15 GRPC listening on :44134
[main] 2019/04/06 09:20:15 Probes listening on :44135
[main] 2019/04/06 09:20:15 Storage driver is ConfigMap
[main] 2019/04/06 09:20:15 Max history per release is 0
```
With probing disabled, you'll see no tiller is no longer listening on 44135:
```
$ ./tiller --probe=false
[main] 2019/04/06 09:20:07 Starting Tiller v2.12+unreleased (tls=false)
[main] 2019/04/06 09:20:07 GRPC listening on :44134
[main] 2019/04/06 09:20:07 Storage driver is ConfigMap
[main] 2019/04/06 09:20:07 Max history per release is 0
```
To ensure that tiller can disable the probing endpoints, I ran multiple tillers at once, with/without `--probe=false`:
The first test runs three tillers without `--probe=false`.
As expected, it results in two tillers failes due to the conflicting port, as you can see in the message `Probes server died: listen tcp :44135: bind: address already in use`.
```
$ bash -c 'for i in {0..2}; do (./tiller --listen=:$((44136+$i)) 2>&1 | sed "s/^/tiller $i: /" )& done; sleep 3 ; pkill tiller'
tiller 1: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 1: [main] 2019/04/06 09:57:49 GRPC listening on :44137
tiller 1: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 1: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 1: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 0: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 0: [main] 2019/04/06 09:57:49 GRPC listening on :44136
tiller 0: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 0: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 0: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 0: [main] 2019/04/06 09:57:49 Probes server died: listen tcp :44135: bind: address already in use
tiller 2: [main] 2019/04/06 09:57:49 Starting Tiller v2.12+unreleased (tls=false)
tiller 2: [main] 2019/04/06 09:57:49 GRPC listening on :44138
tiller 2: [main] 2019/04/06 09:57:49 Probes listening on :44135
tiller 2: [main] 2019/04/06 09:57:49 Storage driver is ConfigMap
tiller 2: [main] 2019/04/06 09:57:49 Max history per release is 0
tiller 2: [main] 2019/04/06 09:57:49 Probes server died: listen tcp :44135: bind: address already in use
```
The second test runs three tillers with `--probe=false`.
It results in all tillers running without errors, that indicates this feature is working as expected:
```
$ bash -c 'for i in {0..2}; do (./tiller --listen=:$((44136+$i)) --probe=false 2>&1 | sed "s/^/tiller $i: /" )& done; sleep 3 ; pkill tiller'
tiller 1: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 1: [main] 2019/04/06 09:58:18 GRPC listening on :44137
tiller 1: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 1: [main] 2019/04/06 09:58:18 Max history per release is 0
tiller 2: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 2: [main] 2019/04/06 09:58:18 GRPC listening on :44138
tiller 2: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 2: [main] 2019/04/06 09:58:18 Max history per release is 0
tiller 0: [main] 2019/04/06 09:58:18 Starting Tiller v2.12+unreleased (tls=false)
tiller 0: [main] 2019/04/06 09:58:18 GRPC listening on :44136
tiller 0: [main] 2019/04/06 09:58:18 Storage driver is ConfigMap
tiller 0: [main] 2019/04/06 09:58:18 Max history per release is 0
```
Resolves #3159
Signed-off-by: Yusuke KUOKA <ykuoka@gmail.com>
6 years ago
|
|
|
if !*enableProbing {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
mux := newProbesMux()
|
|
|
|
|
|
|
|
// Register gRPC server to prometheus to initialized matrix
|
|
|
|
goprom.Register(rootServer)
|
|
|
|
addPrometheusHandler(mux)
|
|
|
|
|
|
|
|
if err := http.ListenAndServe(*probeAddr, mux); err != nil {
|
|
|
|
probeErrCh <- err
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
healthSrv.SetServingStatus("Tiller", healthpb.HealthCheckResponse_SERVING)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err := <-srvErrCh:
|
|
|
|
logger.Fatalf("Server died: %s", err)
|
|
|
|
case err := <-probeErrCh:
|
|
|
|
logger.Printf("Probes server died: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// newLogger returns a stderr logger whose messages are tagged with the
// given subsystem name, e.g. "[main] ". An empty name yields an untagged
// logger. The logger inherits the standard logger's current flags.
func newLogger(subsystem string) *log.Logger {
	var tag string
	if subsystem != "" {
		tag = fmt.Sprintf("[%s] ", subsystem)
	}
	return log.New(os.Stderr, tag, log.Flags())
}
|
|
|
|
|
|
|
|
// namespace returns the namespace of tiller
|
|
|
|
func namespace() string {
|
|
|
|
if ns := os.Getenv("TILLER_NAMESPACE"); ns != "" {
|
|
|
|
return ns
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fall back to the namespace associated with the service account token, if available
|
|
|
|
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
|
|
|
|
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
|
|
|
|
return ns
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return environment.DefaultTillerNamespace
|
|
|
|
}
|
|
|
|
|
|
|
|
func tlsOptions() tlsutil.Options {
|
|
|
|
opts := tlsutil.Options{CertFile: *certFile, KeyFile: *keyFile}
|
|
|
|
if *tlsVerify {
|
|
|
|
opts.CaCertFile = *caCertFile
|
|
|
|
|
|
|
|
// We want to force the client to not only provide a cert, but to
|
|
|
|
// provide a cert that we can validate.
|
|
|
|
// http://www.bite-code.com/2015/06/25/tls-mutual-auth-in-golang/
|
|
|
|
opts.ClientAuth = tls.RequireAndVerifyClientCert
|
|
|
|
}
|
|
|
|
return opts
|
|
|
|
}
|
|
|
|
|
|
|
|
func tlsDefaultsFromEnv(name string) (value string) {
|
|
|
|
switch certsDir := os.Getenv(tlsCertsEnvVar); name {
|
|
|
|
case "tls-key":
|
|
|
|
return filepath.Join(certsDir, "tls.key")
|
|
|
|
case "tls-cert":
|
|
|
|
return filepath.Join(certsDir, "tls.crt")
|
|
|
|
case "tls-ca-cert":
|
|
|
|
return filepath.Join(certsDir, "ca.crt")
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
func historyMaxFromEnv() int {
|
|
|
|
val := os.Getenv(historyMaxEnvVar)
|
|
|
|
if val == "" {
|
|
|
|
return defaultMaxHistory
|
|
|
|
}
|
|
|
|
ret, err := strconv.Atoi(val)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("Invalid max history %q. Defaulting to 0.", val)
|
|
|
|
return defaultMaxHistory
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
// tlsEnableEnvVarDefault reports whether TILLER_TLS_ENABLE is set to any
// non-empty value; used as the default for the --tls flag.
func tlsEnableEnvVarDefault() bool { return os.Getenv(tlsEnableEnvVar) != "" }
|
|
|
|
// tlsVerifyEnvVarDefault reports whether TILLER_TLS_VERIFY is set to any
// non-empty value; used as the default for the --tls-verify flag.
func tlsVerifyEnvVarDefault() bool { return os.Getenv(tlsVerifyEnvVar) != "" }
|