feat(tiller): add disk storage method

The configmap and secret storage backends run into problems when a
release grows larger than 1MB, because Kubernetes (etcd) does not
support entries larger than that.
This change addresses the problem by adding a new storage type, disk,
which writes each release to the local disk so that it is kept
persistent.

Signed-off-by: Soren Mathiasen <smo@tradeshift.com>

@ -208,12 +208,14 @@ func (u *upgradeCmd) run() error {
if u.namespace == "" {
u.namespace = defaultNamespace()
}
previousReleaseNamespace := releaseHistory.Releases[0].Namespace
if previousReleaseNamespace != u.namespace {
fmt.Fprintf(u.out,
"WARNING: Namespace %q doesn't match with previous. Release will be deployed to %s\n",
u.namespace, previousReleaseNamespace,
)
if len(releaseHistory.Releases) > 0 {
previousReleaseNamespace := releaseHistory.Releases[0].Namespace
if previousReleaseNamespace != u.namespace {
fmt.Fprintf(u.out,
"WARNING: Namespace %q doesn't match with previous. Release will be deployed to %s\n",
u.namespace, previousReleaseNamespace,
)
}
}
}

@ -66,6 +66,7 @@ const (
storageMemory = "memory"
storageConfigMap = "configmap"
storageSecret = "secret"
storageDisk = "disk"
probeAddr = ":44135"
traceAddr = ":44136"
@ -77,7 +78,7 @@ const (
var (
grpcAddr = flag.String("listen", ":44134", "address:port to listen on")
enableTracing = flag.Bool("trace", false, "enable rpc tracing")
store = flag.String("storage", storageConfigMap, "storage driver to use. One of 'configmap', 'memory', or 'secret'")
store = flag.String("storage", storageConfigMap, "storage driver to use. One of 'configmap', 'memory', 'disk', or 'secret'")
remoteReleaseModules = flag.Bool("experimental-release", false, "enable experimental release modules")
tlsEnable = flag.Bool("tls", tlsEnableEnvVarDefault(), "enable TLS")
tlsVerify = flag.Bool("tls-verify", tlsVerifyEnvVarDefault(), "enable TLS and verify remote certificate")
@ -143,6 +144,15 @@ func start() {
env.Releases = storage.Init(secrets)
env.Releases.Log = newLogger("storage").Printf
case storageDisk:
disk, err := driver.NewDisk()
if err != nil {
logger.Fatalf("Could not create disk storage: %v", err)
}
disk.Log = newLogger("storage/driver").Printf
env.Releases = storage.Init(disk)
env.Releases.Log = newLogger("storage").Printf
}
if *maxHistory > 0 {

@ -130,12 +130,10 @@ Check the [Kubernetes Distribution Guide](kubernetes_distros.md) to see if there
The easiest way to install `tiller` into the cluster is simply to run
`helm init`. This will validate that `helm`'s local environment is set
up correctly (and set it up if necessary). Then it will connect to
whatever cluster `kubectl` connects to by default (`kubectl config
view`). Once it connects, it will install `tiller` into the
whatever cluster `kubectl` connects to by default (`kubectl config view`). Once it connects, it will install `tiller` into the
`kube-system` namespace.
After `helm init`, you should be able to run `kubectl get pods --namespace
kube-system` and see Tiller running.
After `helm init`, you should be able to run `kubectl get pods --namespace kube-system` and see Tiller running.
You can explicitly tell `helm init` to...
@ -185,8 +183,7 @@ Tiller running on :44134
```
When Tiller is running locally, it will attempt to connect to the
Kubernetes cluster that is configured by `kubectl`. (Run `kubectl config
view` to see which cluster that is.)
Kubernetes cluster that is configured by `kubectl`. (Run `kubectl config view` to see which cluster that is.)
You must tell `helm` to connect to this new local Tiller host instead of
connecting to the one in-cluster. There are two ways to do this. The
@ -222,8 +219,7 @@ Setting `TILLER_TAG=canary` will get the latest snapshot of master.
Because Tiller stores its data in Kubernetes ConfigMaps, you can safely
delete and re-install Tiller without worrying about losing any data. The
recommended way of deleting Tiller is with `kubectl delete deployment
tiller-deploy --namespace kube-system`, or more concisely `helm reset`.
recommended way of deleting Tiller is with `kubectl delete deployment tiller-deploy --namespace kube-system`, or more concisely `helm reset`.
Tiller can then be re-installed from the client with:
@ -260,7 +256,6 @@ spec:
...
```
### Using `--override`
`--override` allows you to specify properties of Tiller's
@ -277,6 +272,7 @@ its value to 1.
```
helm init --override metadata.annotations."deployment\.kubernetes\.io/revision"="1"
```
Output:
```
@ -352,6 +348,7 @@ in JSON format.
```
### Storage backends
By default, `tiller` stores release information in `ConfigMaps` in the namespace
where it is running. As of Helm 2.7.0, there is now a beta storage backend that
uses `Secrets` for storing release information. This was added for additional
@ -365,6 +362,11 @@ options:
helm init --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}'
```
For versions later than 2.13 there is a storage backend called disk. This
backend moves release information out of Kubernetes and onto the local disk of the container running Tiller. It is therefore
a good idea to mount a volume at that directory so the release information survives pod
restarts. For more information about the disk storage option, see [disk storage](storage-disk.md).
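
As with the secrets backend, you can switch the in-cluster Tiller to the disk backend with an override at init time (the same command shown in the disk storage document):

```
helm init --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=disk}'
```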
Currently, if you want to switch from the default backend to the secrets
backend, you'll have to do the migration for this on your own. When this backend
graduates from beta, there will be a more official path of migration

@ -0,0 +1,123 @@
# Disk storage
If you have very large charts (above 1MB), disk storage is a suggested solution. etcd has an entry size limit of 1MB, which causes deployments of very large charts to fail.
## Usage
You need to start Tiller with the disk storage option:
```shell
helm init --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=disk}'
```
While this method will work, it is not recommended: the release data is written inside the container's filesystem, so it will not survive pod restarts.
The solution is to deploy Tiller manually with a volume mounted at the releases directory.
An example manifest is shown below; please verify that the image tag matches the version you want to install.
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: helm
    name: tiller
  name: tiller-deploy
  namespace: kube-system
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: helm
      name: tiller
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: helm
        name: tiller
    spec:
      automountServiceAccountToken: true
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: tiller-releases
      initContainers:
      - name: take-data-dir-ownership
        image: alpine:3.6
        command:
        - chown
        - -R
        - nobody:nobody
        - /releases
        volumeMounts:
        - name: data
          mountPath: /releases
      containers:
      - command:
        - /tiller
        - --storage=disk
        env:
        - name: TILLER_NAMESPACE
          value: kube-system
        - name: TILLER_HISTORY_MAX
          value: "0"
        image: "gcr.io/kubernetes-helm/tiller:v2.13"
        imagePullPolicy: Always
        volumeMounts:
        - name: data
          mountPath: /releases
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /liveness
            port: 44135
            scheme: HTTP
          initialDelaySeconds: 1
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: tiller
        ports:
        - containerPort: 44134
          name: tiller
          protocol: TCP
        - containerPort: 44135
          name: http
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readiness
            port: 44135
            scheme: HTTP
          initialDelaySeconds: 1
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
      serviceAccount: tiller
      serviceAccountName: tiller
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    heritage: Tiller
  name: tiller-releases
  namespace: kube-system
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  storageClassName: default
```
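
Assuming you save the manifest above as `tiller-disk.yaml` (the filename is only an example), you can roll it out and check that Tiller comes up with something like:

```shell
kubectl apply -f tiller-disk.yaml
kubectl get pods --namespace kube-system -l app=helm,name=tiller
```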

@ -0,0 +1 @@
releases

@ -0,0 +1,181 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
rspb "k8s.io/helm/pkg/proto/hapi/release"
storageerrors "k8s.io/helm/pkg/storage/errors"
)
var _ Driver = (*Disk)(nil)
// DiskDriverName is the string name of this driver.
const DiskDriverName = "Disk"
// Disk is the on-disk storage driver implementation.
type Disk struct {
dir string
Log func(string, ...interface{})
}
// NewDisk initializes a new Disk driver, creating the releases directory if it does not exist.
func NewDisk() (*Disk, error) {
	disk := &Disk{dir: "releases/data", Log: func(_ string, _ ...interface{}) {}}
	if _, err := os.Stat(disk.dir); err != nil {
		if err := os.MkdirAll(disk.dir, 0744); err != nil {
			disk.Log("unable to create releases directory: %v", err)
			return nil, fmt.Errorf("unable to create releases directory: %v", err)
		}
	}
	return disk, nil
}
// Name returns the name of the driver.
func (disk *Disk) Name() string {
return DiskDriverName
}
// Get returns the release named by key or returns ErrReleaseNotFound.
func (disk *Disk) Get(key string) (*rspb.Release, error) {
	files, err := ioutil.ReadDir(disk.dir)
	if err != nil {
		disk.Log("unable to list files in %v: %v", disk.dir, err)
		return nil, fmt.Errorf("unable to list files in %v: %v", disk.dir, err)
	}
for _, v := range files {
if v.IsDir() {
continue
}
if v.Name() == key {
rel, err := torelease(fmt.Sprintf("%v%v%v", disk.dir, string(os.PathSeparator), v.Name()))
if err != nil {
return nil, err
}
return rel, nil
}
}
disk.Log(fmt.Sprintf("release %v not found", key))
return nil, storageerrors.ErrReleaseNotFound(key)
}
// torelease reads a release file from disk and unmarshals it.
func torelease(f string) (*rspb.Release, error) {
	rel := &rspb.Release{}
	d, err := ioutil.ReadFile(f)
	if err != nil {
		return nil, fmt.Errorf("unable to read file %v: %v", f, err)
	}
	if err := json.Unmarshal(d, rel); err != nil {
		return nil, fmt.Errorf("unable to unmarshal file %v: %v", f, err)
	}
	return rel, nil
}
// List returns the list of all releases such that filter(release) == true
func (disk *Disk) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
	files, err := ioutil.ReadDir(disk.dir)
	if err != nil {
		return nil, fmt.Errorf("unable to list files in %v: %v", disk.dir, err)
	}
result := []*rspb.Release{}
for _, v := range files {
if v.IsDir() {
continue
}
rel, err := torelease(fmt.Sprintf("%v%v%v", disk.dir, string(os.PathSeparator), v.Name()))
if err != nil {
return nil, fmt.Errorf("unable to process file %v", v.Name())
}
if filter(rel) {
result = append(result, rel)
}
}
return result, nil
}
// Query returns the set of releases that match the provided set of labels.
func (disk *Disk) Query(keyvals map[string]string) ([]*rspb.Release, error) {
	var lbs labels
	var ls []*rspb.Release
	lbs.init()
	lbs.fromMap(keyvals)
	err := disk.List(func(r *rspb.Release) bool {
		n := strings.Split(r.GetName(), ".")
		rec := newRecord(n[0], r)
		if rec == nil {
			return false
		}
		if rec.lbs.match(lbs) {
			ls = append(ls, rec.rls)
		}
		return true
	})
	if err != nil {
		return nil, err
	}
	if len(ls) == 0 {
		return nil, storageerrors.ErrReleaseNotFound(lbs["NAME"])
	}
	return ls, nil
}
// Create creates a new release or returns ErrReleaseExists.
func (disk *Disk) Create(key string, rls *rspb.Release) error {
	file := fmt.Sprintf("%v%v%v", disk.dir, string(os.PathSeparator), key)
	if _, err := os.Stat(file); err == nil {
		return storageerrors.ErrReleaseExists(key)
	}
	d, err := json.Marshal(rls)
	if err != nil {
		return fmt.Errorf("unable to convert release to json: %v", err)
	}
	if err := ioutil.WriteFile(file, d, 0644); err != nil {
		return fmt.Errorf("unable to write release to disk: %v", err)
	}
	return nil
}
// Update updates a release or returns ErrReleaseNotFound.
func (disk *Disk) Update(key string, rls *rspb.Release) error {
	file := fmt.Sprintf("%v%v%v", disk.dir, string(os.PathSeparator), key)
	if _, err := os.Stat(file); err != nil {
		return storageerrors.ErrReleaseNotFound(key)
	}
	d, err := json.Marshal(rls)
	if err != nil {
		return fmt.Errorf("unable to convert release to json: %v", err)
	}
	if err := ioutil.WriteFile(file, d, 0644); err != nil {
		return fmt.Errorf("unable to write release to disk: %v", err)
	}
	return nil
}
// Delete deletes a release or returns ErrReleaseNotFound.
func (disk *Disk) Delete(key string) (*rspb.Release, error) {
	rel, err := disk.Get(key)
	if err != nil {
		return nil, storageerrors.ErrReleaseNotFound(key)
	}
	file := fmt.Sprintf("%v%v%v", disk.dir, string(os.PathSeparator), key)
	if err := os.Remove(file); err != nil {
		return nil, fmt.Errorf("unable to delete file %v: %v", file, err)
	}
	return rel, nil
}
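
For illustration only (not part of the diff above), here is a minimal sketch of how the Disk driver could be exercised directly, assuming the standard Helm 2 import paths and a hypothetical `demo` release:

```go
package main

import (
	"log"

	rspb "k8s.io/helm/pkg/proto/hapi/release"
	"k8s.io/helm/pkg/storage/driver"
)

func main() {
	// NewDisk creates ./releases/data relative to the working directory if it is missing.
	disk, err := driver.NewDisk()
	if err != nil {
		log.Fatalf("could not create disk driver: %v", err)
	}
	disk.Log = log.Printf

	// Store a release under its key, then read it back from disk.
	rls := &rspb.Release{Name: "demo", Namespace: "default", Version: 1}
	if err := disk.Create("demo.v1", rls); err != nil {
		log.Fatalf("create failed: %v", err)
	}
	got, err := disk.Get("demo.v1")
	if err != nil {
		log.Fatalf("get failed: %v", err)
	}
	log.Printf("loaded release %s (version %d)", got.Name, got.Version)
}
```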

@ -0,0 +1,223 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
	"fmt"
	"os"
	"reflect"
	"testing"

	rspb "k8s.io/helm/pkg/proto/hapi/release"
)
func TestDiskName(t *testing.T) {
	disk, err := NewDisk()
	if err != nil {
		t.Fatal(err)
	}
	if disk.Name() != DiskDriverName {
		t.Errorf("Expected name to be %q, got %q", DiskDriverName, disk.Name())
	}
}
func tsFixtureDisk(t *testing.T) *Disk {
	hs := []*rspb.Release{
		// rls-a
		releaseStub("rls-a", 4, "default", rspb.Status_DEPLOYED),
		releaseStub("rls-a", 1, "default", rspb.Status_SUPERSEDED),
		releaseStub("rls-a", 3, "default", rspb.Status_SUPERSEDED),
		releaseStub("rls-a", 2, "default", rspb.Status_SUPERSEDED),
		// rls-b
		releaseStub("rls-b", 4, "default", rspb.Status_DEPLOYED),
		releaseStub("rls-b", 1, "default", rspb.Status_SUPERSEDED),
		releaseStub("rls-b", 3, "default", rspb.Status_SUPERSEDED),
		releaseStub("rls-b", 2, "default", rspb.Status_SUPERSEDED),
	}

	disk, err := NewDisk()
	if err != nil {
		t.Fatalf("Test setup failed to create driver: %s\n", err)
	}
	// Start from an empty releases directory so fixtures left by earlier tests do not collide.
	if err := os.RemoveAll(disk.dir); err != nil {
		t.Fatalf("Test setup failed to clean releases directory: %s\n", err)
	}
	if err := os.MkdirAll(disk.dir, 0744); err != nil {
		t.Fatalf("Test setup failed to recreate releases directory: %s\n", err)
	}
	for _, tt := range hs {
		if err := disk.Create(testKey(tt.Name, tt.Version), tt); err != nil {
			t.Fatalf("Test setup failed to create: %s\n", err)
		}
	}
	return disk
}
func TestDiskCreate(t *testing.T) {
var tests = []struct {
desc string
rls *rspb.Release
err bool
}{
{
"create should success",
releaseStub("rls-c", 1, "default", rspb.Status_DEPLOYED),
false,
},
{
"create should fail (release already exists)",
releaseStub("rls-a", 1, "default", rspb.Status_DEPLOYED),
true,
},
}
ts := tsFixtureDisk(t)
for _, tt := range tests {
key := testKey(tt.rls.Name, tt.rls.Version)
rls := tt.rls
if err := ts.Create(key, rls); err != nil {
if !tt.err {
t.Fatalf("failed to create %q: %s", tt.desc, err)
}
}
defer ts.Delete(key)
}
}
func TestDiskGet(t *testing.T) {
var tests = []struct {
desc string
key string
err bool
}{
{"release key should exist", "rls-a.v1", false},
{"release key should not exist", "rls-a.v5", true},
}
ts := tsFixtureDisk(t)
for _, tt := range tests {
if _, err := ts.Get(tt.key); err != nil {
if !tt.err {
t.Fatalf("Failed %q to get '%s': %q\n", tt.desc, tt.key, err)
}
}
}
}
func TestDiskQuery(t *testing.T) {
var tests = []struct {
desc string
xlen int
lbs map[string]string
}{
{
"should be 2 query results",
2,
map[string]string{"STATUS": "DEPLOYED"},
},
}
ts := tsFixtureDisk(t)
for _, tt := range tests {
l, err := ts.Query(tt.lbs)
if err != nil {
t.Fatalf("Failed to query: %s\n", err)
}
if tt.xlen != len(l) {
t.Fatalf("Expected %d results, actual %d\n", tt.xlen, len(l))
}
}
}
func TestDiskUpdate(t *testing.T) {
var tests = []struct {
desc string
key string
rls *rspb.Release
err bool
}{
{
"update release status",
"rls-a.v4",
releaseStub("rls-a", 4, "default", rspb.Status_SUPERSEDED),
false,
},
{
"update release does not exist",
"rls-z.v1",
releaseStub("rls-z", 1, "default", rspb.Status_DELETED),
true,
},
}
ts := tsFixtureDisk(t)
for _, tt := range tests {
if err := ts.Update(tt.key, tt.rls); err != nil {
if !tt.err {
t.Fatalf("Failed %q: %s\n", tt.desc, err)
}
continue
}
r, err := ts.Get(tt.key)
if err != nil {
t.Fatalf("Failed to get: %s\n", err)
}
if !reflect.DeepEqual(r, tt.rls) {
t.Fatalf("Expected %s, actual %s\n", tt.rls, r)
}
}
}
func TestDiskDelete(t *testing.T) {
var tests = []struct {
desc string
key string
err bool
}{
{"release key should exist", "rls-a.v1", false},
{"release key should not exist", "rls-a.v5", true},
}
ts := tsFixtureDisk(t)
start, err := ts.Query(map[string]string{"NAME": "rls-a"})
if err != nil {
t.Errorf("Query failed: %s", err)
}
startLen := len(start)
for _, tt := range tests {
if rel, err := ts.Delete(tt.key); err != nil {
if !tt.err {
t.Fatalf("Failed %q to get '%s': %q\n", tt.desc, tt.key, err)
}
continue
} else if fmt.Sprintf("%s.v%d", rel.Name, rel.Version) != tt.key {
t.Fatalf("Asked for delete on %s, but deleted %d", tt.key, rel.Version)
}
_, err := ts.Get(tt.key)
if err == nil {
t.Errorf("Expected an error when asking for a deleted key")
}
}
// Make sure that the deleted records are gone.
end, err := ts.Query(map[string]string{"NAME": "rls-a"})
if err != nil {
t.Errorf("Query failed: %s", err)
}
endLen := len(end)
if startLen <= endLen {
t.Errorf("expected start %d to be less than end %d", startLen, endLen)
for _, ee := range end {
t.Logf("Name: %s, Version: %d", ee.Name, ee.Version)
}
}
}