Merge pull request #2636 from technosophos/feat/2332-max-history

feat(tiller): limit number of versions stored per release
Matt Butcher committed via GitHub
commit a736c2a3fe

@ -79,6 +79,7 @@ type initCmd struct {
opts installer.Options
kubeClient kubernetes.Interface
serviceAccount string
maxHistory int
}
func newInitCmd(out io.Writer) *cobra.Command {
@ -117,6 +118,7 @@ func newInitCmd(out io.Writer) *cobra.Command {
f.BoolVar(&i.opts.EnableHostNetwork, "net-host", false, "install Tiller with net=host")
f.StringVar(&i.serviceAccount, "service-account", "", "name of service account")
f.IntVar(&i.maxHistory, "history-max", 0, "limit the maximum number of revisions saved per release. Use 0 for no limit.")
return cmd
}
@ -156,6 +158,7 @@ func (i *initCmd) run() error {
i.opts.UseCanary = i.canary
i.opts.ImageSpec = i.image
i.opts.ServiceAccount = i.serviceAccount
i.opts.MaxHistory = i.maxHistory
if settings.Debug {
writeYAMLManifest := func(apiVersion, kind, body string, first, last bool) error {
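
For example, running helm init --history-max 200 deploys a Tiller that keeps at most 200 revisions per release; the value reaches Tiller through the TILLER_HISTORY_MAX environment variable added in the installer change below.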

@ -17,6 +17,7 @@ limitations under the License.
package installer // import "k8s.io/helm/cmd/helm/installer"
import (
"fmt"
"io/ioutil"
"github.com/ghodss/yaml"
@ -141,6 +142,7 @@ func generateDeployment(opts *Options) *v1beta1.Deployment {
},
Env: []v1.EnvVar{
{Name: "TILLER_NAMESPACE", Value: opts.Namespace},
{Name: "TILLER_HISTORY_MAX", Value: fmt.Sprintf("%d", opts.MaxHistory)},
},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
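
To see the wiring end to end, a same-package test sketch (hypothetical, not part of this change) could render a Deployment through the unexported generateDeployment and check for the new env var; the Options field name and the TILLER_HISTORY_MAX key are taken from the hunks above:

// Hypothetical sketch (package installer): MaxHistory is rendered into the
// Tiller container as the TILLER_HISTORY_MAX environment variable.
func TestDeploymentManifest_MaxHistory(t *testing.T) {
    opts := &Options{Namespace: "kube-system", MaxHistory: 200}
    d := generateDeployment(opts)

    found := false
    for _, e := range d.Spec.Template.Spec.Containers[0].Env {
        if e.Name == "TILLER_HISTORY_MAX" {
            found = true
            if e.Value != "200" {
                t.Errorf("expected TILLER_HISTORY_MAX=200, got %q", e.Value)
            }
        }
    }
    if !found {
        t.Error("TILLER_HISTORY_MAX not set on the Tiller container")
    }
}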

@ -135,10 +135,10 @@ func TestDeploymentManifest_WithTLS(t *testing.T) {
t.Fatalf("%s: error %q", tt.name, err)
}
// verify that the environment variables in the deployment reflect TLS being enabled.
- if got := d.Spec.Template.Spec.Containers[0].Env[1].Value; got != tt.verify {
+ if got := d.Spec.Template.Spec.Containers[0].Env[2].Value; got != tt.verify {
t.Errorf("%s: expected tls verify env value %q, got %q", tt.name, tt.verify, got)
}
- if got := d.Spec.Template.Spec.Containers[0].Env[2].Value; got != tt.enable {
+ if got := d.Spec.Template.Spec.Containers[0].Env[3].Value; got != tt.enable {
t.Errorf("%s: expected tls enable env value %q, got %q", tt.name, tt.enable, got)
}
}

@ -71,6 +71,11 @@ type Options struct {
// EnableHostNetwork installs Tiller with net=host.
EnableHostNetwork bool
// MaxHistory sets the maximum number of release versions stored per release.
//
// Less than or equal to zero means no limit.
MaxHistory int
}
func (opts *Options) selectImage() string {

@ -26,6 +26,7 @@ import (
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
goprom "github.com/grpc-ecosystem/go-grpc-prometheus"
@ -51,12 +52,17 @@ const (
// tlsCertsEnvVar names the environment variable that points to
// the directory where Tiller's TLS certificates are located.
tlsCertsEnvVar = "TILLER_TLS_CERTS"
// historyMaxEnvVar is the name of the env var for setting max history.
historyMaxEnvVar = "TILLER_HISTORY_MAX"
storageMemory = "memory"
storageConfigMap = "configmap"
probeAddr = ":44135"
traceAddr = ":44136"
// defaultMaxHistory is the default maximum number of revisions kept per release; 0 means no limit.
defaultMaxHistory = 0
)
var (
@ -69,6 +75,7 @@ var (
keyFile = flag.String("tls-key", tlsDefaultsFromEnv("tls-key"), "path to TLS private key file")
certFile = flag.String("tls-cert", tlsDefaultsFromEnv("tls-cert"), "path to TLS certificate file")
caCertFile = flag.String("tls-ca-cert", tlsDefaultsFromEnv("tls-ca-cert"), "trust certificates signed by this CA")
maxHistory = flag.Int("history-max", historyMaxFromEnv(), "maximum number of releases kept in release history, with 0 meaning no limit")
// rootServer is the root gRPC server.
//
@ -112,6 +119,10 @@ func start() {
env.Releases.Log = newLogger("storage").Printf
}
if *maxHistory > 0 {
env.Releases.MaxHistory = *maxHistory
}
kubeClient := kube.New(nil)
kubeClient.Log = newLogger("kube").Printf
env.KubeClient = kubeClient
@ -143,6 +154,7 @@ func start() {
logger.Printf("GRPC listening on %s", *grpcAddr)
logger.Printf("Probes listening on %s", probeAddr)
logger.Printf("Storage driver is %s", env.Releases.Name())
logger.Printf("Max history per release is %d", *maxHistory)
if *enableTracing {
startTracing(traceAddr)
@ -223,5 +235,18 @@ func tlsDefaultsFromEnv(name string) (value string) {
return ""
}
func historyMaxFromEnv() int {
val := os.Getenv(historyMaxEnvVar)
if val == "" {
return defaultMaxHistory
}
ret, err := strconv.Atoi(val)
if err != nil {
log.Printf("Invalid max history %q. Defaulting to 0.", val)
return defaultMaxHistory
}
return ret
}
func tlsEnableEnvVarDefault() bool { return os.Getenv(tlsEnableEnvVar) != "" }
func tlsVerifyEnvVarDefault() bool { return os.Getenv(tlsVerifyEnvVar) != "" }
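
historyMaxFromEnv falls back to the default (0, i.e. unlimited) whenever the variable is unset or unparsable. A small table-driven test sketch, hypothetical and not part of this change, would exercise that behavior (it assumes the os and testing imports of a test file in the same package):

func TestHistoryMaxFromEnv(t *testing.T) {
    tests := []struct {
        val  string
        want int
    }{
        {"", defaultMaxHistory},      // unset or empty: fall back to the default
        {"10", 10},                   // valid integer: used as-is
        {"bogus", defaultMaxHistory}, // unparsable: logged and defaulted
    }
    for _, tt := range tests {
        os.Setenv(historyMaxEnvVar, tt.val)
        if got := historyMaxFromEnv(); got != tt.want {
            t.Errorf("historyMaxFromEnv() with %q = %d, want %d", tt.val, got, tt.want)
        }
    }
    os.Unsetenv(historyMaxEnvVar)
}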

@ -36,6 +36,7 @@ helm init
--canary-image use the canary Tiller image
-c, --client-only if set does not install Tiller
--dry-run do not install local or remote
--history-max int limit the maximum number of revisions saved per release. Use 0 for no limit.
--local-repo-url string URL for local repository (default "http://127.0.0.1:8879/charts")
--net-host install Tiller with net=host
--service-account string name of service account
@ -63,4 +64,4 @@ helm init
### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes.
- ###### Auto generated by spf13/cobra on 26-Jun-2017
+ ###### Auto generated by spf13/cobra on 10-Aug-2017

@ -94,6 +94,11 @@ func (mem *Memory) Query(keyvals map[string]string) ([]*rspb.Release, error) {
var ls []*rspb.Release
for _, recs := range mem.cache {
recs.Iter(func(_ int, rec *record) bool {
// A query for a release name that doesn't exist (has been deleted)
// can cause rec to be nil.
if rec == nil {
return false
}
if rec.lbs.match(lbs) {
ls = append(ls, rec.rls)
}
@ -133,21 +138,24 @@ func (mem *Memory) Update(key string, rls *rspb.Release) error {
func (mem *Memory) Delete(key string) (*rspb.Release, error) {
defer unlock(mem.wlock())
- switch elems := strings.Split(key, ".v"); len(elems) {
- case 2:
+ elems := strings.Split(key, ".v")
+ if len(elems) != 2 {
+ return nil, ErrInvalidKey(key)
+ }
name, ver := elems[0], elems[1]
if _, err := strconv.Atoi(ver); err != nil {
return nil, ErrInvalidKey(key)
}
if recs, ok := mem.cache[name]; ok {
if r := recs.Remove(key); r != nil {
// recs.Remove changes the slice reference, so we have to re-assign it.
mem.cache[name] = recs
return r.rls, nil
}
}
return nil, ErrReleaseNotFound(key)
- default:
- return nil, ErrInvalidKey(key)
- }
}
// wlock locks mem for writing
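
The rewritten Delete rejects malformed keys up front: a key must have the form <name>.v<version>. A hypothetical same-package sketch of that contract:

func TestMemoryDeleteInvalidKey(t *testing.T) {
    mem := NewMemory()
    // No ".v<version>" suffix, so Delete should fail before touching the cache.
    if _, err := mem.Delete("rls-a"); err == nil {
        t.Error("expected an invalid-key error for a key without a version suffix")
    }
}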

@ -17,6 +17,7 @@ limitations under the License.
package driver
import (
"fmt"
"reflect"
"testing"
@ -158,11 +159,38 @@ func TestMemoryDelete(t *testing.T) {
}
ts := tsFixtureMemory(t)
start, err := ts.Query(map[string]string{"NAME": "rls-a"})
if err != nil {
t.Errorf("Query failed: %s", err)
}
startLen := len(start)
for _, tt := range tests {
- if _, err := ts.Delete(tt.key); err != nil {
+ if rel, err := ts.Delete(tt.key); err != nil {
if !tt.err {
t.Fatalf("Failed %q to get '%s': %q\n", tt.desc, tt.key, err)
}
continue
} else if fmt.Sprintf("%s.v%d", rel.Name, rel.Version) != tt.key {
t.Fatalf("Asked for delete on %s, but deleted %d", tt.key, rel.Version)
}
_, err := ts.Get(tt.key)
if err == nil {
t.Errorf("Expected an error when asking for a deleted key")
}
}
// Make sure that the deleted records are gone.
end, err := ts.Query(map[string]string{"NAME": "rls-a"})
if err != nil {
t.Errorf("Query failed: %s", err)
}
endLen := len(end)
if startLen <= endLen {
t.Errorf("expected start %d to be less than end %d", startLen, endLen)
for _, ee := range end {
t.Logf("Name: %s, Version: %d", ee.Name, ee.Version)
}
}
}

@ -73,6 +73,8 @@ func TestRecordsRemove(t *testing.T) {
newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", rspb.Status_DEPLOYED)),
})
startLen := rs.Len()
for _, tt := range tests {
if r := rs.Remove(tt.key); r == nil {
if !tt.ok {
@ -84,4 +86,27 @@ func TestRecordsRemove(t *testing.T) {
}
}
}
// We expect the total number of records will be less now than there were
// when we started.
endLen := rs.Len()
if endLen >= startLen {
t.Errorf("expected ending length %d to be less than starting length %d", endLen, startLen)
}
}
func TestRecordsRemoveAt(t *testing.T) {
rs := records([]*record{
newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", rspb.Status_SUPERSEDED)),
newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", rspb.Status_DEPLOYED)),
})
if len(rs) != 2 {
t.Fatal("Expected len=2 for mock")
}
rs.Remove("rls-a.v1")
if len(rs) != 1 {
t.Fatalf("Expected length of rs to be 1, got %d", len(rs))
}
}

@ -27,6 +27,12 @@ import (
// Storage represents a storage engine for a Release.
type Storage struct {
driver.Driver
// MaxHistory specifies the maximum number of historical releases that will
// be retained, including the most recent release. Values of 0 or less are
// ignored (meaning no limits are imposed).
MaxHistory int
Log func(string, ...interface{})
}
@ -43,6 +49,10 @@ func (s *Storage) Get(name string, version int32) (*rspb.Release, error) {
// release, or a release with an identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error {
s.Log("creating release %q", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 {
// Want to make space for one more release.
s.removeLeastRecent(rls.Name, s.MaxHistory-1)
}
return s.Driver.Create(makeKey(rls.Name, rls.Version), rls)
}
@ -135,6 +145,51 @@ func (s *Storage) History(name string) ([]*rspb.Release, error) {
return s.Driver.Query(map[string]string{"NAME": name, "OWNER": "TILLER"})
}
// removeLeastRecent removes items from history until the number of stored
// releases does not exceed max.
//
// We allow max to be set explicitly so that calling functions can "make space"
// for the new records they are going to write.
func (s *Storage) removeLeastRecent(name string, max int) error {
if max < 0 {
return nil
}
h, err := s.History(name)
if err != nil {
return err
}
if len(h) <= max {
return nil
}
overage := len(h) - max
// We want oldest to newest
relutil.SortByRevision(h)
// Delete as many as possible. In the case of API throughput limitations,
// multiple invocations of this function will eventually delete them all.
toDelete := h[0:overage]
errors := []error{}
for _, rel := range toDelete {
key := makeKey(name, rel.Version)
_, innerErr := s.Delete(name, rel.Version)
if innerErr != nil {
s.Log("error pruning %s from release history: %s", key, innerErr)
errors = append(errors, innerErr)
}
}
s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errors))
switch c := len(errors); c {
case 0:
return nil
case 1:
return errors[0]
default:
return fmt.Errorf("encountered %d deletion errors. First is: %s", c, errors[0])
}
}
// Last fetches the last revision of the named release.
func (s *Storage) Last(name string) (*rspb.Release, error) {
s.Log("getting last revision of %q", name)

@ -83,8 +83,13 @@ func TestStorageDelete(t *testing.T) {
Name: "angry-beaver",
Version: 1,
}.ToRelease()
rls2 := ReleaseTestData{
Name: "angry-beaver",
Version: 2,
}.ToRelease()
assertErrNil(t.Fatal, storage.Create(rls), "StoreRelease")
assertErrNil(t.Fatal, storage.Create(rls2), "StoreRelease")
// delete the release
res, err := storage.Delete(rls.Name, rls.Version)
@ -94,6 +99,20 @@ func TestStorageDelete(t *testing.T) {
if !reflect.DeepEqual(rls, res) {
t.Fatalf("Expected %q, got %q", rls, res)
}
hist, err := storage.History(rls.Name)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
// We have now deleted one of the two records.
if len(hist) != 1 {
t.Errorf("expected 1 record for deleted release version, got %d", len(hist))
}
if hist[0].Version != 2 {
t.Errorf("Expected version to be 2, got %d", hist[0].Version)
}
}
func TestStorageList(t *testing.T) {
@ -169,7 +188,7 @@ func TestStorageDeployed(t *testing.T) {
setup()
- rls, err := storage.Deployed(name)
+ rls, err := storage.Last(name)
if err != nil {
t.Fatalf("Failed to query for deployed release: %s\n", err)
}
@ -217,12 +236,69 @@ func TestStorageHistory(t *testing.T) {
}
}
- func TestStorageLast(t *testing.T) {
+ func TestStorageRemoveLeastRecent(t *testing.T) {
storage := Init(driver.NewMemory())
storage.Log = t.Logf
// Make sure that specifying this at the outset doesn't cause any bugs.
storage.MaxHistory = 10
const name = "angry-bird"
// setup storage with test releases
setup := func() {
// release records
rls0 := ReleaseTestData{Name: name, Version: 1, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls1 := ReleaseTestData{Name: name, Version: 2, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls2 := ReleaseTestData{Name: name, Version: 3, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls3 := ReleaseTestData{Name: name, Version: 4, Status: rspb.Status_DEPLOYED}.ToRelease()
// create the release records in the storage
assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
}
setup()
// Because we have not set a limit, we expect 4.
expect := 4
if hist, err := storage.History(name); err != nil {
t.Fatal(err)
} else if len(hist) != expect {
t.Fatalf("expected %d items in history, got %d", expect, len(hist))
}
storage.MaxHistory = 3
rls5 := ReleaseTestData{Name: name, Version: 5, Status: rspb.Status_DEPLOYED}.ToRelease()
assertErrNil(t.Fatal, storage.Create(rls5), "Storing release 'angry-bird' (v5)")
// On inserting the 5th record, we expect two records to be pruned from history.
hist, err := storage.History(name)
if err != nil {
t.Fatal(err)
} else if len(hist) != storage.MaxHistory {
for _, item := range hist {
t.Logf("%s %v", item.Name, item.Version)
}
t.Fatalf("expected %d items in history, got %d", storage.MaxHistory, len(hist))
}
// We expect the existing records to be 3, 4, and 5.
for i, item := range hist {
v := int(item.Version)
if expect := i + 3; v != expect {
t.Errorf("Expected release %d, got %d", expect, v)
}
}
}
func TestStorageLast(t *testing.T) {
storage := Init(driver.NewMemory())
const name = "angry-bird"
// Set up storage with test releases.
setup := func() {
// release records
rls0 := ReleaseTestData{Name: name, Version: 1, Status: rspb.Status_SUPERSEDED}.ToRelease()
