Merge remote-tracking branch 'upstream/master'

pull/6231/head
Eduard Laur 5 years ago
commit 7098f06783

@@ -41,7 +41,7 @@ message Status {
PENDING_INSTALL = 6;
// Status_PENDING_UPGRADE indicates that an upgrade operation is underway.
PENDING_UPGRADE = 7;
-// Status_PENDING_ROLLBACK indicates that an rollback operation is underway.
+// Status_PENDING_ROLLBACK indicates that a rollback operation is underway.
PENDING_ROLLBACK = 8;
}

@@ -479,7 +479,7 @@ Furthermore, A is dependent on chart B that creates objects
- replicaset "B-ReplicaSet"
- service "B-Service"
-After installation/upgrade of chart A a single Helm release is created/modified. The release will
+After installation/upgrade of chart A, a single Helm release is created/modified. The release will
create/update all of the above Kubernetes objects in the following order:
- A-Namespace

@@ -103,7 +103,7 @@ helm init --canary-image
For developing on Tiller, it is sometimes more expedient to run Tiller locally
instead of packaging it into an image and running it in-cluster. You can do
-this by telling the Helm client to us a local instance.
+this by telling the Helm client to use a local instance.
```console
$ make build

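For orientation, a minimal sketch of the local-Tiller workflow the doc hunk above describes, assuming Tiller's default listen address of localhost:44134 (these commands are not part of this commit):

```console
$ make build
$ bin/tiller
# in a second terminal, point the Helm client at the local Tiller
$ export HELM_HOST=localhost:44134
$ helm version
```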
@@ -643,3 +643,35 @@ func TestOverriteTableItemWithNonTableValue(t *testing.T) {
t.Errorf("Expected %v, but got %v", expected, result)
}
}
+func TestSubchartCoaleseWithNullValue(t *testing.T) {
+	v, err := CoalesceValues(&chart.Chart{
+		Metadata: &chart.Metadata{Name: "demo"},
+		Dependencies: []*chart.Chart{
+			{
+				Metadata: &chart.Metadata{Name: "logstash"},
+				Values: &chart.Config{
+					Raw: `livenessProbe: {httpGet: {path: "/", port: monitor}}`,
+				},
+			},
+		},
+		Values: &chart.Config{
+			Raw: `logstash: {livenessProbe: {httpGet: null, exec: "/bin/true"}}`,
+		},
+	}, &chart.Config{})
+	if err != nil {
+		t.Errorf("Failed with %s", err)
+	}
+	result := v.AsMap()
+	expected := map[string]interface{}{
+		"logstash": map[string]interface{}{
+			"global": map[string]interface{}{},
+			"livenessProbe": map[string]interface{}{
+				"exec": "/bin/true",
+			},
+		},
+	}
+	if !reflect.DeepEqual(result, expected) {
+		t.Errorf("got %+v, expected %+v", result, expected)
+	}
+}

@@ -41,6 +41,7 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -51,6 +52,7 @@ import (
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes/scheme"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/core"
@@ -658,7 +660,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P
// Get a versioned object
versionedObject, err := asVersioned(target)
-// Unstructured objects, such as CRDs, may not have an not registered error
+// Unstructured objects, such as CRDs, may not have a not registered error
// returned from ConvertToVersion. Anything that's unstructured should
// use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported
// on objects like CRDs.
@@ -810,10 +812,7 @@ func getSelectorFromObject(obj runtime.Object) (map[string]string, bool) {
}
func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
-w, err := resource.NewHelper(info.Client, info.Mapping).WatchSingle(info.Namespace, info.Name, info.ResourceVersion)
-if err != nil {
-return err
-}
+lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything())
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
@@ -826,7 +825,7 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) err
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
-_, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
+_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
switch e.Type {
case watch.Added, watch.Modified:
// For things like a secret or a config map, this is the best indicator
@@ -914,15 +913,12 @@ func (c *Client) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader,
}
func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Info) error {
-w, err := resource.NewHelper(info.Client, info.Mapping).WatchSingle(info.Namespace, info.Name, info.ResourceVersion)
-if err != nil {
-return err
-}
+lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything())
c.Log("Watching pod %s for completion with timeout of %v", info.Name, timeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
-_, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
+_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
return isPodComplete(e)
})

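The three kube client hunks above make the same substitution: a per-object WatchSingle call followed by watchtools.UntilWithoutRetry is replaced by a ListWatch built with cachetools.NewListWatchFromClient and driven by watchtools.ListWatchUntil. Below is a standalone sketch of that pattern using the same client-go calls the diff introduces; the function name, resource, and condition are illustrative and are not Helm's code:

```go
import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	cachetools "k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForAnyChange blocks until the named resource kind sees an add or modify
// event in the given namespace, or until the timeout expires.
func waitForAnyChange(client cachetools.Getter, resource, namespace string, timeout time.Duration) error {
	// The ListerWatcher supplies both a List and a Watch func, so the helper
	// does not depend on a single raw watch started at one resource version.
	lw := cachetools.NewListWatchFromClient(client, resource, namespace, fields.Everything())
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
		// a real condition would inspect e.Object for the state it cares about
		return e.Type == watch.Added || e.Type == watch.Modified, nil
	})
	return err
}
```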
@@ -38,7 +38,7 @@ const (
Status_PENDING_INSTALL Status_Code = 6
// Status_PENDING_UPGRADE indicates that an upgrade operation is underway.
Status_PENDING_UPGRADE Status_Code = 7
-// Status_PENDING_ROLLBACK indicates that an rollback operation is underway.
+// Status_PENDING_ROLLBACK indicates that a rollback operation is underway.
Status_PENDING_ROLLBACK Status_Code = 8
)

@@ -50,7 +50,7 @@ func (s *Storage) Get(name string, version int32) (*rspb.Release, error) {
// Create creates a new storage entry holding the release. An
// error is returned if the storage driver failed to store the
-// release, or a release with identical an key already exists.
+// release, or a release with identical key already exists.
func (s *Storage) Create(rls *rspb.Release) error {
s.Log("creating release %q", makeKey(rls.Name, rls.Version))
if s.MaxHistory > 0 {

@@ -28,6 +28,7 @@ type SortOrder []string
// Those occurring earlier in the list get installed before those occurring later in the list.
var InstallOrder SortOrder = []string{
"Namespace",
"NetworkPolicy",
"ResourceQuota",
"LimitRange",
"PodSecurityPolicy",
@@ -96,6 +97,7 @@ var UninstallOrder SortOrder = []string{
"PodSecurityPolicy",
"LimitRange",
"ResourceQuota",
"NetworkPolicy",
"Namespace",
}

@@ -157,6 +157,10 @@ func TestKindSorter(t *testing.T) {
Name: "x",
Head: &util.SimpleHead{Kind: "HorizontalPodAutoscaler"},
},
+{
+Name: "B",
+Head: &util.SimpleHead{Kind: "NetworkPolicy"},
+},
}
for _, test := range []struct {
@@ -164,8 +168,8 @@ func TestKindSorter(t *testing.T) {
order SortOrder
expected string
}{
{"install", InstallOrder, "abc3zde1fgh2iIjJkKlLmnopqrxstuvw!"},
{"uninstall", UninstallOrder, "wvmutsxrqponLlKkJjIi2hgf1edz3cba!"},
{"install", InstallOrder, "aBbc3zde1fgh2iIjJkKlLmnopqrxstuvw!"},
{"uninstall", UninstallOrder, "wvmutsxrqponLlKkJjIi2hgf1edz3cbBa!"},
} {
var buf bytes.Buffer
t.Run(test.description, func(t *testing.T) {
