From 38cc3d325cd950be8d002fc89eeca1e9f9f5e14b Mon Sep 17 00:00:00 2001
From: Luis Davim
Date: Mon, 30 May 2022 10:07:38 +0100
Subject: [PATCH 1/2] feat: allow hooks to be kept and updated

Signed-off-by: Luis Davim
---
 pkg/action/hooks.go | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 458a6342c..1c382ce48 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -44,12 +44,13 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		}
 	}
 
-	// hooke are pre-ordered by kind, so keep order stable
+	// hooks are pre-ordered by kind, so keep order stable
 	sort.Stable(hookByWeight(executingHooks))
 
 	for i, h := range executingHooks {
 		// Set default delete policy to before-hook-creation
 		cfg.hookSetDeletePolicy(h)
+		setDefaultDeletePolicy(h)
 
 		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil {
 			return err
@@ -60,6 +61,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 			return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err)
 		}
 
+		// It is safe to use "force" here because these are resources currently rendered by the chart.
+		err = resources.Visit(setMetadataVisitor(rl.Name, rl.Namespace, true))
+		if err != nil {
+			return err
+		}
+
+		toBeUpdated, err := existingResourceConflict(resources, rl.Name, rl.Namespace)
+		if err != nil {
+			return errors.Wrap(err, "rendered hook manifests contain a resource that already exists. Unable to continue")
+		}
+
 		// Record the time at which the hook was applied to the cluster
 		h.LastRun = release.HookExecution{
 			StartedAt: helmtime.Now(),
@@ -76,6 +88,8 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		if _, err := cfg.KubeClient.Create(
 			resources,
 			kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil {
+			// Create or update the hook resources
+			if _, err := cfg.KubeClient.Update(toBeUpdated, resources, false); err != nil {
 			h.LastRun.CompletedAt = helmtime.Now()
 			h.LastRun.Phase = release.HookPhaseFailed
 			return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
@@ -257,3 +271,13 @@ func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (st
 func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool {
 	return slices.Contains(h.OutputLogPolicies, policy)
 }
+
+func setDefaultDeletePolicy(h *release.Hook) {
+	if h.DeletePolicies != nil && len(h.DeletePolicies) > 0 {
+		return
+	}
+	// TODO(luisdavim): We should probably add the group/version to the hook resources to avoid conflicts with CRs
+	if h.Kind == "Job" || h.Kind == "Pod" {
+		h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
+	}
+}

From 740051ec7a8add5ddc2915367eedbe90ae93a6e1 Mon Sep 17 00:00:00 2001
From: Luis Davim
Date: Mon, 30 May 2022 13:54:31 +0100
Subject: [PATCH 2/2] test: update the test hook manifest to have a deletion policy

Signed-off-by: Luis Davim
---
 pkg/action/action_test.go |  2 ++
 pkg/action/hooks.go       | 38 ++++++++++++++++----------------------
 pkg/action/hooks_test.go  | 36 +++++++++++++++++++++++++++++++++---
 3 files changed, 51 insertions(+), 25 deletions(-)

diff --git a/pkg/action/action_test.go b/pkg/action/action_test.go
index 7a510ace6..e9041486a 100644
--- a/pkg/action/action_test.go
+++ b/pkg/action/action_test.go
@@ -74,6 +74,7 @@ metadata:
   name: test-cm
   annotations:
     "helm.sh/hook": post-install,pre-delete,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation
 data:
   name: value`
 
@@ -810,6 +811,7 @@ metadata:
   name: test-cm-postrendered
   annotations:
     "helm.sh/hook": post-install,pre-delete,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation
 data:
   name: value`
 
diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 1c382ce48..c782f9d72 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -50,7 +50,6 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 	for i, h := range executingHooks {
 		// Set default delete policy to before-hook-creation
 		cfg.hookSetDeletePolicy(h)
-		setDefaultDeletePolicy(h)
 
 		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil {
 			return err
@@ -69,7 +68,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 
 		toBeUpdated, err := existingResourceConflict(resources, rl.Name, rl.Namespace)
 		if err != nil {
-			return errors.Wrap(err, "rendered hook manifests contain a resource that already exists. Unable to continue")
+			return fmt.Errorf("rendered hook manifests contain a resource that already exists. Unable to continue: %w", err)
 		}
 
 		// Record the time at which the hook was applied to the cluster
@@ -88,11 +87,16 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		if _, err := cfg.KubeClient.Create(
 			resources,
 			kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil {
-			// Create or update the hook resources
-			if _, err := cfg.KubeClient.Update(toBeUpdated, resources, false); err != nil {
-			h.LastRun.CompletedAt = helmtime.Now()
-			h.LastRun.Phase = release.HookPhaseFailed
-			return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
+			// Create or update the hook resources
+			if _, err := cfg.KubeClient.Update(
+				toBeUpdated,
+				resources,
+				kube.ClientUpdateOptionServerSideApply(serverSideApply, false),
+			); err != nil {
+				h.LastRun.CompletedAt = helmtime.Now()
+				h.LastRun.Phase = release.HookPhaseFailed
+				return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
+			}
 		}
 
 		waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
@@ -209,11 +213,11 @@ func (cfg *Configuration) hookHasDeletePolicy(h *release.Hook, policy release.Ho
 func (cfg *Configuration) hookSetDeletePolicy(h *release.Hook) {
 	cfg.mutex.Lock()
 	defer cfg.mutex.Unlock()
-	if len(h.DeletePolicies) == 0 {
-		// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
-		//                 resources. For all other resource types update in place if a
-		//                 resource with the same name already exists and is owned by the
-		//                 current release.
+	if len(h.DeletePolicies) > 0 {
+		return
+	}
+	// TODO(luisdavim): We should probably add the group/version to the hook resources to avoid conflicts with CRs
+	if h.Kind == "Job" || h.Kind == "Pod" {
 		h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
 	}
 }
@@ -271,13 +275,3 @@ func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (st
 func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool {
 	return slices.Contains(h.OutputLogPolicies, policy)
 }
-
-func setDefaultDeletePolicy(h *release.Hook) {
-	if h.DeletePolicies != nil && len(h.DeletePolicies) > 0 {
-		return
-	}
-	// TODO(luisdavim): We should probably add the group/version to the hook resources to avoid conflicts with CRs
-	if h.Kind == "Job" || h.Kind == "Pod" {
-		h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
-	}
-}
diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go
index e3a2c0808..bd4ebecb7 100644
--- a/pkg/action/hooks_test.go
+++ b/pkg/action/hooks_test.go
@@ -20,14 +20,20 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"net/http"
 	"reflect"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	kuberuntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest/fake"
 
 	chart "helm.sh/helm/v4/pkg/chart/v2"
 	chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
@@ -236,6 +242,7 @@ type HookFailingKubeWaiter struct {
 }
 
 func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
+	var rList kube.ResourceList
 	configMap := &v1.ConfigMap{}
 	err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
 
@@ -244,10 +251,33 @@
 	if err != nil {
 		return kube.ResourceList{}, err
 	}
-	return kube.ResourceList{{
+	coreV1GV := schema.GroupVersion{Version: "v1"}
+	body := io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(scheme.Codecs.CodecForVersions(scheme.Codecs.LegacyCodec(coreV1GV), scheme.Codecs.UniversalDecoder(coreV1GV), coreV1GV, coreV1GV), configMap))))
+	rList = append(rList, &resource.Info{
 		Name:      configMap.Name,
 		Namespace: configMap.Namespace,
+		Mapping: &meta.RESTMapping{
+			Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmap"},
+			GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"},
+			Scope:            meta.RESTScopeNamespace,
+		},
+		Client: &fake.RESTClient{
+			GroupVersion:         schema.GroupVersion{Group: "apps", Version: "v1"},
+			NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
+			Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) {
+				header := http.Header{}
+				header.Set("Content-Type", kuberuntime.ContentTypeJSON)
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Header:     header,
+					Body:       body,
+				}, nil
+			}),
+		},
+		Object: configMap,
+	})
+
+	return rList, nil
 }
 
 func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
@@ -389,7 +419,7 @@ data:
 			err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply)
 
 			if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
-				t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord)
+				t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", tc.expectedDeleteRecord, kubeClient.deleteRecord)
 			}
 
 			if err != nil && !tc.expectError {