pull/11003/merge
Luis Davim authored 2 weeks ago, committed by GitHub
commit ab2ab0b57b

@@ -74,6 +74,7 @@ metadata:
name: test-cm
annotations:
"helm.sh/hook": post-install,pre-delete,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation
data:
name: value`
@@ -810,6 +811,7 @@ metadata:
name: test-cm-postrendered
annotations:
"helm.sh/hook": post-install,pre-delete,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation
data:
name: value`

@@ -44,7 +44,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
}
}
// hooke are pre-ordered by kind, so keep order stable
// hooks are pre-ordered by kind, so keep order stable
sort.Stable(hookByWeight(executingHooks))
for i, h := range executingHooks {
@@ -60,6 +60,17 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err)
}
// It is safe to use "force" here because these are resources currently rendered by the chart.
err = resources.Visit(setMetadataVisitor(rl.Name, rl.Namespace, true))
if err != nil {
return err
}
toBeUpdated, err := existingResourceConflict(resources, rl.Name, rl.Namespace)
if err != nil {
return fmt.Errorf("rendered hook manifests contain a resource that already exists. Unable to continue: %w", err)
}
// Record the time at which the hook was applied to the cluster
h.LastRun = release.HookExecution{
StartedAt: helmtime.Now(),
@@ -76,9 +87,16 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
if _, err := cfg.KubeClient.Create(
resources,
kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil {
h.LastRun.CompletedAt = helmtime.Now()
h.LastRun.Phase = release.HookPhaseFailed
return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
// Create or update the hook resources
if _, err := cfg.KubeClient.Update(
toBeUpdated,
resources,
kube.ClientUpdateOptionServerSideApply(serverSideApply, false),
); err != nil {
h.LastRun.CompletedAt = helmtime.Now()
h.LastRun.Phase = release.HookPhaseFailed
return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
}
}
waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
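
For context, here is a minimal, self-contained sketch of the create-or-update idea in the hunks above, written as plain Go rather than Helm's kube client API (resourceRef and applyHooks are illustrative names, not Helm symbols): rendered hook resources that already exist in the cluster are routed to an update path instead of failing the create, and everything else is created as before.

package main

import "fmt"

// resourceRef is an illustrative stand-in for a rendered hook resource.
type resourceRef struct {
	Kind, Name string
}

// applyHooks mirrors the shape of the change above: instead of always
// creating, partition the rendered hook resources into those that already
// exist (updated in place) and those that do not (created).
func applyHooks(rendered []resourceRef, exists func(resourceRef) bool) (toCreate, toUpdate []resourceRef) {
	for _, r := range rendered {
		if exists(r) {
			toUpdate = append(toUpdate, r)
			continue
		}
		toCreate = append(toCreate, r)
	}
	return toCreate, toUpdate
}

func main() {
	rendered := []resourceRef{{"ConfigMap", "test-cm"}, {"Job", "db-migrate"}}
	inCluster := map[string]bool{"ConfigMap/test-cm": true}

	toCreate, toUpdate := applyHooks(rendered, func(r resourceRef) bool {
		return inCluster[r.Kind+"/"+r.Name]
	})
	fmt.Println("create:", toCreate) // create: [{Job db-migrate}]
	fmt.Println("update:", toUpdate) // update: [{ConfigMap test-cm}]
}

In the diff itself, existingResourceConflict determines which rendered resources already exist, and those are handed to KubeClient.Update as the set to be updated.
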
@@ -195,11 +213,11 @@ func (cfg *Configuration) hookHasDeletePolicy(h *release.Hook, policy release.Ho
func (cfg *Configuration) hookSetDeletePolicy(h *release.Hook) {
cfg.mutex.Lock()
defer cfg.mutex.Unlock()
if len(h.DeletePolicies) == 0 {
// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
// resources. For all other resource types update in place if a
// resource with the same name already exists and is owned by the
// current release.
if len(h.DeletePolicies) > 0 {
return
}
// TODO(luisdavim): We should probably add the group/version to the hook resources to avoid conflicts with CRs
if h.Kind == "Job" || h.Kind == "Pod" {
h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
}
}
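
Below is a standalone sketch of the defaulting rule that hookSetDeletePolicy now applies (defaultDeletePolicies is an illustrative helper name, not a Helm function): explicitly declared delete policies are left alone, and only Job and Pod hooks fall back to before-hook-creation, leaving other kinds free to be updated in place on a later run.

package main

import "fmt"

// defaultDeletePolicies illustrates the rule applied by hookSetDeletePolicy:
// keep any explicitly declared delete policies, and only default
// run-to-completion kinds (Job, Pod) to "before-hook-creation".
func defaultDeletePolicies(kind string, declared []string) []string {
	if len(declared) > 0 {
		return declared
	}
	if kind == "Job" || kind == "Pod" {
		return []string{"before-hook-creation"}
	}
	// Other kinds (e.g. ConfigMap) get no implicit delete policy.
	return nil
}

func main() {
	fmt.Println(defaultDeletePolicies("Job", nil))                           // [before-hook-creation]
	fmt.Println(defaultDeletePolicies("ConfigMap", nil))                     // []
	fmt.Println(defaultDeletePolicies("ConfigMap", []string{"hook-failed"})) // [hook-failed]
}

The early return on len(h.DeletePolicies) > 0 in the hunk is what keeps user-declared policies untouched.
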

@@ -20,14 +20,20 @@ import (
"bytes"
"fmt"
"io"
"net/http"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
kuberuntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest/fake"
"helm.sh/helm/v4/pkg/chart/common"
"helm.sh/helm/v4/pkg/kube"
@@ -235,6 +241,7 @@ type HookFailingKubeWaiter struct {
}
func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
var rList kube.ResourceList
configMap := &v1.ConfigMap{}
err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
@@ -243,10 +250,33 @@ func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList
return kube.ResourceList{}, err
}
return kube.ResourceList{{
coreV1GV := schema.GroupVersion{Version: "v1"}
body := io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(scheme.Codecs.CodecForVersions(scheme.Codecs.LegacyCodec(coreV1GV), scheme.Codecs.UniversalDecoder(coreV1GV), coreV1GV, coreV1GV), configMap))))
rList = append(rList, &resource.Info{
Name: configMap.Name,
Namespace: configMap.Namespace,
}}, nil
Mapping: &meta.RESTMapping{
Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmap"},
GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"},
Scope: meta.RESTScopeNamespace,
},
Client: &fake.RESTClient{
GroupVersion: schema.GroupVersion{Group: "apps", Version: "v1"},
NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) {
header := http.Header{}
header.Set("Content-Type", kuberuntime.ContentTypeJSON)
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
Body: body,
}, nil
}),
},
Object: configMap,
})
return rList, nil
}
func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
@@ -388,7 +418,7 @@ data:
err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply)
if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord)
t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", tc.expectedDeleteRecord, kubeClient.deleteRecord)
}
if err != nil && !tc.expectError {
