 			return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
-		u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
+		slog.Debug("cleanup on fail set", "cleaning_resources", len(created))
 		_, errs := u.cfg.KubeClient.Delete(created)
 		if errs != nil {
 			var errorList []string
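// NOTE (illustration, not part of the diff): the hunks in this change replace
// printf-style log calls with structured slog key/value pairs. A minimal,
// runnable sketch of the new form, with a hypothetical resource count:
package main

import "log/slog"

func main() {
	created := 3 // hypothetical number of resources created by the upgrade
	// Before: u.cfg.Log("Cleanup on fail set, cleaning up %d resources", created)
	// After: a constant message plus typed attributes that log handlers can filter.
	slog.Debug("cleanup on fail set", "cleaning_resources", created)
}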
@@ -499,10 +502,10 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
 			}
 			return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
 		}
-		u.cfg.Log("Resource cleanup complete")
+		slog.Debug("resource cleanup complete")
 	}
 	if u.Atomic {
-		u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")
+		slog.Debug("upgrade failed and atomic is set, rolling back to last successful release")
 		// As a protection, get the last successful release before rollback.
 		// If there are no successful releases, bail out
@@ -526,7 +529,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
 		rollin := NewRollback(u.cfg)
 		rollin.Version = filteredHistory[0].Version
-		rollin.Wait = true
+		if u.WaitStrategy == kube.HookOnlyStrategy {
+			rollin.WaitStrategy = kube.StatusWatcherStrategy
+		}
 		rollin.WaitForJobs = u.WaitForJobs
 		rollin.DisableHooks = u.DisableHooks
 		rollin.Recreate = u.Recreate
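// NOTE (illustration, not part of the diff): the new conditional above keeps an
// atomic rollback from inheriting hook-only waiting, which would not watch the
// rolled-back resources. A standalone sketch of that rule, with hypothetical
// strategy constants standing in for the kube package:
package main

import "fmt"

type WaitStrategy string

const (
	HookOnlyStrategy      WaitStrategy = "hookOnly"
	StatusWatcherStrategy WaitStrategy = "watcher"
)

// rollbackWaitStrategy upgrades hook-only waiting to the status watcher.
func rollbackWaitStrategy(s WaitStrategy) WaitStrategy {
	if s == HookOnlyStrategy {
		return StatusWatcherStrategy
	}
	return s
}

func main() {
	fmt.Println(rollbackWaitStrategy(HookOnlyStrategy)) // watcher
}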
@@ -552,13 +557,13 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
"if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'",
)
// Sets the strategy to use the watcher strategy if `--wait` is used without an argument
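// NOTE (illustration, not part of the diff): one way to make a bare `--wait`
// select the watcher strategy is pflag's NoOptDefVal, which applies when the
// flag appears without a value. A runnable sketch under that assumption:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var wait string
	f := pflag.NewFlagSet("example", pflag.ContinueOnError)
	f.StringVar(&wait, "wait", "", "wait strategy: 'watcher' or 'legacy'")
	// With NoOptDefVal set, `--wait` alone behaves like `--wait=watcher`.
	f.Lookup("wait").NoOptDefVal = "watcher"
	_ = f.Parse([]string{"--wait"})
	fmt.Println(wait) // watcher
}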
 	f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
 	f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install")
 	f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production")
 	f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
-	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)")
 	f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release")
 	f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
 	f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
 	f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
-	f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used")
+	f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
 	f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
 	f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
 	f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources")
 	f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
 	f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
-	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
 	f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
 	f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
 	f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
 	f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
-	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout")
 	f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
 	f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
 	f.StringVar(&client.Description, "description", "", "add a custom description")
 	f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
 	f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored")
 	f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored")
-	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
-	f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used")
+	f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
 	f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
 	f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
 	f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
c.Log("Skipping delete of %q due to annotation [%s=%s]",info.Name,ResourcePolicyAnno,KeepPolicy)
slog.Debug("skipping delete due to annotation","namespace",info.Namespace,"name",info.Name,"kind",info.Mapping.GroupVersionKind.Kind,"annotation",ResourcePolicyAnno,"value",KeepPolicy)
c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).",dep.Namespace,dep.Name,dep.Status.ObservedGeneration,dep.ObjectMeta.Generation)
ifdep.Status.ObservedGeneration!=dep.Generation {
slog.Debug("Deployment is not ready, observedGeneration does not match spec generation","namespace",dep.GetNamespace(),"name",dep.GetName(),"actualGeneration",dep.Status.ObservedGeneration,"expectedGeneration",dep.Generation)
c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready",dep.Namespace,dep.Name,rs.Status.ReadyReplicas,expectedReady)
ifrs.Status.ReadyReplicas<expectedReady{
slog.Debug("Deployment does not have enough pods ready","namespace",dep.GetNamespace(),"name",dep.GetName(),"readyPods",rs.Status.ReadyReplicas,"totalPods",expectedReady)
returnfalse
}
slog.Debug("Deployment is ready","namespace",dep.GetNamespace(),"name",dep.GetName(),"readyPods",rs.Status.ReadyReplicas,"totalPods",expectedReady)
c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).",ds.Namespace,ds.Name,ds.Status.ObservedGeneration,ds.ObjectMeta.Generation)
ifds.Status.ObservedGeneration!=ds.Generation {
slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation","namespace",ds.GetNamespace(),"name",ds.GetName(),"observedGeneration",ds.Status.ObservedGeneration,"expectedGeneration",ds.Generation)
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled",ds.Namespace,ds.Name,ds.Status.UpdatedNumberScheduled,ds.Status.DesiredNumberScheduled)
slog.Debug("DaemonSet does not have enough Pods scheduled","namespace",ds.GetNamespace(),"name",ds.GetName(),"scheduledPods",ds.Status.UpdatedNumberScheduled,"totalPods",ds.Status.DesiredNumberScheduled)
c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready",ds.Namespace,ds.Name,ds.Status.NumberReady,expectedReady)
ifint(ds.Status.NumberReady)<expectedReady{
slog.Debug("DaemonSet does not have enough Pods ready","namespace",ds.GetNamespace(),"name",ds.GetName(),"readyPods",ds.Status.NumberReady,"totalPods",expectedReady)
returnfalse
}
slog.Debug("DaemonSet is ready","namespace",ds.GetNamespace(),"name",ds.GetName(),"readyPods",ds.Status.NumberReady,"totalPods",expectedReady)
c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).",sts.Namespace,sts.Name,sts.Status.ObservedGeneration,sts.ObjectMeta.Generation)
ifsts.Status.ObservedGeneration!=sts.Generation {
slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation","namespace",sts.GetNamespace(),"name",sts.GetName(),"actualGeneration",sts.Status.ObservedGeneration,"expectedGeneration",sts.Generation)
returnfalse
}
 	// If the update strategy is not a rolling update, there will be nothing to wait for
-	c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
+		slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updatedPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
 		return false
 	}
 	if int(sts.Status.ReadyReplicas) != replicas {
-		c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+		slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
 		return false
 	}
 	// This check only makes sense when all partitions are being upgraded otherwise during a
 	// partitioned rolling upgrade, this condition will never evaluate to true, leading to
-	c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
+		slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
 		return false
 	}
c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready",sts.Namespace,sts.Name,sts.Status.ReadyReplicas,replicas)
slog.Debug("StatefulSet is ready","namespace",sts.GetNamespace(),"name",sts.GetName(),"readyPods",sts.Status.ReadyReplicas,"totalPods",replicas)
c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).",rc.Namespace,rc.Name,rc.Status.ObservedGeneration,rc.ObjectMeta.Generation)
ifrc.Status.ObservedGeneration!=rc.Generation {
slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation","namespace",rc.GetNamespace(),"name",rc.GetName(),"actualGeneration",rc.Status.ObservedGeneration,"expectedGeneration",rc.Generation)
c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).",rs.Namespace,rs.Name,rs.Status.ObservedGeneration,rs.ObjectMeta.Generation)
ifrs.Status.ObservedGeneration!=rs.Generation {
slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation","namespace",rs.GetNamespace(),"name",rs.GetName(),"actualGeneration",rs.Status.ObservedGeneration,"expectedGeneration",rs.Generation)
slog.Debug("waiting for resource","name",first.Identifier.Name,"kind",first.Identifier.GroupKind.Kind,"expectedStatus",desired,"actualStatus",first.Status)
returnerrors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart")
}
returnnil
}
 func validateNoReleaseTime(manifest []byte) error {
 	if releaseTimeSearch.Match(manifest) {
 		return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
 	}
 	return nil
 }
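// NOTE (illustration, not part of the diff): templates that used
// {{ .Release.Time }} can switch to the `now` function, e.g.
// deployedAt: {{ now | date "2006-01-02" | quote }}. A runnable sketch of the
// kind of match validateNoReleaseTime performs; the real releaseTimeSearch
// pattern may differ:
package main

import (
	"fmt"
	"regexp"
)

var releaseTimeSearchExample = regexp.MustCompile(`\.Release\.Time`)

func main() {
	manifest := []byte(`deployedAt: {{ .Release.Time }}`)
	fmt.Println(releaseTimeSearchExample.Match(manifest)) // true -> validation error
}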
 // validateMatchSelector ensures that template specs have a selector declared.