ref(helm): unify log messages to start with a lowercase letter

Signed-off-by: willise <sunshuai@cmss.chinamobile.com>
pull/5973/head
willise 6 years ago
parent 62448a93ba
commit 369725d030

@ -132,7 +132,7 @@ func ensureTestHome(home helmpath.Home, t *testing.T) error {
return fmt.Errorf("%s must be a file, not a directory", repoFile)
}
if r, err := repo.LoadRepositoriesFile(repoFile); err == repo.ErrRepoOutOfDate {
t.Log("Updating repository file format...")
t.Log("updating repository file format...")
if err := r.WriteFile(repoFile, 0644); err != nil {
return err
}

@ -187,7 +187,7 @@ func resourceInfoToObject(info *resource.Info, c *Client) runtime.Object {
// If the problem is just that the resource is not registered, don't print any
// error. This is normal for custom resources.
if !runtime.IsNotRegisteredError(err) {
c.Log("Warning: conversion to internal type failed: %v", err)
c.Log("warning: conversion to internal type failed: %v", err)
}
// Add the unstructured object in this situation. It will still get listed, just
// with less information.
@ -224,9 +224,9 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
missing := []string{}
err = perform(infos, func(info *resource.Info) error {
c.Log("Doing get for %s: %q", info.Mapping.GroupVersionKind.Kind, info.Name)
c.Log("doing get for %s: %q", info.Mapping.GroupVersionKind.Kind, info.Name)
if err := info.Get(); err != nil {
c.Log("WARNING: Failed Get for resource %q: %s", info.Name, err)
c.Log("warning: failed get for resource %q: %s", info.Name, err)
missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name))
return nil
}
@ -248,7 +248,7 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
//Get the relation pods
objPods, err = c.getSelectRelationPod(info, objPods)
if err != nil {
c.Log("Warning: get the relation pod is failed, err:%s", err.Error())
c.Log("warning: get the relation pod is failed, err:%s", err.Error())
}
return nil
@ -374,7 +374,7 @@ func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReade
newlyCreatedResources = append(newlyCreatedResources, info)
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Created a new %s called %q\n", kind, info.Name)
c.Log("created a new %s called %q\n", kind, info.Name)
return nil
}
@ -404,7 +404,7 @@ func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReade
cleanupErrors := []string{}
if opts.CleanupOnFail && (err != nil || len(updateErrors) != 0) {
c.Log("Cleanup on fail enabled: cleaning up newly created resources due to update manifests failures")
c.Log("cleanup on fail enabled: cleaning up newly created resources due to update manifests failures")
cleanupErrors = c.cleanup(newlyCreatedResources)
}
@ -416,30 +416,30 @@ func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReade
}
for _, info := range original.Difference(target) {
c.Log("Deleting %q in %s...", info.Name, info.Namespace)
c.Log("deleting %q in %s...", info.Name, info.Namespace)
if err := info.Get(); err != nil {
c.Log("Unable to get obj %q, err: %s", info.Name, err)
c.Log("unable to get obj %q, err: %s", info.Name, err)
}
annotations, err := metadataAccessor.Annotations(info.Object)
if err != nil {
c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
c.Log("unable to get annotations on %q, err: %s", info.Name, err)
}
if ResourcePolicyIsKeep(annotations) {
policy := annotations[ResourcePolicyAnno]
c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, policy)
c.Log("skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, policy)
continue
}
if err := deleteResource(info); err != nil {
c.Log("Failed to delete %q, err: %s", info.Name, err)
c.Log("failed to delete %q, err: %s", info.Name, err)
}
}
if opts.ShouldWait {
err := c.waitForResources(time.Duration(opts.Timeout)*time.Second, target)
if opts.CleanupOnFail && err != nil {
c.Log("Cleanup on fail enabled: cleaning up newly created resources due to wait failure during update")
c.Log("cleanup on fail enabled: cleaning up newly created resources due to wait failure during update")
cleanupErrors = c.cleanup(newlyCreatedResources)
return fmt.Errorf(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && "))
}
@ -452,9 +452,9 @@ func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReade
func (c *Client) cleanup(newlyCreatedResources []*resource.Info) (cleanupErrors []string) {
for _, info := range newlyCreatedResources {
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Deleting newly created %s with the name %q in %s...", kind, info.Name, info.Namespace)
c.Log("deleting newly created %s with the name %q in %s...", kind, info.Name, info.Namespace)
if err := deleteResource(info); err != nil {
c.Log("Error deleting newly created %s with the name %q in %s: %s", kind, info.Name, info.Namespace, err)
c.Log("error deleting newly created %s with the name %q in %s: %s", kind, info.Name, info.Namespace, err)
cleanupErrors = append(cleanupErrors, err.Error())
}
}
@ -479,7 +479,7 @@ func (c *Client) DeleteWithTimeout(namespace string, reader io.Reader, timeout i
return err
}
err = perform(infos, func(info *resource.Info) error {
c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
c.Log("starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info)
return c.skipIfNotFound(err)
})
@ -488,7 +488,7 @@ func (c *Client) DeleteWithTimeout(namespace string, reader io.Reader, timeout i
}
if shouldWait {
c.Log("Waiting for %d seconds for delete to be completed", timeout)
c.Log("waiting for %d seconds for delete to be completed", timeout)
return waitUntilAllResourceDeleted(infos, time.Duration(timeout)*time.Second)
}
@ -675,7 +675,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return fmt.Errorf("failed to create patch: %s", err)
}
if patch == nil {
c.Log("Looks like there are no changes for %s %q", target.Mapping.GroupVersionKind.Kind, target.Name)
c.Log("looks like there are no changes for %s %q", target.Mapping.GroupVersionKind.Kind, target.Name)
// This needs to happen to make sure that tiller has the latest info from the API
// Otherwise there will be no labels and other functions that use labels will panic
if err := target.Get(); err != nil {
@ -740,7 +740,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
// Restart pods
for _, pod := range pods.Items {
c.Log("Restarting pod: %v/%v", pod.Namespace, pod.Name)
c.Log("restarting pod: %v/%v", pod.Namespace, pod.Name)
// Delete each pod for get them restarted with changed spec.
if err := client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
@ -799,7 +799,7 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) err
}
kind := info.Mapping.GroupVersionKind.Kind
c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
c.Log("watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
// What we watch for depends on the Kind.
// - For a Job, we watch for completion.
@ -816,17 +816,17 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) err
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks.
c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
c.Log("add/modify event for %s: %v", info.Name, e.Type)
if kind == "Job" {
return c.waitForJob(e, info.Name)
}
return true, nil
case watch.Deleted:
c.Log("Deleted event for %s", info.Name)
c.Log("deleted event for %s", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
c.Log("Error event for %s", info.Name)
c.Log("error event for %s", info.Name)
return true, fmt.Errorf("Failed to deploy %s", info.Name)
default:
return false, nil
@ -853,7 +853,7 @@ func (c *Client) waitForJob(e watch.Event, name string) (bool, error) {
}
}
c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, job.Status.Active, job.Status.Failed, job.Status.Succeeded)
c.Log("%s: jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, job.Status.Active, job.Status.Failed, job.Status.Succeeded)
return false, nil
}
@ -902,7 +902,7 @@ func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Inf
return err
}
c.Log("Watching pod %s for completion with timeout of %v", info.Name, timeout)
c.Log("watching pod %s for completion with timeout of %v", info.Name, timeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {

@ -202,7 +202,7 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error {
func (c *Client) podsReady(pods []v1.Pod) bool {
for _, pod := range pods {
if !isPodReady(&pod) {
c.Log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
c.Log("pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
return false
}
}
@ -218,12 +218,12 @@ func (c *Client) servicesReady(svc []v1.Service) bool {
// Make sure the service is not explicitly set to "None" before checking the IP
if s.Spec.ClusterIP != v1.ClusterIPNone && s.Spec.ClusterIP == "" {
c.Log("Service is not ready: %s/%s", s.GetNamespace(), s.GetName())
c.Log("service is not ready: %s/%s", s.GetNamespace(), s.GetName())
return false
}
// This checks if the service has a LoadBalancer and that balancer has an Ingress defined
if s.Spec.Type == v1.ServiceTypeLoadBalancer && s.Status.LoadBalancer.Ingress == nil {
c.Log("Service is not ready: %s/%s", s.GetNamespace(), s.GetName())
c.Log("service is not ready: %s/%s", s.GetNamespace(), s.GetName())
return false
}
}
@ -233,7 +233,7 @@ func (c *Client) servicesReady(svc []v1.Service) bool {
func (c *Client) volumesReady(vols []v1.PersistentVolumeClaim) bool {
for _, v := range vols {
if v.Status.Phase != v1.ClaimBound {
c.Log("PersistentVolumeClaim is not ready: %s/%s", v.GetNamespace(), v.GetName())
c.Log("persistentvolumeclaim is not ready: %s/%s", v.GetNamespace(), v.GetName())
return false
}
}
@ -243,7 +243,7 @@ func (c *Client) volumesReady(vols []v1.PersistentVolumeClaim) bool {
func (c *Client) deploymentsReady(deployments []deployment) bool {
for _, v := range deployments {
if !(v.replicaSets.Status.ReadyReplicas >= *v.deployment.Spec.Replicas-deploymentutil.MaxUnavailable(*v.deployment)) {
c.Log("Deployment is not ready: %s/%s", v.deployment.GetNamespace(), v.deployment.GetName())
c.Log("deployment is not ready: %s/%s", v.deployment.GetNamespace(), v.deployment.GetName())
return false
}
}

@ -150,7 +150,7 @@ func (s *SQL) Get(key string) (*rspb.Release, error) {
// Get will return an error if the result is empty
err := s.db.Get(&record, "SELECT body FROM releases WHERE key = $1", key)
if err != nil {
s.Log("got SQL error when getting release %s: %v", key, err)
s.Log("got sql error when getting release %s: %v", key, err)
return nil, storageerrors.ErrReleaseNotFound(key)
}
@ -250,7 +250,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
transaction, err := s.db.Beginx()
if err != nil {
s.Log("failed to start SQL transaction: %v", err)
s.Log("failed to start sql transaction: %v", err)
return fmt.Errorf("error beginning transaction: %v", err)
}
@ -273,7 +273,7 @@ func (s *SQL) Create(key string, rls *rspb.Release) error {
return storageerrors.ErrReleaseExists(key)
}
s.Log("failed to store release %s in SQL database: %v", key, err)
s.Log("failed to store release %s in sql database: %v", key, err)
return err
}
defer transaction.Commit()
@ -301,7 +301,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
ModifiedAt: int(time.Now().Unix()),
},
); err != nil {
s.Log("failed to update release %s in SQL database: %v", key, err)
s.Log("failed to update release %s in sql database: %v", key, err)
return err
}
@ -312,7 +312,7 @@ func (s *SQL) Update(key string, rls *rspb.Release) error {
func (s *SQL) Delete(key string) (*rspb.Release, error) {
transaction, err := s.db.Beginx()
if err != nil {
s.Log("failed to start SQL transaction: %v", err)
s.Log("failed to start sql transaction: %v", err)
return nil, fmt.Errorf("error beginning transaction: %v", err)
}

@ -216,7 +216,7 @@ func (s *Storage) removeLeastRecent(name string, max int) error {
}
}
s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errors))
s.Log("pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errors))
switch c := len(errors); c {
case 0:
return nil

@ -25,7 +25,7 @@ import (
// GetReleaseContent gets all of the stored information for the given release.
func (s *ReleaseServer) GetReleaseContent(c ctx.Context, req *services.GetReleaseContentRequest) (*services.GetReleaseContentResponse, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("releaseContent: Release name is invalid: %s", req.Name)
s.Log("releasecontent: release name is invalid: %s", req.Name)
return nil, err
}

@ -26,7 +26,7 @@ import (
// GetHistory gets the history for a given release.
func (s *ReleaseServer) GetHistory(ctx context.Context, req *tpb.GetHistoryRequest) (*tpb.GetHistoryResponse, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("getHistory: Release name is invalid: %s", req.Name)
s.Log("gethistory: release name is invalid: %s", req.Name)
return nil, err
}

@ -150,7 +150,7 @@ func (s *ReleaseServer) performRelease(r *release.Release, req *services.Install
s.Log("dry run for %s", r.Name)
if !req.DisableCrdHook && hasCRDHook(r.Hooks) {
s.Log("validation skipped because CRD hook is present")
s.Log("validation skipped because crd hook is present")
res.Release.Info.Description = "Validation skipped because CRDs are not installed"
return res, nil
}
@ -172,7 +172,7 @@ func (s *ReleaseServer) performRelease(r *release.Release, req *services.Install
return res, err
}
} else {
s.Log("CRD install hooks disabled for %s", req.Name)
s.Log("crd install hooks disabled for %s", req.Name)
}
// Because the CRDs are installed, they are used for validation during this step.

@ -63,7 +63,7 @@ func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackRel
// the previous release's configuration
func (s *ReleaseServer) prepareRollback(req *services.RollbackReleaseRequest) (*release.Release, *release.Release, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("prepareRollback: Release name is invalid: %s", req.Name)
s.Log("preparerollback: release name is invalid: %s", req.Name)
return nil, nil, err
}

@ -210,7 +210,7 @@ func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) {
return "ERROR", err
}
s.Log("info: Created new release name %s", newname)
s.Log("info: created new release name %s", newname)
return newname, nil
}
@ -227,9 +227,9 @@ func (s *ReleaseServer) createUniqName(m moniker.Namer) (string, error) {
return name, nil
}
}
s.Log("info: generated name %s is taken. Searching again.", name)
s.Log("info: generated name %s is taken. searching again.", name)
}
s.Log("warning: No available release names found after %d tries", maxTries)
s.Log("warning: no available release names found after %d tries", maxTries)
return "ERROR", errors.New("no available release name found")
}
@ -415,10 +415,10 @@ func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values
func (s *ReleaseServer) recordRelease(r *release.Release, reuse bool) {
if reuse {
if err := s.env.Releases.Update(r); err != nil {
s.Log("warning: Failed to update release %s: %s", r.Name, err)
s.Log("warning: failed to update release %s: %s", r.Name, err)
}
} else if err := s.env.Releases.Create(r); err != nil {
s.Log("warning: Failed to record release %s: %s", r.Name, err)
s.Log("warning: failed to record release %s: %s", r.Name, err)
}
}
@ -448,7 +448,7 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
b := bytes.NewBufferString(h.Manifest)
if err := kubeCli.Create(namespace, b, timeout, false); err != nil {
s.Log("warning: Release %s %s %s failed: %s", name, hook, h.Path, err)
s.Log("warning: release %s %s %s failed: %s", name, hook, h.Path, err)
return err
}
// No way to rewind a bytes.Buffer()?
@ -458,7 +458,7 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
// We can't watch CRDs, but need to wait until they reach the established state before continuing
if hook != hooks.CRDInstall {
if err := kubeCli.WatchUntilReady(namespace, b, timeout, false); err != nil {
s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
s.Log("warning: release %s %s %s could not complete: %s", name, hook, h.Path, err)
// If a hook is failed, checkout the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook
if err := s.deleteHookByPolicy(h, hooks.HookFailed, name, namespace, hook, kubeCli); err != nil {
@ -468,7 +468,7 @@ func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook strin
}
} else {
if err := kubeCli.WaitUntilCRDEstablished(b, time.Duration(timeout)*time.Second); err != nil {
s.Log("warning: Release %s %s %s could not complete: %s", name, hook, h.Path, err)
s.Log("warning: release %s %s %s could not complete: %s", name, hook, h.Path, err)
return err
}
}
@ -510,7 +510,7 @@ func (s *ReleaseServer) deleteHookByPolicy(h *release.Hook, policy string, name,
s.Log("deleting %s hook %s for release %s due to %q policy", hook, h.Name, name, policy)
waitForDelete := h.DeleteTimeout > 0
if errHookDelete := kubeCli.DeleteWithTimeout(namespace, b, h.DeleteTimeout, waitForDelete); errHookDelete != nil {
s.Log("warning: Release %s %s %S could not be deleted: %s", name, hook, h.Path, errHookDelete)
s.Log("warning: release %s %s %s could not be deleted: %s", name, hook, h.Path, errHookDelete)
return errHookDelete
}
}

@ -29,7 +29,7 @@ import (
// GetReleaseStatus gets the status information for a named release.
func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetReleaseStatusRequest) (*services.GetReleaseStatusResponse, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("getStatus: Release name is invalid: %s", req.Name)
s.Log("getstatus: release name is invalid: %s", req.Name)
return nil, err
}
@ -69,7 +69,7 @@ func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetRelease
// Skip errors if this is already deleted or failed.
return statusResp, nil
} else if err != nil {
s.Log("warning: Get for %s failed: %v", rel.Name, err)
s.Log("warning: get for %s failed: %v", rel.Name, err)
return nil, err
}
rel.Info.Status.Resources = resp

@ -28,7 +28,7 @@ const maxParallelism = 20
func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream services.ReleaseService_RunReleaseTestServer) error {
if err := validateReleaseName(req.Name); err != nil {
s.Log("releaseTest: Release name is invalid: %s", req.Name)
s.Log("releasetest: release name is invalid: %s", req.Name)
return err
}
@ -69,7 +69,7 @@ func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream
}
if err := s.env.Releases.Update(rel); err != nil {
s.Log("test: Failed to store updated release: %s", err)
s.Log("test: failed to store updated release: %s", err)
}
return nil

@ -32,13 +32,13 @@ import (
// UninstallRelease deletes all of the resources associated with this release, and marks the release DELETED.
func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallReleaseRequest) (*services.UninstallReleaseResponse, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("uninstallRelease: Release name is invalid: %s", req.Name)
s.Log("uninstallrelease: release name is invalid: %s", req.Name)
return nil, err
}
rels, err := s.env.Releases.History(req.Name)
if err != nil {
s.Log("uninstall: Release not loaded: %s", req.Name)
s.Log("uninstall: release not loaded: %s", req.Name)
return nil, err
}
if len(rels) < 1 {
@ -53,7 +53,7 @@ func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallR
if rel.Info.Status.Code == release.Status_DELETED {
if req.Purge {
if err := s.purgeReleases(rels...); err != nil {
s.Log("uninstall: Failed to purge the release: %s", err)
s.Log("uninstall: failed to purge the release: %s", err)
return nil, err
}
return &services.UninstallReleaseResponse{Release: rel}, nil
@ -61,7 +61,7 @@ func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallR
return nil, fmt.Errorf("the release named %q is already deleted", req.Name)
}
s.Log("uninstall: Deleting %s", req.Name)
s.Log("uninstall: deleting %s", req.Name)
rel.Info.Status.Code = release.Status_DELETING
rel.Info.Deleted = timeconv.Now()
rel.Info.Description = "Deletion in progress (or silently failed)"
@ -78,7 +78,7 @@ func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallR
// From here on out, the release is currently considered to be in Status_DELETING
// state.
if err := s.env.Releases.Update(rel); err != nil {
s.Log("uninstall: Failed to store updated release: %s", err)
s.Log("uninstall: failed to store updated release: %s", err)
}
kept, errs := s.ReleaseModule.Delete(rel, req, s.env)
@ -107,13 +107,13 @@ func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallR
s.Log("purge requested for %s", req.Name)
err := s.purgeReleases(rels...)
if err != nil {
s.Log("uninstall: Failed to purge the release: %s", err)
s.Log("uninstall: failed to purge the release: %s", err)
}
return res, err
}
if err := s.env.Releases.Update(rel); err != nil {
s.Log("uninstall: Failed to store updated release: %s", err)
s.Log("uninstall: failed to store updated release: %s", err)
}
if len(es) > 0 {

@ -32,7 +32,7 @@ import (
// UpdateRelease takes an existing release and new information, and upgrades the release.
func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
if err := validateReleaseName(req.Name); err != nil {
s.Log("updateRelease: Release name is invalid: %s", req.Name)
s.Log("updaterelease: release name is invalid: %s", req.Name)
return nil, err
}
s.Log("preparing update for %s", req.Name)

Loading…
Cancel
Save