From 8c3340fc3a9f5b39e8b84b015710ca6566eb209e Mon Sep 17 00:00:00 2001
From: Taylor Thomas
Date: Wed, 22 Mar 2017 16:26:12 -0700
Subject: [PATCH] feat(tiller): Stage 1 of new logging method

This is the first of several PRs to improve Tiller's logging. It adds a
`--log-level` flag with three configurable levels for Tiller and adds
logging to the `main` and `kube` packages. To avoid creating one giant PR,
and to allow for feedback on the logging methodology before applying it
everywhere, this only introduces the changes to those two packages. One or
two follow-up PRs will roll the new logging method out to all Tiller
components and add documentation.
---
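Note (illustrative only, not part of the diff below): the flow this patch
introduces resolves the --log-level string through logutil.GetLevel and hands
the result to logrus. A minimal, self-contained sketch of that flow, with
"DEBUG" standing in for whatever the user passes on the command line:

    package main

    import (
    	log "github.com/Sirupsen/logrus"

    	"k8s.io/helm/pkg/logutil"
    )

    func main() {
    	// Resolve the user-supplied string into a logrus level.
    	// GetLevel returns an error for anything but ERROR, INFO, or DEBUG.
    	logLevel, err := logutil.GetLevel("DEBUG")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Everything at or above this level is emitted from here on.
    	log.SetLevel(logLevel)
    	log.WithFields(log.Fields{"_package": "main"}).Debug("log level configured")
    }
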
 cmd/tiller/tiller.go    | 124 ++++++++++--
 cmd/tiller/trace.go     |  15 +-
 glide.lock              |  18 +-
 glide.yaml              |   2 +
 pkg/kube/client.go      | 417 +++++++++++++++++++++++++++++++++++++---
 pkg/kube/log.go         |   7 +
 pkg/kube/namespace.go   |  16 ++
 pkg/logutil/log.go      |  52 +++++
 pkg/logutil/log_test.go |  35 ++++
 9 files changed, 631 insertions(+), 55 deletions(-)
 create mode 100644 pkg/logutil/log.go
 create mode 100644 pkg/logutil/log_test.go

diff --git a/cmd/tiller/tiller.go b/cmd/tiller/tiller.go
index 72388d307..617ef8c4d 100644
--- a/cmd/tiller/tiller.go
+++ b/cmd/tiller/tiller.go
@@ -19,7 +19,6 @@ package main // import "k8s.io/helm/cmd/tiller"
 import (
 	"fmt"
 	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"os"
@@ -27,7 +26,9 @@ import (
 
 	"github.com/spf13/cobra"
 
+	log "github.com/Sirupsen/logrus"
 	"k8s.io/helm/pkg/kube"
+	"k8s.io/helm/pkg/logutil"
 	"k8s.io/helm/pkg/proto/hapi/services"
 	"k8s.io/helm/pkg/storage"
 	"k8s.io/helm/pkg/storage/driver"
@@ -41,6 +42,9 @@ const (
 	storageConfigMap = "configmap"
 )
 
+// Variable for the string log level
+var level string
+
 // rootServer is the root gRPC server.
 //
 // Each gRPC service registers itself to this server during init().
@@ -73,8 +77,15 @@ var rootCommand = &cobra.Command{
 	Run: start,
 }
 
+// logger with base fields for this package
+var logger *log.Entry
+
 func init() {
-	log.SetFlags(log.Flags() | log.Lshortfile)
+	logger = log.WithFields(log.Fields{
+		"_package": "main",
+	})
+	// Makes the logger use a full timestamp
+	log.SetFormatter(&log.TextFormatter{FullTimestamp: true})
 }
 
 func main() {
@@ -82,6 +93,7 @@ func main() {
 	p.StringVarP(&grpcAddr, "listen", "l", ":44134", "address:port to listen on")
 	p.StringVar(&store, "storage", storageConfigMap, "storage driver to use. One of 'configmap' or 'memory'")
 	p.BoolVar(&enableTracing, "trace", false, "enable rpc tracing")
+	p.StringVar(&level, "log-level", "INFO", "level to log at. One of 'ERROR', 'INFO', 'DEBUG'")
 
 	if err := rootCommand.Execute(); err != nil {
 		fmt.Fprint(os.Stderr, err)
@@ -90,12 +102,36 @@ func main() {
 	}
 }
 
 func start(c *cobra.Command, args []string) {
+	// Set the log level first
+	logLevel, err := logutil.GetLevel(level)
+	if err != nil {
+		logger.WithFields(log.Fields{
+			"_module": "tiller",
+			"_context": "start",
+			"error": err,
+		}).Error("Unable to get log level")
+		os.Exit(1)
+	}
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+		"level": level,
+	}).Debug("Setting log level")
+	log.SetLevel(logLevel)
 	clientset, err := kube.New(nil).ClientSet()
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Cannot initialize Kubernetes connection: %s\n", err)
+		logger.WithFields(log.Fields{
+			"_module": "tiller",
+			"_context": "start",
+			"error": err,
+		}).Error("Cannot initialize Kubernetes connection")
 		os.Exit(1)
 	}
-
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+		"driver": store,
+	}).Debug("Setting storage driver")
 	switch store {
 	case storageMemory:
 		env.Releases = storage.Init(driver.NewMemory())
@@ -103,23 +139,54 @@ func start(c *cobra.Command, args []string) {
 		env.Releases = storage.Init(driver.NewConfigMaps(clientset.Core().ConfigMaps(namespace())))
 	}
 
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+		"address": grpcAddr,
+	}).Debug("Starting GRPC server")
 	lstn, err := net.Listen("tcp", grpcAddr)
 	if err != nil {
+		logger.WithFields(log.Fields{
+			"_module": "tiller",
+			"_context": "start",
+			"error": err,
+		}).Error("Server died")
 		fmt.Fprintf(os.Stderr, "Server died: %s\n", err)
 		os.Exit(1)
 	}
 
-	fmt.Printf("Starting Tiller %s\n", version.GetVersion())
-	fmt.Printf("GRPC listening on %s\n", grpcAddr)
-	fmt.Printf("Probes listening on %s\n", probeAddr)
-	fmt.Printf("Storage driver is %s\n", env.Releases.Name())
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Infof("Tiller %s running", version.GetVersion())
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Infof("GRPC listening on %s", grpcAddr)
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Infof("Probes listening on %s", probeAddr)
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Infof("Storage driver is %s", env.Releases.Name())
 
 	if enableTracing {
+		logger.WithFields(log.Fields{
+			"_module": "tiller",
+			"_context": "start",
+			"address": grpcAddr,
+		}).Debug("Starting GRPC trace")
		startTracing(traceAddr)
 	}
 
 	srvErrCh := make(chan error)
 	probeErrCh := make(chan error)
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Debug("Starting release server")
 	go func() {
 		svc := tiller.NewReleaseServer(env, clientset)
 		services.RegisterReleaseServiceServer(rootServer, svc)
@@ -127,7 +194,10 @@ func start(c *cobra.Command, args []string) {
 			srvErrCh <- err
 		}
 	}()
-
+	logger.WithFields(log.Fields{
+		"_module": "tiller",
+		"_context": "start",
+	}).Debug("Starting probes server")
 	go func() {
 		mux := newProbesMux()
 		if err := http.ListenAndServe(probeAddr, mux); err != nil {
@@ -137,25 +207,57 @@ func start(c *cobra.Command, args []string) {
 
 	select {
 	case err := <-srvErrCh:
-		fmt.Fprintf(os.Stderr, "Server died: %s\n", err)
+		logger.WithFields(log.Fields{
+			"_module": "tiller",
+			"_context": "start",
+			"error": err,
+		}).Error("Release server died")
 		os.Exit(1)
 	case err := <-probeErrCh:
-		fmt.Fprintf(os.Stderr, "Probes server died: %s\n", err)
"_module": "tiller", + "_context": "start", + "error": err, + }).Error("Probes server died") } } // namespace returns the namespace of tiller func namespace() string { + logger.WithFields(log.Fields{ + "_module": "tiller", + "_context": "namespace", + }).Debug("Getting tiller namespace") if ns := os.Getenv("TILLER_NAMESPACE"); ns != "" { + logger.WithFields(log.Fields{ + "_module": "tiller", + "_context": "namespace", + "namespace": ns, + }).Debug("Found TILLER_NAMESPACE environment variable") return ns } + logger.WithFields(log.Fields{ + "_module": "tiller", + "_context": "namespace", + }).Debug("No namespace variable set. Attempting to get namespace from service account token") // Fall back to the namespace associated with the service account token, if available if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + logger.WithFields(log.Fields{ + "_module": "tiller", + "_context": "namespace", + "namespace": ns, + }).Debug("Found service account token namespace") return ns } } + logger.WithFields(log.Fields{ + "_module": "tiller", + "_context": "namespace", + "namespace": environment.DefaultTillerNamespace, + }).Debug("No namespaces found. Returning default namespace") + return environment.DefaultTillerNamespace } diff --git a/cmd/tiller/trace.go b/cmd/tiller/trace.go index b9e0583f2..33ff24486 100644 --- a/cmd/tiller/trace.go +++ b/cmd/tiller/trace.go @@ -17,17 +17,15 @@ limitations under the License. package main // import "k8s.io/helm/cmd/tiller" import ( - "fmt" - "log" "net/http" _ "net/http/pprof" + log "github.com/Sirupsen/logrus" "google.golang.org/grpc" ) func startTracing(addr string) { - fmt.Printf("Tracing server is listening on %s\n", addr) grpc.EnableTracing = true http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { @@ -41,9 +39,18 @@ func startTracing(addr string) { go func() { if err := http.ListenAndServe(addr, nil); err != nil { - log.Printf("tracing error: %s", err) + logger.WithFields(log.Fields{ + "_module": "trace", + "_context": "startTracing", + "error": err, + }).Error("Tracing error") } }() + logger.WithFields(log.Fields{ + "_module": "trace", + "_context": "startTracing", + "address": addr, + }).Info("Tracing server listening") } const traceIndexHTML = ` diff --git a/glide.lock b/glide.lock index 9672f8c0d..ddf45a5a2 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 0d1c5b7304a853820dcaa296d3aa1f5f3466a8491dcef80cbcaf43c954acb2a8 -updated: 2017-03-20T15:26:55.446524716-07:00 +hash: 944effe504b921cc01b37b067b1739644c36eeda78a203f7d4cb6cc0eb2a0104 +updated: 2017-03-22T11:59:05.374528386-07:00 imports: - name: cloud.google.com/go version: 3b1ae45394a234c385be014e9a488f2bb6eef821 @@ -99,7 +99,7 @@ imports: - name: github.com/facebookgo/symwalk version: 42004b9f322246749dd73ad71008b1f3160c0052 - name: github.com/ghodss/yaml - version: 04f313413ffd65ce25f2541bfd2b2ceec5c0908c + version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-openapi/jsonpointer version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 - name: github.com/go-openapi/jsonreference @@ -186,7 +186,7 @@ imports: - name: github.com/Masterminds/sprig version: 23597e5f6ad0e4d590e71314bfd0251a4a3cf849 - name: github.com/mattn/go-runewidth - version: 14207d285c6c197daabb5c9793d63e7af9ab2d50 + version: d6bea18f789704b5f83375793155289da36a3c7f - name: github.com/mitchellh/go-wordwrap version: ad45545899c7b13c020ea92b2072220eefad42b8 - name: 
 - name: github.com/naoina/go-stringutil
@@ -208,7 +208,7 @@ imports:
 - name: github.com/shurcooL/sanitized_anchor_name
   version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
 - name: github.com/Sirupsen/logrus
-  version: 51fe59aca108dc5680109e7b2051cbdcfa5a253c
+  version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f
 - name: github.com/spf13/cobra
   version: f62e98d28ab7ad31d707ba837a966378465c7b57
   subpackages:
@@ -216,7 +216,7 @@ imports:
 - name: github.com/spf13/pflag
   version: 5ccb023bc27df288a957c5e994cd44fd19619465
 - name: github.com/technosophos/moniker
-  version: ab470f5e105a44d0c87ea21bacd6a335c4816d83
+  version: 9f956786b91d9786ca11aa5be6104542fa911546
 - name: github.com/ugorji/go
   version: f1f1a805ed361a0e078bb537e4ea78cd37dcf065
   subpackages:
@@ -298,9 +298,9 @@ imports:
 - name: gopkg.in/inf.v0
   version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 - name: gopkg.in/yaml.v2
-  version: a3f3340b5840cee44f372bddb5880fcbc419b46a
+  version: a83829b6f1293c91addabc89d0571c246397bbf4
 - name: k8s.io/kubernetes
-  version: 00a1fb254bd8e5235575fba1398b958943e39078
+  version: 894ff23729bbc0055907dd3a496afb725396adda
   subpackages:
   - cmd/kubeadm/app/apis/kubeadm
   - cmd/kubeadm/app/apis/kubeadm/install
@@ -510,5 +510,3 @@ testImports:
   version: e3a8ff8ce36581f87a15341206f205b1da467059
   subpackages:
   - assert
-  - mock
-  - require
diff --git a/glide.yaml b/glide.yaml
index f8a7af4f4..9cd042c9c 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -69,3 +69,5 @@ import:
   version: ~0.1.0
 - package: github.com/naoina/go-stringutil
   version: ~0.1.0
+- package: github.com/Sirupsen/logrus
+  version: ~0.11.5
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index a43627416..c767309d3 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -22,10 +22,10 @@ import (
 	goerrors "errors"
 	"fmt"
 	"io"
-	"log"
 	"strings"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	jsonpatch "github.com/evanphx/json-patch"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
@@ -74,6 +74,13 @@ type ResourceActorFunc func(*resource.Info) error
 
 // Create creates Kubernetes resources from an io.reader.
 //
 // Namespace will set the namespace
 func (c *Client) Create(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Create",
+		"namespace": namespace,
+		"timeout": timeout,
+		"wait": shouldWait,
+	}).Debug("Beginning resource creation")
 	client, err := c.ClientSet()
 	if err != nil {
 		return err
 	}
@@ -81,10 +88,19 @@ func (c *Client) Create(namespace string, reader io.Reader, timeout int64, shoul
 	if err := ensureNamespace(client, namespace); err != nil {
 		return err
 	}
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Create",
+	}).Debug("Building manifests")
 	infos, buildErr := c.BuildUnstructured(namespace, reader)
 	if buildErr != nil {
 		return buildErr
 	}
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Create",
+		"num-manifests": len(infos),
+	}).Debug("Creating manifests")
 	if err := perform(c, namespace, infos, createResource); err != nil {
 		return err
 	}
@@ -97,7 +113,11 @@ func (c *Client) Create(namespace string, reader io.Reader, timeout int64, shoul
 func (c *Client) newBuilder(namespace string, reader io.Reader) *resource.Result {
 	schema, err := c.Validator(true, c.SchemaCacheDir)
 	if err != nil {
-		log.Printf("warning: failed to load schema: %s", err)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "newBuilder",
+			"error": err,
+		}).Error("Failed to load schema")
 	}
 	return c.NewBuilder().
 		ContinueOnError().
@@ -111,14 +131,26 @@ func (c *Client) newBuilder(namespace string, reader io.Reader) *resource.Result
 
 // BuildUnstructured validates for Kubernetes objects and returns unstructured infos.
 func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result, error) {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "BuildUnstructured",
+	}).Debug("Beginning unstructured object build")
 	schema, err := c.Validator(true, c.SchemaCacheDir)
 	if err != nil {
-		log.Printf("warning: failed to load schema: %s", err)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "BuildUnstructured",
+			"error": err,
+		}).Error("Failed to load schema")
 	}
 
 	mapper, typer, err := c.UnstructuredObject()
 	if err != nil {
-		log.Printf("failed to load mapper: %s", err)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "BuildUnstructured",
+			"error": err,
+		}).Error("Failed to load mapper")
 		return nil, err
 	}
 	var result Result
@@ -135,6 +167,10 @@ func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result,
 
 // Build validates for Kubernetes objects and returns resource Infos from a io.Reader.
 func (c *Client) Build(namespace string, reader io.Reader) (Result, error) {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Build",
+	}).Debug("Beginning structured object build")
 	var result Result
 	result, err := c.newBuilder(namespace, reader).Infos()
 	return result, scrubValidationError(err)
@@ -144,6 +180,11 @@ func (c *Client) Build(namespace string, reader io.Reader) (Result, error) {
 //
 // Namespace will set the namespace
 func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Get",
+		"namespace": namespace,
+	}).Debug("Beginning resource get")
 	// Since we don't know what order the objects come in, let's group them by the types, so
 	// that when we print them, they come looking good (headers apply to subgroups, etc.)
 	objs := make(map[string][]runtime.Object)
@@ -152,7 +193,11 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
 		return "", err
 	}
 	err = perform(c, namespace, infos, func(info *resource.Info) error {
-		log.Printf("Doing get for: '%s'", info.Name)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Get",
+			"name": info.Name,
+		}).Info("Getting object")
 		obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name, info.Export)
 		if err != nil {
 			return err
 		}
 		// We need to grab the ObjectReference so we can correctly group the objects.
 		or, err := api.GetReference(obj)
 		if err != nil {
-			log.Printf("FAILED GetReference for: %#v\n%v", obj, err)
+			logger.WithFields(log.Fields{
+				"_module": "client",
+				"_context": "Get",
+				"error": err,
+				"object": fmt.Sprintf("%#v", obj),
+			}).Error("FAILED GetReference")
 			return err
 		}
@@ -174,6 +224,10 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
 		return "", err
 	}
 
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Get",
+	}).Debug("Grouping returned resources")
 	// Ok, now we have all the objects grouped by types (say, by v1/Pod, v1/Service, etc.), so
 	// spin through them and print them. Printer is cool since it prints the header only when
 	// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
@@ -186,7 +240,12 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
 		}
 		for _, o := range ot {
 			if err := p.PrintObj(o, buf); err != nil {
-				log.Printf("failed to print object type '%s', object: '%s' :\n %v", t, o, err)
+				logger.WithFields(log.Fields{
+					"_module": "client",
+					"_context": "Get",
+					"type": t,
+					"object": o,
+				}).Error("Failed to print")
 				return "", err
 			}
 		}
@@ -204,45 +263,102 @@ func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
 //
 // Namespace will set the namespaces
 func (c *Client) Update(namespace string, originalReader, targetReader io.Reader, recreate bool, timeout int64, shouldWait bool) error {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Update",
+		"namespace": namespace,
+		"timeout": timeout,
+		"recreate": recreate,
+		"wait": shouldWait,
+	}).Debug("Beginning resource update")
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Update",
+	}).Debug("Rebuilding original objects")
 	original, err := c.BuildUnstructured(namespace, originalReader)
 	if err != nil {
 		return fmt.Errorf("failed decoding reader into objects: %s", err)
 	}
-
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Update",
+	}).Debug("Building new objects")
 	target, err := c.BuildUnstructured(namespace, targetReader)
 	if err != nil {
 		return fmt.Errorf("failed decoding reader into objects: %s", err)
 	}
 
 	updateErrors := []string{}
-
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Update",
+		"num-objects": len(target),
+	}).Debug("Visiting new objects for update")
 	err = target.Visit(func(info *resource.Info, err error) error {
 		if err != nil {
 			return err
 		}
-
+		kind := info.Mapping.GroupVersionKind.Kind
 		helper := resource.NewHelper(info.Client, info.Mapping)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Update",
+			"name": info.Name,
+			"kind": kind,
+			"namespace": info.Namespace,
+		}).Debug("Attempting to get resource")
 		if _, err := helper.Get(info.Namespace, info.Name, info.Export); err != nil {
 			if !errors.IsNotFound(err) {
 				return fmt.Errorf("Could not get information about the resource: err: %s", err)
 			}
 
 			// Since the resource does not exist, create it.
+			logger.WithFields(log.Fields{
+				"_module": "client",
+				"_context": "Update",
+				"name": info.Name,
+				"kind": kind,
+				"namespace": info.Namespace,
+			}).Debug("Resource did not exist, attempting to create")
 			if err := createResource(info); err != nil {
 				return fmt.Errorf("failed to create resource: %s", err)
 			}
-
-			kind := info.Mapping.GroupVersionKind.Kind
-			log.Printf("Created a new %s called %s\n", kind, info.Name)
+			logger.WithFields(log.Fields{
+				"_module": "client",
+				"_context": "Update",
+				"name": info.Name,
+				"kind": kind,
+				"namespace": info.Namespace,
+			}).Info("Created new resource")
 			return nil
 		}
-
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Update",
+			"name": info.Name,
+			"kind": kind,
+			"namespace": info.Namespace,
+		}).Debug("Getting original resource")
 		originalInfo := original.Get(info)
 		if originalInfo == nil {
 			return fmt.Errorf("no resource with the name %s found", info.Name)
 		}
-
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Update",
+			"name": info.Name,
+			"kind": kind,
+			"namespace": info.Namespace,
+		}).Debug("Beginning resource update")
 		if err := updateResource(c, info, originalInfo.Object, recreate); err != nil {
+			logger.WithFields(log.Fields{
+				"_module": "client",
+				"_context": "Update",
+				"name": info.Name,
+				"kind": kind,
+				"namespace": info.Namespace,
+				"error": err,
+			}).Error("Error updating resource")
 			log.Printf("error updating the resource %s:\n\t %v", info.Name, err)
 			updateErrors = append(updateErrors, err.Error())
 		}
@@ -258,8 +374,22 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader
 	}
 
 	for _, info := range original.Difference(target) {
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Update",
+			"name": info.Name,
+			"kind": info.Mapping.GroupVersionKind.Kind,
+			"namespace": info.Namespace,
+		}).Debug("Deleting resource that is no longer in the manifest")
 		log.Printf("Deleting %s in %s...", info.Name, info.Namespace)
 		if err := deleteResource(c, info); err != nil {
+			logger.WithFields(log.Fields{
+				"_module": "client",
+				"_context": "Update",
+				"name": info.Name,
+				"kind": info.Mapping.GroupVersionKind.Kind,
+				"namespace": info.Namespace,
+			}).Error("Failed to delete resource")
 			log.Printf("Failed to delete %s, err: %s", info.Name, err)
 		}
 	}
@@ -273,12 +403,28 @@ func (c *Client) Update(namespace string, originalReader, targetReader io.Reader
 //
 // Namespace will set the namespace
 func (c *Client) Delete(namespace string, reader io.Reader) error {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Delete",
+		"namespace": namespace,
+	}).Debug("Beginning resource delete")
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "Delete",
+		"namespace": namespace,
+	}).Debug("Rebuilding objects from manifest")
 	infos, err := c.BuildUnstructured(namespace, reader)
 	if err != nil {
 		return err
 	}
 	return perform(c, namespace, infos, func(info *resource.Info) error {
-		log.Printf("Starting delete for %s %s", info.Name, info.Mapping.GroupVersionKind.Kind)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "Delete",
+			"namespace": namespace,
+			"name": info.Name,
+			"kind": info.Mapping.GroupVersionKind.Kind,
+		}).Info("Starting delete")
 		err := deleteResource(c, info)
 		return skipIfNotFound(err)
 	})
@@ -286,7 +432,11 @@ func (c *Client) Delete(namespace string, reader io.Reader) error {
 
 func skipIfNotFound(err error) error {
 	if errors.IsNotFound(err) {
-		log.Printf("%v", err)
"_context": "skipIfNotFound", + "error": err, + }).Info("Skipping not found object") return nil } return err @@ -311,6 +461,17 @@ func watchTimeout(t time.Duration) ResourceActorFunc { // // Handling for other kinds will be added as necessary. func (c *Client) WatchUntilReady(namespace string, reader io.Reader, timeout int64, shouldWait bool) error { + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "WatchUntilReady", + "namespace": namespace, + "timeout": timeout, + }).Debug("Beginning watch of Jobs") + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "WatchUntilReady", + "namespace": namespace, + }).Debug("Building resources") infos, err := c.Build(namespace, reader) if err != nil { return err @@ -346,19 +507,44 @@ func deleteResource(c *Client, info *resource.Info) error { if err != nil { // If there is no reaper for this resources, delete it. if kubectl.IsNoSuchReaperError(err) { + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "deleteResource", + "name": info.Name, + "namespace": info.Namespace, + }).Info("Resource does not have reaper. Deleting") return resource.NewHelper(info.Client, info.Mapping).Delete(info.Namespace, info.Name) } return err } - log.Printf("Using reaper for deleting %s", info.Name) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "deleteResource", + "name": info.Name, + "namespace": info.Namespace, + }).Info("Using reaper for delete") return reaper.Stop(info.Namespace, info.Name, 0, nil) } func createPatch(mapping *meta.RESTMapping, target, current runtime.Object) ([]byte, api.PatchType, error) { + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "createPatch", + }).Debug("Creating patch") + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "createPatch", + "object": current, + }).Debug("Marshalling old object for merge") oldData, err := json.Marshal(current) if err != nil { return nil, api.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %s", err) } + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "createPatch", + "object": target, + }).Debug("Marshalling new object for merge") newData, err := json.Marshal(target) if err != nil { return nil, api.StrategicMergePatchType, fmt.Errorf("serializing target configuration: %s", err) @@ -368,29 +554,53 @@ func createPatch(mapping *meta.RESTMapping, target, current runtime.Object) ([]b return nil, api.StrategicMergePatchType, nil } + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "createPatch", + }).Debug("Attempting to get versioned object") // Get a versioned object versionedObject, err := api.Scheme.New(mapping.GroupVersionKind) switch { case runtime.IsNotRegisteredError(err): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "createPatch", + "kind": mapping.GroupVersionKind, + }).Debug("Unregistered kind found. This is likely a third party resource. 
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "createPatch",
+			"kind": mapping.GroupVersionKind,
+		}).Debug("Unregistered kind found. This is likely a third party resource. Falling back to JSON merge patch")
 		// fall back to generic JSON merge patch
 		patch, err := jsonpatch.CreateMergePatch(oldData, newData)
 		return patch, api.MergePatchType, err
 	case err != nil:
 		return nil, api.StrategicMergePatchType, fmt.Errorf("failed to get versionedObject: %s", err)
 	default:
-		log.Printf("generating strategic merge patch for %T", target)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "createPatch",
+			"kind": mapping.GroupVersionKind,
+		}).Info("generating strategic merge patch")
 		patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, versionedObject)
 		return patch, api.StrategicMergePatchType, err
 	}
 }
 
 func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, recreate bool) error {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "updateResource",
+		"name": target.Name,
+		"namespace": target.Namespace,
+	}).Debug("Updating resource")
 	patch, patchType, err := createPatch(target.Mapping, target.Object, currentObj)
 	if err != nil {
 		return fmt.Errorf("failed to create patch: %s", err)
 	}
 	if patch == nil {
-		log.Printf("Looks like there are no changes for %s", target.Name)
+		logger.WithFields(log.Fields{
+			"_module": "client",
+			"_context": "updateResource",
+			"name": target.Name,
+			"namespace": target.Namespace,
+		}).Info("Looks like there are no changes to the resource")
 		// This needs to happen to make sure that tiller has the latest info from the API
 		// Otherwise there will be no labels and other functions that use labels will panic
 		if err := target.Get(); err != nil {
@@ -399,6 +609,13 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
 		return nil
 	}
 
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "updateResource",
+		"name": target.Name,
+		"namespace": target.Namespace,
+		"patch-type": patchType,
+	}).Debug("Sending patch to server")
 	// send patch to server
 	helper := resource.NewHelper(target.Client, target.Mapping)
 	obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch)
@@ -406,6 +623,12 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
 		return err
 	}
 
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "updateResource",
+		"name": target.Name,
+		"namespace": target.Namespace,
+	}).Debug("Refreshing object")
 	target.Refresh(obj, true)
 
 	if !recreate {
@@ -424,6 +647,12 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
 	if err != nil {
 		return nil
 	}
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "updateResource",
+		"name": target.Name,
+		"namespace": target.Namespace,
+	}).Debug("Recreating pods for object")
 	client, _ := c.ClientSet()
 	return recreatePods(client, target.Namespace, selector)
 }
@@ -448,6 +677,11 @@ func getSelectorFromObject(obj runtime.Object) (map[string]string, error) {
 }
 
 func recreatePods(client *internalclientset.Clientset, namespace string, selector map[string]string) error {
+	logger.WithFields(log.Fields{
+		"_module": "client",
+		"_context": "recreatePods",
+		"selector": selector,
+	}).Debug("Using selector to recreate pods")
 	pods, err := client.Pods(namespace).List(api.ListOptions{
 		FieldSelector: fields.Everything(),
 		LabelSelector: labels.Set(selector).AsSelector(),
@@ -458,7 +692,12 @@ func recreatePods(client *internalclientset.Clientset, namespace string, selecto
 
 	// Restart pods
 	for _, pod := range pods.Items {
-		log.Printf("Restarting pod: %v/%v", pod.Namespace, pod.Name)
"_module": "client", + "_context": "recreatePods", + "namespace": pod.Namespace, + "name": pod.Name, + }).Info("Restarting pod") // Delete each pod for get them restarted with changed spec. if err := client.Pods(pod.Namespace).Delete(pod.Name, api.NewPreconditionDeleteOptions(string(pod.UID))); err != nil { @@ -475,7 +714,14 @@ func watchUntilReady(timeout time.Duration, info *resource.Info) error { } kind := info.Mapping.GroupVersionKind.Kind - log.Printf("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "watchUntilReady", + "namespace": info.Namespace, + "name": info.Name, + "kind": kind, + "timeout": timeout, + }).Info("Watching for changes") // What we watch for depends on the Kind. // - For a Job, we watch for completion. @@ -490,17 +736,36 @@ func watchUntilReady(timeout time.Duration, info *resource.Info) error { // we get. We care mostly about jobs, where what we want to see is // the status go into a good state. For other types, like ReplicaSet // we don't really do anything to support these as hooks. - log.Printf("Add/Modify event for %s: %v", info.Name, e.Type) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "watchUntilReady", + "namespace": info.Namespace, + "name": info.Name, + "kind": kind, + "type": e.Type, + }).Info("Add/Modify event on watched resource") if kind == "Job" { return waitForJob(e, info.Name) } return true, nil case watch.Deleted: - log.Printf("Deleted event for %s", info.Name) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "watchUntilReady", + "namespace": info.Namespace, + "name": info.Name, + "kind": kind, + }).Info("Deleted event on watched resource") return true, nil case watch.Error: // Handle error and return with an error. 
- log.Printf("Error event for %s", info.Name) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "watchUntilReady", + "namespace": info.Namespace, + "name": info.Name, + "kind": kind, + }).Error("Error event on watched resource") return true, fmt.Errorf("Failed to deploy %s", info.Name) default: return false, nil @@ -563,12 +828,21 @@ func (c *Client) AsVersionedObject(obj runtime.Object) (runtime.Object, error) { // waitForResources polls to get the current status of all pods, PVCs, and Services // until all are ready or a timeout is reached func (c *Client) waitForResources(timeout time.Duration, created Result) error { - log.Printf("beginning wait for resources with timeout of %v", timeout) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "timeout": timeout, + "num-objects": len(created), + }).Info("Beginning wait for resources") client, _ := c.ClientSet() return wait.Poll(2*time.Second, timeout, func() (bool, error) { pods := []api.Pod{} services := []api.Service{} pvc := []api.PersistentVolumeClaim{} + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + }).Debug("Getting objects to watch from resource list") for _, v := range created { obj, err := c.AsVersionedObject(v.Object) if err != nil && !runtime.IsNotRegisteredError(err) { @@ -576,18 +850,38 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { } switch value := obj.(type) { case (*v1.ReplicationController): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "selector": value.Spec.Selector, + }).Debug("Found ReplicationController. Getting Pods") list, err := getPods(client, value.Namespace, value.Spec.Selector) if err != nil { return false, err } pods = append(pods, list...) case (*v1.Pod): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + }).Debug("Found pod. Updating info from API") pod, err := client.Pods(value.Namespace).Get(value.Name) if err != nil { return false, err } pods = append(pods, *pod) case (*extensions.Deployment): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "selector": value.Spec.Selector.MatchLabels, + }).Debug("Found deployment. Getting ReplicaSets") // Get the RS children first rs, err := client.ReplicaSets(value.Namespace).List(api.ListOptions{ FieldSelector: fields.Everything(), @@ -596,6 +890,13 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { if err != nil { return false, err } + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "num-objects": len(rs.Items), + }).Debug("Found ReplicaSets from Deployment. Getting Pods") for _, r := range rs.Items { list, err := getPods(client, value.Namespace, r.Spec.Selector.MatchLabels) if err != nil { @@ -604,30 +905,63 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { pods = append(pods, list...) } case (*extensions.DaemonSet): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "selector": value.Spec.Selector.MatchLabels, + }).Debug("Found DaemonSet. 
Getting Pods") list, err := getPods(client, value.Namespace, value.Spec.Selector.MatchLabels) if err != nil { return false, err } pods = append(pods, list...) case (*apps.StatefulSet): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "selector": value.Spec.Selector.MatchLabels, + }).Debug("Found StatefulSet. Getting Pods") list, err := getPods(client, value.Namespace, value.Spec.Selector.MatchLabels) if err != nil { return false, err } pods = append(pods, list...) case (*extensions.ReplicaSet): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + "selector": value.Spec.Selector.MatchLabels, + }).Debug("Found ReplicaSet. Getting Pods") list, err := getPods(client, value.Namespace, value.Spec.Selector.MatchLabels) if err != nil { return false, err } pods = append(pods, list...) case (*v1.PersistentVolumeClaim): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + }).Debug("Found PersistentVolumeClaim. Updating info from API") claim, err := client.PersistentVolumeClaims(value.Namespace).Get(value.Name) if err != nil { return false, err } pvc = append(pvc, *claim) case (*v1.Service): + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "namespace": value.Namespace, + "name": value.Name, + }).Debug("Found Service. Updating info from API") svc, err := client.Services(value.Namespace).Get(value.Name) if err != nil { return false, err @@ -635,6 +969,13 @@ func (c *Client) waitForResources(timeout time.Duration, created Result) error { services = append(services, *svc) } } + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForResources", + "num-pods": len(pods), + "num-services": len(services), + "num-volumes": len(pvc), + }).Debug("Checking resources for ready state") return podsReady(pods) && servicesReady(services) && volumesReady(pvc), nil }) } @@ -655,8 +996,14 @@ func waitForJob(e watch.Event, name string) (bool, error) { return true, fmt.Errorf("Job failed: %s", c.Reason) } } - - log.Printf("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "waitForJob", + "name": name, + "active": o.Status.Active, + "failed": o.Status.Failed, + "succeeded": o.Status.Succeeded, + }).Info("Waiting on job completion") return false, nil } @@ -676,6 +1023,11 @@ func scrubValidationError(err error) error { // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase // and returns said phase (PodSucceeded or PodFailed qualify) func (c *Client) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (api.PodPhase, error) { + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "WaitAndGetCompletedPodPhase", + "namespace": namespace, + }).Debug("Creating pod from manifest") infos, err := c.Build(namespace, reader) if err != nil { return api.PodUnknown, err @@ -704,8 +1056,13 @@ func watchPodUntilComplete(timeout time.Duration, info *resource.Info) error { if err != nil { return err } - - log.Printf("Watching pod %s for completion with timeout of %v", info.Name, timeout) + logger.WithFields(log.Fields{ + "_module": "client", + "_context": "watchPodUntilComplete", + 
"namespace": info.Namespace, + "name": info.Name, + "timeout": timeout, + }).Info("Watching pod for completion") _, err = watch.Until(timeout, w, func(e watch.Event) (bool, error) { return conditions.PodCompleted(e) }) diff --git a/pkg/kube/log.go b/pkg/kube/log.go index fbe51823a..01ee78310 100644 --- a/pkg/kube/log.go +++ b/pkg/kube/log.go @@ -20,11 +20,18 @@ import ( "flag" "fmt" "os" + + log "github.com/Sirupsen/logrus" ) +var logger *log.Entry + func init() { if level := os.Getenv("KUBE_LOG_LEVEL"); level != "" { flag.Set("vmodule", fmt.Sprintf("loader=%s,round_trippers=%s,request=%s", level, level, level)) flag.Set("logtostderr", "true") } + logger = log.WithFields(log.Fields{ + "_package": "kube", + }) } diff --git a/pkg/kube/namespace.go b/pkg/kube/namespace.go index 115c1139c..7e23d95a3 100644 --- a/pkg/kube/namespace.go +++ b/pkg/kube/namespace.go @@ -17,12 +17,18 @@ limitations under the License. package kube // import "k8s.io/helm/pkg/kube" import ( + log "github.com/Sirupsen/logrus" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" ) func createNamespace(client internalclientset.Interface, namespace string) error { + logger.WithFields(log.Fields{ + "_module": "namespace", + "_context": "createNamespace", + "namespace": namespace, + }).Debug("Creating new namespace") ns := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: namespace, @@ -37,8 +43,18 @@ func getNamespace(client internalclientset.Interface, namespace string) (*api.Na } func ensureNamespace(client internalclientset.Interface, namespace string) error { + logger.WithFields(log.Fields{ + "_module": "namespace", + "_context": "ensureNamespace", + "namespace": namespace, + }).Debug("Ensuring that namespace exists") _, err := getNamespace(client, namespace) if err != nil && errors.IsNotFound(err) { + logger.WithFields(log.Fields{ + "_module": "namespace", + "_context": "ensureNamespace", + "namespace": namespace, + }).Debug("Namespace does not exist, creating") return createNamespace(client, namespace) } return err diff --git a/pkg/logutil/log.go b/pkg/logutil/log.go new file mode 100644 index 000000000..9dda916c8 --- /dev/null +++ b/pkg/logutil/log.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logutil + +import ( + "fmt" + "strings" + + log "github.com/Sirupsen/logrus" +) + +// LogLevels is the mapping for +var LogLevels = map[string]log.Level{ + "ERROR": log.ErrorLevel, + "INFO": log.InfoLevel, + "DEBUG": log.DebugLevel, +} + +// GetLevel returns the proper logrus level for the given string level. +// It returns an error if the level does not exist +func GetLevel(level string) (log.Level, error) { + var logLevel log.Level + var ok bool + if logLevel, ok = LogLevels[level]; !ok { + return 0, fmt.Errorf("Invalid log level %s given. 
+		return 0, fmt.Errorf("Invalid log level %s given. Must be one of: %s", level, strings.Join(GetLogLevels(), ", "))
+	}
+	return logLevel, nil
+
+}
+
+// GetLogLevels returns a list of allowed log levels. Helpful for error messages.
+func GetLogLevels() []string {
+	var levels []string
+	for k := range LogLevels {
+		levels = append(levels, k)
+	}
+	return levels
+}
diff --git a/pkg/logutil/log_test.go b/pkg/logutil/log_test.go
new file mode 100644
index 000000000..ca98835d6
--- /dev/null
+++ b/pkg/logutil/log_test.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2017 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logutil
+
+import (
+	"testing"
+)
+
+func TestValidGetLevel(t *testing.T) {
+	_, err := GetLevel("ERROR")
+	if err != nil {
+		t.Errorf("Should have gotten valid log level. Got error instead: %v", err)
+	}
+}
+
+func TestInvalidGetLevel(t *testing.T) {
+	logLevel, err := GetLevel("BLAH")
+	if err == nil {
+		t.Errorf("Should have gotten error. Got log level instead: %v", logLevel)
+	}
+}
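
Postscript (illustrative only, not part of the diff): the per-package pattern
that the follow-up PRs would replicate is the one `main` and `kube` use above —
a package-level *log.Entry seeded with a "_package" field in init(), with
"_module" and "_context" fields added at each call site. For a hypothetical
package adopting the convention, it would look like this:

    package storage // hypothetical adopter of the logging convention

    import (
    	log "github.com/Sirupsen/logrus"
    )

    // logger carries the base fields for this package
    var logger *log.Entry

    func init() {
    	logger = log.WithFields(log.Fields{
    		"_package": "storage",
    	})
    }

    // Init logs through the shared package logger plus call-site fields.
    func Init() {
    	logger.WithFields(log.Fields{
    		"_module": "storage",
    		"_context": "Init",
    	}).Debug("Initializing storage backend")
    }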