Merge pull request #1039 from vaikas-google/master

First cut of adding dynamic cluster support to status command
pull/1044/head
Ville Aikas 8 years ago committed by GitHub
commit c2cdb97ba2

@ -22,20 +22,22 @@ option go_package = "release";
// Status defines the status of a release.
message Status {
enum Code {
// Status_UNKNOWN indicates that a release is in an uncertain state.
UNKNOWN = 0;
// Status_DEPLOYED indicates that the release has been pushed to Kubernetes.
DEPLOYED = 1;
// Status_DELETED indicates that a release has been deleted from Kubernetes.
DELETED = 2;
// Status_SUPERSEDED indicates that this release object is outdated and a newer one exists.
SUPERSEDED = 3;
// Status_FAILED indicates that the release was not successfully deployed.
FAILED = 4;
}
Code code = 1;
google.protobuf.Any details = 2;
// Cluster resources as kubectl would print them.
string resources = 3;
}
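For orientation, a minimal sketch (Go, assuming the generated code lives at k8s.io/helm/pkg/proto/hapi/release) of how the new resources field sits next to the existing ones; the values are illustrative only:

package example

import (
	"fmt"

	"k8s.io/helm/pkg/proto/hapi/release" // assumed location of the generated code
)

func printStatus() {
	// Illustrative values only; Resources holds whatever the server captured
	// from the cluster, formatted the way kubectl would print it.
	status := &release.Status{
		Code:      release.Status_DEPLOYED,
		Resources: "==> v1/Pod\nNAME      READY     STATUS\nmy-pod    1/1       Running\n",
	}
	fmt.Printf("Status: %s\nResources:\n%s", status.Code, status.Resources)
}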

@ -68,6 +68,7 @@ func (s *statusCmd) run() error {
fmt.Fprintf(s.out, "Last Deployed: %s\n", timeconv.String(res.Info.LastDeployed))
fmt.Fprintf(s.out, "Status: %s\n", res.Info.Status.Code)
fmt.Fprintf(s.out, "Resources:\n%s\n", res.Info.Status.Resources)
if res.Info.Status.Details != nil {
fmt.Fprintf(s.out, "Details: %s\n", res.Info.Status.Details)
}
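With this change, the status command's output would look roughly like the following (illustrative release and resource values, not taken from a real run):

Last Deployed: Tue Sep 13 12:02:11 2016
Status: DEPLOYED
Resources:
==> v1/Service
NAME           CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
sweet-marmot   10.0.0.42    <none>        80/TCP    5m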

@ -103,6 +103,15 @@ type KubeClient interface {
// by "\n---\n").
Create(namespace string, reader io.Reader) error
// Get gets one or more resources. The returned string has the same format
// kubectl provides, with column headers separating the resource types.
//
// namespace must contain a valid existing namespace.
//
// reader must contain a YAML stream (one or more YAML documents separated
// by "\n---\n").
Get(namespace string, reader io.Reader) (string, error)
// Delete destroys one or more resources.
//
// namespace must contain a valid existing namespace.
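A minimal usage sketch for the new Get method of this interface (the interface shape is from the hunk above; the helper name and wiring are hypothetical):

package example

import (
	"bytes"
	"fmt"
	"io"
	"log"
)

// getter is the slice of KubeClient this sketch needs.
type getter interface {
	Get(namespace string, reader io.Reader) (string, error)
}

// showLiveState feeds a stored manifest (a YAML stream, documents separated
// by "\n---\n") through Get and prints the kubectl-style listing.
func showLiveState(c getter, namespace, manifest string) {
	out, err := c.Get(namespace, bytes.NewBufferString(manifest))
	if err != nil {
		log.Printf("warning: Get failed: %v", err)
		return
	}
	fmt.Println(out)
}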
@ -140,6 +149,12 @@ func (p *PrintingKubeClient) Create(ns string, r io.Reader) error {
return err
}
// Get prints the manifest of the resources that a real KubeClient would query.
func (p *PrintingKubeClient) Get(ns string, r io.Reader) (string, error) {
_, err := io.Copy(p.Out, r)
return "", err
}
// Delete implements KubeClient delete.
//
// It only prints out the content to be deleted.

@ -84,6 +84,9 @@ type mockKubeClient struct {
func (k *mockKubeClient) Create(ns string, r io.Reader) error {
return nil
}
func (k *mockKubeClient) Get(ns string, r io.Reader) (string, error) {
return "", nil
}
func (k *mockKubeClient) Delete(ns string, r io.Reader) error {
return nil
}

@ -158,6 +158,17 @@ func (s *releaseServer) GetReleaseStatus(c ctx.Context, req *services.GetRelease
if rel.Info == nil {
return nil, errors.New("release info is missing")
}
// We have the release status as it was recorded; now reconcile the stored
// manifest with the actual state of the cluster.
kubeCli := s.env.KubeClient
resp, err := kubeCli.Get(rel.Namespace, bytes.NewBufferString(rel.Manifest))
if err != nil {
log.Printf("warning: Get for %s failed: %v", rel.Name, err)
return nil, err
}
rel.Info.Status.Resources = resp
return &services.GetReleaseStatusResponse{Info: rel.Info}, nil
}
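On the client side, the extra field arrives through the existing RPC. A hedged sketch, assuming the generated stubs live at k8s.io/helm/pkg/proto/hapi/services and an open gRPC connection to tiller; constructor and field names follow standard protoc-gen-go output:

package example

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"k8s.io/helm/pkg/proto/hapi/services" // assumed location of the generated stubs
)

// showReleaseStatus asks tiller for a release's status and prints the
// cluster-resource listing that GetReleaseStatus now fills in.
func showReleaseStatus(conn *grpc.ClientConn, name string) error {
	rlc := services.NewReleaseServiceClient(conn)
	res, err := rlc.GetReleaseStatus(context.Background(), &services.GetReleaseStatusRequest{Name: name})
	if err != nil {
		return err
	}
	fmt.Printf("Status: %s\nResources:\n%s\n", res.Info.Status.Code, res.Info.Status.Resources)
	return nil
}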

@ -17,6 +17,7 @@ limitations under the License.
package kube // import "k8s.io/helm/pkg/kube"
import (
"bytes"
"fmt"
"io"
"log"
@ -50,7 +51,7 @@ func New(config clientcmd.ClientConfig) *Client {
}
}
// ResourceActorFunc performs an action on a single resource.
type ResourceActorFunc func(*resource.Info) error
// Create creates Kubernetes resources from an io.Reader.
@ -63,6 +64,59 @@ func (c *Client) Create(namespace string, reader io.Reader) error {
return perform(c, namespace, reader, createResource)
}
// Get gets Kubernetes resources as a pretty-printed string.
//
// Namespace will set the namespace.
func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
// Since we don't know what order the objects arrive in, group them by type so that,
// when printed, each type's column headers apply to its own subgroup.
objs := make(map[string][]runtime.Object)
err := perform(c, namespace, reader, func(info *resource.Info) error {
log.Printf("Doing get for: '%s'", info.Name)
obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name, info.Export)
if err != nil {
return err
}
// We need to grab the ObjectReference so we can correctly group the objects.
or, err := api.GetReference(obj)
if err != nil {
log.Printf("FAILED GetReference for: %#v\n%v", obj, err)
return err
}
// Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple
// versions per cluster, but this certainly won't hurt anything, so let's be safe.
objType := or.APIVersion + "/" + or.Kind
objs[objType] = append(objs[objType], obj)
return nil
})
// Now that the objects are grouped by type (v1/Pod, v1/Service, and so on), iterate
// over the groups and print them. The printer emits the column header only when the
// object type changes, which is what we rely on here; it does not, however, keep
// track of tab widths.
buf := new(bytes.Buffer)
p := kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{})
for t, ot := range objs {
_, err = buf.WriteString("==> " + t + "\n")
if err != nil {
return "", err
}
for _, o := range ot {
err = p.PrintObj(o, buf)
if err != nil {
log.Printf("failed to print object type '%s', object: '%s' :\n %v", t, o, err)
return "", err
}
}
_, err := buf.WriteString("\n")
if err != nil {
return "", err
}
}
return buf.String(), err
}
// Update reads in the current configuration and a modified configuration from io.Reader
// and creates resources that don't already exist, updates resources that have been modified
// and deletes resources from the current configuration that are not present in the
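Returning to Client.Get above, a minimal caller sketch (the helper is hypothetical; the *kube.Client would typically come from kube.New with a client config):

package example

import (
	"bytes"
	"fmt"

	"k8s.io/helm/pkg/kube"
)

// dumpLiveResources runs a stored manifest through Client.Get and prints the
// grouped, kubectl-style listing it returns.
func dumpLiveResources(c *kube.Client, namespace, manifest string) error {
	out, err := c.Get(namespace, bytes.NewBufferString(manifest))
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}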

@ -53,6 +53,8 @@ func (Status_Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []
type Status struct {
Code Status_Code `protobuf:"varint,1,opt,name=code,enum=hapi.release.Status_Code" json:"code,omitempty"`
Details *google_protobuf1.Any `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"`
// Cluster resources as kubectl would print them.
Resources string `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
@ -73,20 +75,21 @@ func init() {
}
var fileDescriptor3 = []byte{
// 226 bytes of a gzipped FileDescriptorProto
// 247 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0x48, 0x2c, 0xc8,
0xd4, 0x2f, 0x4a, 0xcd, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x49, 0xe9, 0x41, 0xa5, 0xa4, 0x24, 0xd3, 0xf3,
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0x72, 0x49, 0xa5, 0x69, 0xfa, 0x89, 0x79, 0x95, 0x10, 0x85,
0x4a, 0x9b, 0x19, 0xb9, 0xd8, 0x82, 0xc1, 0x3a, 0x85, 0x74, 0xb9, 0x58, 0x92, 0xf3, 0x53, 0x52,
0x4a, 0x17, 0x19, 0xb9, 0xd8, 0x82, 0xc1, 0x3a, 0x85, 0x74, 0xb9, 0x58, 0x92, 0xf3, 0x53, 0x52,
0x25, 0x18, 0x15, 0x18, 0x35, 0xf8, 0x8c, 0x24, 0xf5, 0x90, 0x8d, 0xd0, 0x83, 0xa8, 0xd1, 0x73,
0x06, 0x2a, 0x08, 0x02, 0x2b, 0x13, 0xd2, 0xe3, 0x62, 0x4f, 0x49, 0x2d, 0x49, 0xcc, 0xcc, 0x29,
0x96, 0x60, 0x02, 0xea, 0xe0, 0x36, 0x12, 0xd1, 0x83, 0x58, 0xa3, 0x07, 0xb3, 0x46, 0xcf, 0x31,
0xaf, 0x32, 0x08, 0xa6, 0x48, 0xc9, 0x8b, 0x8b, 0x05, 0xa4, 0x5b, 0x88, 0x9b, 0x8b, 0x3d, 0xd4,
0xcf, 0xdb, 0xcf, 0x3f, 0xdc, 0x4f, 0x80, 0x41, 0x88, 0x87, 0x8b, 0xc3, 0xc5, 0x35, 0xc0, 0xc7,
0x3f, 0xd2, 0xd5, 0x45, 0x80, 0x11, 0x24, 0xe5, 0xe2, 0xea, 0xe3, 0x1a, 0x02, 0xe4, 0x30, 0x09,
0xf1, 0x71, 0x71, 0x05, 0x87, 0x06, 0xb8, 0x06, 0x05, 0xbb, 0xba, 0x00, 0xf9, 0xcc, 0x42, 0x5c,
0x5c, 0x6c, 0x6e, 0x8e, 0x9e, 0x3e, 0x40, 0x36, 0x8b, 0x13, 0x67, 0x14, 0x3b, 0xd4, 0x61, 0x49,
0x6c, 0x60, 0xdb, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x99, 0x9a, 0x3b, 0x0d, 0x01,
0x00, 0x00,
0xaf, 0x32, 0x08, 0xa6, 0x48, 0x48, 0x86, 0x8b, 0xb3, 0x28, 0xb5, 0x38, 0xbf, 0xb4, 0x28, 0x39,
0xb5, 0x58, 0x82, 0x19, 0xa8, 0x83, 0x33, 0x08, 0x21, 0xa0, 0xe4, 0xc5, 0xc5, 0x02, 0x32, 0x5b,
0x88, 0x9b, 0x8b, 0x3d, 0xd4, 0xcf, 0xdb, 0xcf, 0x3f, 0xdc, 0x4f, 0x80, 0x41, 0x88, 0x87, 0x8b,
0xc3, 0xc5, 0x35, 0xc0, 0xc7, 0x3f, 0xd2, 0xd5, 0x45, 0x80, 0x11, 0x24, 0xe5, 0xe2, 0xea, 0xe3,
0x1a, 0x02, 0xe4, 0x30, 0x09, 0xf1, 0x71, 0x71, 0x05, 0x87, 0x06, 0xb8, 0x06, 0x05, 0xbb, 0xba,
0x00, 0xf9, 0xcc, 0x42, 0x5c, 0x5c, 0x6c, 0x6e, 0x8e, 0x9e, 0x3e, 0x40, 0x36, 0x8b, 0x13, 0x67,
0x14, 0x3b, 0xd4, 0xd9, 0x49, 0x6c, 0x60, 0xb7, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd1,
0xc3, 0xbf, 0x50, 0x2b, 0x01, 0x00, 0x00,
}
