pull/2411/merge
Y.W 9 years ago committed by GitHub
commit 8c993d40c2

@ -14,6 +14,7 @@ Use Helm to...
- Intelligently manage your Kubernetes manifest files
- Manage releases of Helm packages
## Helm in a Handbasket
Helm is a tool that streamlines installing and managing Kubernetes applications.

@ -154,6 +154,10 @@ message GetReleaseStatusRequest {
string name = 1;
// Version is the version of the release
int32 version = 2;
// Kind is the kind of the resource to display
string kind = 3;
// Instance is the instance of the resource to display
string instance = 4;
}
// GetReleaseStatusResponse is the response indicating the status of the named release.

@ -47,7 +47,9 @@ type statusCmd struct {
release string
out io.Writer
client helm.Interface
version int32
kind string
instance string
version int32
}
func newStatusCmd(client helm.Interface, out io.Writer) *cobra.Command {
@ -73,13 +75,18 @@ func newStatusCmd(client helm.Interface, out io.Writer) *cobra.Command {
},
}
cmd.PersistentFlags().StringVar(&status.instance, "instance","","if set, display status of an instance in assigned kind.")
cmd.PersistentFlags().StringVar(&status.kind, "kind","","if set, display status of an assigned kind resource")
cmd.PersistentFlags().Int32Var(&status.version, "revision", 0, "if set, display the status of the named release with revision")
return cmd
}
func (s *statusCmd) run() error {
res, err := s.client.ReleaseStatus(s.release, helm.StatusReleaseVersion(s.version))
fmt.Printf("Kind=%s,instnace=%s",s.kind,s.instance)
res, err := s.client.ReleaseStatus(s.release, helm.StatusReleaseVersion(s.version,s.kind,s.instance))
if err != nil {
return prettyError(err)
}

@ -128,8 +128,9 @@ func (r *ReleaseModuleServiceServer) UpgradeRelease(ctx context.Context, in *rud
func (r *ReleaseModuleServiceServer) ReleaseStatus(ctx context.Context, in *rudderAPI.ReleaseStatusRequest) (*rudderAPI.ReleaseStatusResponse, error) {
grpclog.Print("status")
var filter kube.FilterStruct = kube.FilterStruct{Kind:"",Instance:""}
resp, err := kubeClient.Get(in.Release.Namespace, bytes.NewBufferString(in.Release.Manifest))
resp, err := kubeClient.Get(in.Release.Namespace, bytes.NewBufferString(in.Release.Manifest),filter)
in.Release.Info.Status.Resources = resp
return &rudderAPI.ReleaseStatusResponse{
Release: in.Release,

@ -23,6 +23,8 @@ helm status [flags] RELEASE_NAME
### Options
```
--instance string if set, display status of an instance in assigned kind.
--kind string if set, display status of an assigned kind resource
--revision int32 if set, display the status of the named release with revision
--tls enable TLS for request
--tls-ca-cert string path to TLS CA certificate file (default "$HELM_HOME/ca.pem")
@ -44,4 +46,4 @@ helm status [flags] RELEASE_NAME
### SEE ALSO
* [helm](helm.md) - The Helm package manager for Kubernetes.
###### Auto generated by spf13/cobra on 16-Apr-2017
###### Auto generated by spf13/cobra on 9-May-2017

@ -1,6 +1,4 @@
.TH "HELM" "1" "Apr 2017" "Auto generated by spf13/cobra" ""
.nh
.ad l
.TH "HELM" "1" "May 2017" "Auto generated by spf13/cobra" "" ""
.SH NAME
@ -26,6 +24,14 @@ The status consists of:
.SH OPTIONS
.PP
\fB\-\-instance\fP=""
if set, display status of an instance in assigned kind.
.PP
\fB\-\-kind\fP=""
if set, display status of an assigned kind resource
.PP
\fB\-\-revision\fP=0
if set, display the status of the named release with revision
@ -80,4 +86,4 @@ The status consists of:
.SH HISTORY
.PP
16\-Apr\-2017 Auto generated by spf13/cobra
9\-May\-2017 Auto generated by spf13/cobra

@ -285,7 +285,7 @@ func TestReleaseStatus_VerifyOptions(t *testing.T) {
return errSkip
})
if _, err := NewClient(b4c).ReleaseStatus(releaseName, StatusReleaseVersion(revision)); err != errSkip {
if _, err := NewClient(b4c).ReleaseStatus(releaseName, StatusReleaseVersion(revision,"","")); err != errSkip {
t.Fatalf("did not expect error but got (%v)\n``", err)
}
}

@ -371,9 +371,11 @@ type StatusOption func(*options)
// StatusReleaseVersion will instruct Tiller to retrieve the status
// of a particular version of a release.
func StatusReleaseVersion(version int32) StatusOption {
func StatusReleaseVersion(version int32,kind string,instance string) StatusOption {
return func(opts *options) {
opts.statusReq.Version = version
opts.statusReq.Kind = kind
opts.statusReq.Instance = instance
}
}

@ -145,32 +145,65 @@ func (c *Client) Build(namespace string, reader io.Reader) (Result, error) {
// Get gets kubernetes resources as pretty printed string
//
// Namespace will set the namespace
func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
func (c *Client) Get(namespace string, reader io.Reader, filter FilterStruct) (string, error) {
// Since we don't know what order the objects come in, let's group them by the types, so
// that when we print them, they come looking good (headers apply to subgroups, etc.)
objs := make(map[string][]runtime.Object)
infos, err := c.BuildUnstructured(namespace, reader)
infoAllKinds, err := c.BuildUnstructured(namespace, reader)
if err != nil {
return "", err
}
infos := infoAllKinds.Filter(filter.Filter)
var objPods map[string][]api.Pod
if (!filter.IsFilter()) || filter.IsPodType() {
objPods = c.getRelationPods(infoAllKinds)
}
log.Printf("objPods %+v\n", objPods)
missing := []string{}
err = perform(infos, func(info *resource.Info) error {
log.Printf("Doing get for %s: %q", info.Mapping.GroupVersionKind.Kind, info.Name)
if err := info.Get(); err != nil {
log.Printf("WARNING: Failed Get for resource %q: %s", info.Name, err)
missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name))
if len(infos) !=0 {
err = perform(infos, func(info *resource.Info) error {
if err := info.Get(); err != nil {
log.Printf("WARNING: Failed Get for resource %q: %s", info.Name, err)
missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name))
return nil
}
// Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple
// versions per cluster, but this certainly won't hurt anything, so let's be safe.
gvk := info.ResourceMapping().GroupVersionKind
vk := gvk.Version + "/" + gvk.Kind
if gvk.Kind != "Pod" {
objs[vk] = append(objs[vk], info.Object)
} else {
if !IsFoundPodInfo(objPods[vk], info) {
objs[vk] = append(objs[vk], info.Object)
}
}
return nil
})
if err != nil {
return "", err
}
}else{
if !filter.IsFilter(){
return "",ErrNoObjectsVisited
}else if (!filter.IsPodType()) || (len(objPods) == 0) {
return "",nil
}
}
// Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple
// versions per cluster, but this certainly won't hurt anything, so let's be safe.
gvk := info.ResourceMapping().GroupVersionKind
vk := gvk.Version + "/" + gvk.Kind
objs[vk] = append(objs[vk], info.Object)
return nil
})
if err != nil {
return "", err
//here, we will add the objPods to the objs
for key,podItems := range objPods{
for _,pod := range podItems {
objs[key] = append(objs[key],&pod)
}
}
// Ok, now we have all the objects grouped by types (say, by v1/Pod, v1/Service, etc.), so
@ -595,3 +628,89 @@ func watchPodUntilComplete(timeout time.Duration, info *resource.Info) error {
return err
}
// getRelationPods returns, for every resource in infos, the pods that
// the resource's label selector matches, grouped by "version/Kind".
// Collection is best-effort: a failure for one resource is logged and
// does not discard the pods already found for the others.
func (c *Client) getRelationPods(infos []*resource.Info) map[string][]api.Pod {
	objPods := make(map[string][]api.Pod)
	for _, info := range infos {
		var err error
		if objPods, err = c.getSelectRelationPod(info, objPods); err != nil {
			// Previously this error was silently discarded with `_`.
			log.Printf("WARNING: Failed to get related pods for %q: %s", info.Name, err)
		}
	}
	return objPods
}
// getSelectRelationPod returns objPods extended with the pods matched
// by the label selector of the resource described by info. Pods already
// present (same namespace and name) are not added twice. Resources of
// types that carry no selector are skipped without error.
func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][]api.Pod) (map[string][]api.Pod, error) {
	if err := info.Get(); err != nil {
		return objPods, err
	}
	versioned, err := c.AsVersionedObject(info.Object)
	if runtime.IsNotRegisteredError(err) {
		// Unregistered type: it cannot have a pod selector; not an error.
		return objPods, nil
	}
	if err != nil {
		return objPods, err
	}
	selector, err := getSelectorFromObject(versioned)
	if err != nil {
		return objPods, err
	}
	// Previously the ClientSet error was silently discarded.
	client, err := c.ClientSet()
	if err != nil {
		return objPods, err
	}
	pods, err := client.Core().Pods(info.Namespace).List(metav1.ListOptions{
		FieldSelector: fields.Everything().String(),
		LabelSelector: labels.Set(selector).AsSelector().String(),
	})
	if err != nil {
		return objPods, err
	}
	for _, pod := range pods.Items {
		log.Printf("get select relation pod: %v/%v", pod.Namespace, pod.Name)
		// Listed objects may come back with empty TypeMeta; default it so
		// the grouping key below is always "v1/Pod".
		if pod.APIVersion == "" {
			pod.APIVersion = "v1"
		}
		if pod.Kind == "" {
			pod.Kind = "Pod"
		}
		vk := pod.GroupVersionKind().Version + "/" + pod.GroupVersionKind().Kind
		if !IsFoundPod(objPods[vk], pod) {
			objPods[vk] = append(objPods[vk], pod)
		}
	}
	return objPods, nil
}
// IsFoundPod reports whether a pod with the same namespace and name as
// pod is already present in podItem.
func IsFoundPod(podItem []api.Pod, pod api.Pod) bool {
	for _, existing := range podItem {
		if existing.Namespace != pod.Namespace {
			continue
		}
		if existing.Name == pod.Name {
			return true
		}
	}
	return false
}
// IsFoundPodInfo reports whether the resource described by podInfo
// (matched by namespace and name) is already present in podItem.
func IsFoundPodInfo(podItem []api.Pod, podInfo *resource.Info) bool {
	for _, existing := range podItem {
		if existing.Namespace != podInfo.Namespace {
			continue
		}
		if existing.Name == podInfo.Name {
			return true
		}
	}
	return false
}

@ -323,8 +323,9 @@ func TestGet(t *testing.T) {
c := &Client{Factory: f}
// Test Success
var filter FilterStruct
data := strings.NewReader("kind: Pod\napiVersion: v1\nmetadata:\n name: otter")
o, err := c.Get("default", data)
o, err := c.Get("default", data,filter)
if err != nil {
t.Errorf("Expected missing results, got %q", err)
}
@ -334,7 +335,7 @@ func TestGet(t *testing.T) {
// Test failure
data = strings.NewReader("kind: Pod\napiVersion: v1\nmetadata:\n name: starfish")
o, err = c.Get("default", data)
o, err = c.Get("default", data,filter)
if err != nil {
t.Errorf("Expected missing results, got %q", err)
}

@ -21,6 +21,35 @@ import "k8s.io/kubernetes/pkg/kubectl/resource"
// Result provides convenience methods for comparing collections of Infos.
type Result []*resource.Info
// FilterStruct selects resources by Kind and/or Instance (resource
// name). An empty field matches everything along that dimension.
type FilterStruct struct {
	Kind     string
	Instance string
}

// Filter reports whether info matches the configured Kind and Instance.
// A nil info never matches.
func (fs *FilterStruct) Filter(info *resource.Info) bool {
	if info == nil {
		return false
	}
	if fs.Kind != "" && fs.Kind != info.Mapping.GroupVersionKind.Kind {
		return false
	}
	return fs.Instance == "" || fs.Instance == info.Name
}

// IsFilter reports whether any filtering is configured at all.
func (fs *FilterStruct) IsFilter() bool {
	return fs.Kind != "" || fs.Instance != ""
}

// IsPodType reports whether the filter targets the Pod kind.
func (fs *FilterStruct) IsPodType() bool {
	return fs.Kind == "Pod"
}
// Append adds an Info to the Result.
func (r *Result) Append(val *resource.Info) {
*r = append(*r, val)

@ -248,6 +248,10 @@ type GetReleaseStatusRequest struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Version is the version of the release
Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
// Kind is the kind of the resource to display
Kind string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
// Instance is the instance of the resource to display
Instance string `protobuf:"bytes,4,opt,name=instance" json:"instance,omitempty"`
}
func (m *GetReleaseStatusRequest) Reset() { *m = GetReleaseStatusRequest{} }

@ -23,6 +23,7 @@ import (
"os"
"testing"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/proto/hapi/release"
tillerEnv "k8s.io/helm/pkg/tiller/environment"
)
@ -68,8 +69,9 @@ func TestDeleteTestPods(t *testing.T) {
t.Errorf("Expected 0 errors, got at least one: %v", stream.messages)
}
var filter kube.FilterStruct
for _, testManifest := range mockTestSuite.TestManifests {
if _, err := mockTestEnv.KubeClient.Get(mockTestEnv.Namespace, bytes.NewBufferString(testManifest)); err == nil {
if _, err := mockTestEnv.KubeClient.Get(mockTestEnv.Namespace, bytes.NewBufferString(testManifest),filter); err == nil {
t.Error("Expected error, got nil")
}
}
@ -122,7 +124,7 @@ func newGetFailingKubeClient() *getFailingKubeClient {
}
}
func (p *getFailingKubeClient) Get(ns string, r io.Reader) (string, error) {
func (p *getFailingKubeClient) Get(ns string, r io.Reader,filter kube.FilterStruct) (string, error) {
return "", errors.New("In the end, they did not find Nemo.")
}

@ -23,6 +23,7 @@ import (
"google.golang.org/grpc"
rudderAPI "k8s.io/helm/pkg/proto/hapi/rudder"
)
// GrpcPort specifies port on which rudder will spawn a server

@ -123,7 +123,7 @@ type KubeClient interface {
//
// reader must contain a YAML stream (one or more YAML documents separated
// by "\n---\n").
Get(namespace string, reader io.Reader) (string, error)
Get(namespace string, reader io.Reader, filter kube.FilterStruct) (string, error)
// Delete destroys one or more resources.
//
@ -170,7 +170,7 @@ func (p *PrintingKubeClient) Create(ns string, r io.Reader, timeout int64, shoul
}
// Get prints the values of what would be created with a real KubeClient.
func (p *PrintingKubeClient) Get(ns string, r io.Reader) (string, error) {
func (p *PrintingKubeClient) Get(ns string, r io.Reader,filter kube.FilterStruct) (string, error) {
_, err := io.Copy(p.Out, r)
return "", err
}

@ -42,7 +42,7 @@ type mockKubeClient struct{}
func (k *mockKubeClient) Create(ns string, r io.Reader, timeout int64, shouldWait bool) error {
return nil
}
func (k *mockKubeClient) Get(ns string, r io.Reader) (string, error) {
func (k *mockKubeClient) Get(ns string, r io.Reader,filter kube.FilterStruct) (string, error) {
return "", nil
}
func (k *mockKubeClient) Delete(ns string, r io.Reader) error {

@ -71,7 +71,9 @@ func (m *LocalReleaseModule) Rollback(current, target *release.Release, req *ser
// Status returns kubectl-like formatted status of release objects,
// optionally restricted to the Kind/Instance named in the request
// (empty values mean no filtering). The merged diff had left the old
// unfiltered `return` in front of the new body, making the new lines
// unreachable; only the filtered call is kept.
func (m *LocalReleaseModule) Status(r *release.Release, req *services.GetReleaseStatusRequest, env *environment.Environment) (string, error) {
	filter := kube.FilterStruct{Kind: req.Kind, Instance: req.Instance}
	return env.KubeClient.Get(r.Namespace, bytes.NewBufferString(r.Manifest), filter)
}
// Delete deletes the release and returns manifests that were kept in the deletion process

@ -264,6 +264,7 @@ func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetRelease
// Ok, we got the status of the release as we had jotted down, now we need to match the
// manifest we stashed away with reality from the cluster.
resp, err := s.ReleaseModule.Status(rel, req, s.env)
if sc == release.Status_DELETED || sc == release.Status_FAILED {
// Skip errors if this is already deleted or failed.
return statusResp, nil

@ -1367,6 +1367,8 @@ _helm_status()
flags_with_completion=()
flags_completion=()
flags+=("--instance=")
flags+=("--kind=")
flags+=("--revision=")
flags+=("--tls")
local_nonpersistent_flags+=("--tls")

Loading…
Cancel
Save