pull/606/merge
Adam Reese 10 years ago
commit 86415153ea

@ -15,6 +15,8 @@
GO_DIRS ?= $(shell glide nv -x ) GO_DIRS ?= $(shell glide nv -x )
GO_PKGS ?= $(shell glide nv) GO_PKGS ?= $(shell glide nv)
BIN_DIR := bin
PATH_WITH_BIN = PATH="$(shell pwd)/$(BIN_DIR):$(PATH)"
ROOTFS := rootfs ROOTFS := rootfs
CLIENT := cmd/helm CLIENT := cmd/helm
@ -86,6 +88,18 @@ test-flake8:
test-style: test-style:
@scripts/validate-go.sh @scripts/validate-go.sh
.PHONY: test-e2e
test-e2e: container local-cluster-up
$(PATH_WITH_BIN) go test -tags=e2e ./test/e2e -v --manager-image=${DOCKER_REGISTRY}/manager:${TAG} --resourcifier-image=${DOCKER_REGISTRY}/resourcifier:${TAG} --expandybird-image=${DOCKER_REGISTRY}/expandybird:${TAG}
.PHONY: local-cluster-up
local-cluster-up:
@scripts/kube-up.sh
.PHONY: local-cluster-down
local-cluster-down:
@scripts/kube-down.sh
HAS_GLIDE := $(shell command -v glide;) HAS_GLIDE := $(shell command -v glide;)
HAS_GOLINT := $(shell command -v golint;) HAS_GOLINT := $(shell command -v golint;)
HAS_GOVET := $(shell command -v go tool vet;) HAS_GOVET := $(shell command -v go tool vet;)

@ -1,4 +1,10 @@
machine: machine:
pre:
- curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.3
services:
- docker
environment: environment:
GLIDE_VERSION: "0.10.1" GLIDE_VERSION: "0.10.1"
GO15VENDOREXPERIMENT: 1 GO15VENDOREXPERIMENT: 1
@ -15,6 +21,7 @@ dependencies:
- tar -C $HOME -xzf go1.6.linux-amd64.tar.gz - tar -C $HOME -xzf go1.6.linux-amd64.tar.gz
- go version - go version
- go env - go env
- docker info
- sudo chown -R $(whoami):staff /usr/local - sudo chown -R $(whoami):staff /usr/local
- cd $GOPATH - cd $GOPATH
- mkdir -p $GOPATH/src/$IMPORT_PATH - mkdir -p $GOPATH/src/$IMPORT_PATH
@ -30,4 +37,4 @@ dependencies:
test: test:
override: override:
- cd $GOPATH/src/$IMPORT_PATH && make bootstrap test - cd $GOPATH/src/$IMPORT_PATH && make info bootstrap test test-e2e DOCKER_REGISTRY=e2e

@ -61,7 +61,7 @@ else
endif endif
.PHONY: container .PHONY: container
container: .project .docker binary extras container: .project binary extras
docker build -t $(FULL_IMAGE):$(TAG) -f Dockerfile . docker build -t $(FULL_IMAGE):$(TAG) -f Dockerfile .
docker tag -f $(FULL_IMAGE):$(TAG) $(FULL_IMAGE):latest docker tag -f $(FULL_IMAGE):$(TAG) $(FULL_IMAGE):latest

@ -44,11 +44,3 @@ delete_container() {
docker wait "${container[@]}" &>/dev/null || : docker wait "${container[@]}" &>/dev/null || :
docker rm --force --volumes "${container[@]}" &>/dev/null || : docker rm --force --volumes "${container[@]}" &>/dev/null || :
} }
dev_registry() {
if docker inspect registry >/dev/null 2>&1; then
docker start registry
else
docker run --restart="always" -d -p 5000:5000 --name registry registry:2
fi
}

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Abort on any command failure, including failures inside pipelines.
set -o errexit
set -o pipefail
# Enable shell tracing when TRACE is set in the caller's environment.
[[ "$TRACE" ]] && set -x
# Resolve the repository root relative to this script's own location.
HELM_ROOT="${BASH_SOURCE[0]%/*}/.."
cd "$HELM_ROOT"
# Run only the e2e-tagged Go tests under ./test/e2e.
go test -v ./test/e2e -tags e2e
exit 0

@ -21,10 +21,12 @@ set -eo pipefail
[[ "$TRACE" ]] && set -x [[ "$TRACE" ]] && set -x
HELM_ROOT="${BASH_SOURCE[0]%/*}/.." HELM_ROOT="${BASH_SOURCE[0]%/*}/.."
source "${HELM_ROOT}/scripts/common.sh" cd "$HELM_ROOT"
source "${HELM_ROOT}/scripts/docker.sh"
K8S_VERSION=${K8S_VERSION:-1.2.1} source ./scripts/common.sh
source ./scripts/docker.sh
KUBE_VERSION=${KUBE_VERSION:-1.2.1}
KUBE_PORT=${KUBE_PORT:-8080} KUBE_PORT=${KUBE_PORT:-8080}
KUBE_MASTER_IP=${KUBE_MASTER_IP:-$DOCKER_HOST_IP} KUBE_MASTER_IP=${KUBE_MASTER_IP:-$DOCKER_HOST_IP}
KUBE_MASTER_IP=${KUBE_MASTER_IP:-localhost} KUBE_MASTER_IP=${KUBE_MASTER_IP:-localhost}
@ -100,7 +102,7 @@ start_kubernetes() {
--pid=host \ --pid=host \
--privileged=true \ --privileged=true \
-d \ -d \
gcr.io/google_containers/hyperkube-amd64:v${K8S_VERSION} \ gcr.io/google_containers/hyperkube-amd64:v${KUBE_VERSION} \
/hyperkube kubelet \ /hyperkube kubelet \
--hostname-override="127.0.0.1" \ --hostname-override="127.0.0.1" \
--address="0.0.0.0" \ --address="0.0.0.0" \
@ -133,13 +135,13 @@ wait_for_kubernetes_master() {
create_kube_system_namespace() { create_kube_system_namespace() {
echo "Creating kube-system namespace..." echo "Creating kube-system namespace..."
$KUBECTL create -f "${HELM_ROOT}/scripts/cluster/kube-system.yaml" >/dev/null $KUBECTL create -f ./scripts/cluster/kube-system.yaml >/dev/null
} }
create_kube_dns() { create_kube_dns() {
echo "Setting up internal dns..." echo "Setting up internal dns..."
$KUBECTL create -f "${HELM_ROOT}/scripts/cluster/skydns.yaml" >/dev/null $KUBECTL create -f ./scripts/cluster/skydns.yaml >/dev/null
} }
# Generate kubeconfig data for the created cluster. # Generate kubeconfig data for the created cluster.
@ -175,6 +177,49 @@ cleanup_volumes() {
fi fi
} }
# Map `uname` / `uname -m` output onto the platform/arch pair used in
# Kubernetes release download URLs. Sets $platform and $arch, or calls
# error_exit (defined in the sourced common helpers) on unsupported systems.
uname=$(uname)
if [[ "${uname}" == "Darwin" ]]; then
  platform="darwin"
elif [[ "${uname}" == "Linux" ]]; then
  platform="linux"
else
  error_exit "unsupported platform: (${uname})."
fi

machine=$(uname -m)
if [[ "${machine}" == "x86_64" ]]; then
  arch="amd64"
elif [[ "${machine}" == "i686" ]]; then
  arch="386"
# The glob patterns below must be UNQUOTED: inside [[ == ]], a quoted
# right-hand side is matched literally, so "arm*" could never match
# real machine strings like armv7l. Same for s390x.
elif [[ "${machine}" == arm* ]]; then
  arch="arm"
elif [[ "${machine}" == s390x* ]]; then
  arch="s390x"
else
  error_exit "unsupported architecture (${machine})."
fi
# download_kubectl fetches the kubectl binary matching $KUBE_VERSION,
# $platform and $arch into ./bin and makes it executable.
download_kubectl() {
  echo "Downloading kubectl binary..."
  kubectl_url="https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/${platform}/${arch}/kubectl"
  (
    cd ./bin
    # Clean up anything old. -f is required: the surrounding script runs
    # under `set -o errexit`, so a plain `rm` would abort the whole
    # cluster bring-up whenever no previous binary exists.
    rm -f ./kubectl
    if [[ $(which wget) ]]; then
      wget "${kubectl_url}"
    elif [[ $(which curl) ]]; then
      curl -OL "${kubectl_url}"
    else
      error_exit "Couldn't find curl or wget. Bailing out."
    fi
    chmod a+x kubectl
  )
}
main() {
verify_prereqs verify_prereqs
cleanup_volumes cleanup_volumes
@ -182,6 +227,7 @@ if is_docker_machine; then
setup_iptables setup_iptables
fi fi
download_kubectl
start_kubernetes start_kubernetes
wait_for_kubernetes_master wait_for_kubernetes_master
@ -191,5 +237,7 @@ wait_for_kubernetes_cluster
create_kubeconfig create_kubeconfig
$KUBECTL cluster-info $KUBECTL cluster-info
}
main "$@"

@ -0,0 +1,83 @@
// +build e2e
package e2e
import (
"bytes"
"fmt"
"os/exec"
"regexp"
"strings"
"testing"
"time"
)
// Cmd provides helpers for command output.
// It wraps one external command invocation, capturing the exit status and
// both output streams so tests can inspect them after the run.
type Cmd struct {
	t      *testing.T   // test to log against and to fail on misuse
	path   string       // executable to run
	args   []string     // arguments passed to the executable
	ran    bool         // set once exec has completed (guards the accessors)
	status error        // exit status returned by the last run
	stdout, stderr bytes.Buffer // captured output streams
}
// String renders the command roughly as it would appear on a shell line:
// the executable path followed by its space-joined arguments.
func (h *Cmd) String() string {
	joined := strings.Join(h.args, " ")
	return h.path + " " + joined
}
// exec runs the command once, capturing stdout and stderr into the Cmd's
// buffers, logging both streams and the elapsed wall time to the test log.
// The exit status is stored in h.status and also returned; h.ran is set so
// the output accessors know a run has happened.
func (h *Cmd) exec() error {
	cmd := exec.Command(h.path, h.args...)
	// Reset the buffers so a repeated exec does not accumulate old output.
	h.stdout.Reset()
	h.stderr.Reset()
	cmd.Stdout = &h.stdout
	cmd.Stderr = &h.stderr
	h.t.Logf("Executing command: %s", h)
	start := time.Now()
	h.status = cmd.Run()
	h.t.Logf("Finished in %v", time.Since(start))
	// Only log streams that actually produced output, to keep logs readable.
	if h.stdout.Len() > 0 {
		h.t.Logf("standard output:\n%s", h.stdout.String())
	}
	if h.stderr.Len() > 0 {
		h.t.Logf("standard error: %s\n", h.stderr.String())
	}
	h.ran = true
	return h.status
}
// Stdout returns standard output of the Cmd run as a string.
// Calling it before exec has run is a testsuite bug and aborts the test.
func (h *Cmd) Stdout() string {
	if h.ran {
		return h.stdout.String()
	}
	h.t.Fatal("internal testsuite error: stdout called before run")
	return "" // unreachable: Fatal stops the test goroutine
}
// Stderr returns standard error of the Cmd run as a string.
// Calling it before exec has run is a testsuite bug and aborts the test.
func (h *Cmd) Stderr() string {
	if !h.ran {
		// Fixed message: it previously said "stdout", which pointed the
		// reader at the wrong accessor when this misuse fired.
		h.t.Fatal("internal testsuite error: stderr called before run")
	}
	return h.stderr.String()
}
// Match reports whether exp, a regular expression, matches the captured
// standard output. Note that MustCompile panics on an invalid expression.
// Receiver renamed from c to h for consistency with every other Cmd method.
func (h *Cmd) Match(exp string) bool {
	re := regexp.MustCompile(exp)
	return re.MatchString(h.Stdout())
}
// StdoutContains reports whether the captured standard output includes substring.
func (h *Cmd) StdoutContains(substring string) bool {
	out := h.Stdout()
	return strings.Contains(out, substring)
}
// StderrContains reports whether the captured standard error includes substring.
func (h *Cmd) StderrContains(substring string) bool {
	errOut := h.Stderr()
	return strings.Contains(errOut, substring)
}
// Contains reports whether substring appears on either output stream.
func (h *Cmd) Contains(substring string) bool {
	if h.StdoutContains(substring) {
		return true
	}
	return h.StderrContains(substring)
}

@ -0,0 +1,75 @@
// +build e2e
package e2e
import (
"net/http"
"testing"
"time"
)
const (
	// namespace is the Kubernetes namespace the helm server runs in.
	namespace = "helm"
	// apiProxy is the apiserver proxy path used to reach the manager
	// service inside that namespace (trailing slash intended: endpoints
	// like "healthz" are appended directly).
	apiProxy = "/api/v1/proxy/namespaces/" + namespace + "/services/manager-service:manager/"
)
// HelmContext drives the helm CLI under test.
type HelmContext struct {
	t *testing.T
	// Path is the helm executable to invoke; Host is the manager endpoint
	// URL passed to every invocation via --host.
	Path    string
	Host    string
	Timeout time.Duration // NOTE(review): not read by the visible code — confirm intended use
}
// NewHelmContext returns a HelmContext bound to t, with the default
// executable name ("helm", resolved via PATH) and a 20-second timeout.
func NewHelmContext(t *testing.T) *HelmContext {
	ctx := &HelmContext{t: t}
	ctx.Path = "helm"
	ctx.Timeout = 20 * time.Second
	return ctx
}
// MustRun executes helm with args and aborts the test immediately if the
// command fails, logging the captured stderr for diagnosis.
func (h *HelmContext) MustRun(args ...string) *Cmd {
	cmd := h.newCmd(args...)
	if status := cmd.exec(); status != nil {
		// Fatalf logs and aborts in one call; the previous
		// Errorf+Errorf+FailNow sequence was three steps for one action.
		h.t.Fatalf("helm %v failed unexpectedly: %v\n%s", args, status, cmd.Stderr())
	}
	return cmd
}
// Run executes helm with args and returns the completed Cmd; the exit
// status is not checked here — callers inspect the returned Cmd.
func (h *HelmContext) Run(args ...string) *Cmd {
	c := h.newCmd(args...)
	_ = c.exec() // status is available on the returned Cmd
	return c
}
// RunFail executes helm with args, expecting the command to fail; the test
// is aborted if the command unexpectedly succeeds.
func (h *HelmContext) RunFail(args ...string) *Cmd {
	cmd := h.newCmd(args...)
	if status := cmd.exec(); status == nil {
		// Fixed message: the old text "helm unexpected to fail: %v %v" was
		// ungrammatical and printed a status that is always nil here.
		h.t.Fatalf("helm %v was expected to fail but succeeded", args)
	}
	return cmd
}
// newCmd builds (but does not run) a Cmd for the helm binary, prepending
// the --host flag so every invocation targets h.Host.
func (h *HelmContext) newCmd(args ...string) *Cmd {
	full := make([]string, 0, len(args)+2)
	full = append(full, "--host", h.Host)
	full = append(full, args...)
	return &Cmd{
		t:    h.t,
		path: h.Path,
		args: full,
	}
}
// Running reports whether the helm server answers its healthz endpoint
// with HTTP 200.
func (h *HelmContext) Running() bool {
	endpoint := h.Host + "healthz"
	resp, err := http.Get(endpoint)
	if err != nil {
		h.t.Errorf("Could not GET %s: %s", endpoint, err)
		// Fixed: the original fell through and dereferenced the nil resp,
		// crashing the suite whenever the server was unreachable.
		return false
	}
	// Drain-free close is fine here; the body content is not needed.
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

@ -0,0 +1,144 @@
// +build e2e
package e2e
import (
"flag"
"fmt"
"math/rand"
"os"
"testing"
"time"
)
// init seeds math/rand so deployment names from genName differ across runs.
func init() {
	rand.Seed(time.Now().Unix())
}
const (
	// timeout bounds how long wait() polls for a condition overall;
	// poll is the sleep between successive checks.
	timeout = 180 * time.Second
	poll    = 2 * time.Second
)
// Command-line knobs for the e2e suite: where charts live, which chart to
// deploy, where the helm server is, and which component images to install.
var (
	repoURL  = flag.String("repo-url", "gs://kubernetes-charts-testing", "Repository URL")
	repoName = flag.String("repo-name", "kubernetes-charts-testing", "Repository name")
	chart    = flag.String("chart", "gs://kubernetes-charts-testing/redis-2.tgz", "Chart to deploy")
	host     = flag.String("host", "", "The URL to the helm server")
	// Image overrides are only appended to `helm server install` when non-empty.
	resourcifierImage = flag.String("resourcifier-image", "", "The full image name of the Docker image for resourcifier.")
	expandybirdImage  = flag.String("expandybird-image", "", "The full image name of the Docker image for expandybird.")
	managerImage      = flag.String("manager-image", "", "The full image name of the Docker image for manager.")
)
// logKubeEnv records the active (flattened, minified) kubectl config in
// the test log for post-mortem debugging.
func logKubeEnv(k *KubeContext) {
	cmd := k.Run("config", "view", "--flatten", "--minify")
	k.t.Logf("Kubernetes Environment\n%s", cmd.Stdout())
}
// TestHelm exercises the whole helm workflow end to end against a live
// cluster: connectivity checks, optional server install, repo add,
// deploy, list/info/describe, and delete.
func TestHelm(t *testing.T) {
	kube := NewKubeContext(t)
	helm := NewHelmContext(t)
	logKubeEnv(kube)
	if !kube.Running() {
		t.Fatal("Not connected to kubernetes")
	}
	t.Log(kube.Version())
	t.Log(helm.MustRun("--version").Stdout())
	// Host resolution order: -host flag / HELM_HOST env, then fall back
	// to the apiserver proxy path on the current cluster.
	helm.Host = helmHost()
	if helm.Host == "" {
		helm.Host = fmt.Sprintf("%s%s", kube.Server(), apiProxy)
	}
	t.Logf("Using host: %v", helm.Host)
	if !helm.Running() {
		t.Log("Helm is not installed")
		install(helm)
	}
	// Add repo if it does not exist
	if !helm.MustRun("repo", "list").Contains(*repoURL) {
		t.Logf("Adding repo %s %s", *repoName, *repoURL)
		helm.MustRun("repo", "add", *repoName, *repoURL)
	}
	// Generate a unique deployment name so reruns don't collide.
	deploymentName := genName()
	t.Log("Executing deploy")
	helm.MustRun("deploy",
		"--properties", "namespace=e2e",
		"--name", deploymentName,
		*chart,
	)
	// TODO: get pods to lookup dynamically instead of assuming redis.
	// Poll until the deployed redis pod reports Running.
	if err := wait(func() bool {
		return kube.Run("get", "pods").Match("redis.*Running")
	}); err != nil {
		t.Fatal(err)
	}
	t.Log(kube.Run("get", "pods").Stdout())
	t.Log("Executing deployment list")
	if !helm.MustRun("deployment", "list").Contains(deploymentName) {
		t.Fatal("Could not list deployment")
	}
	t.Log("Executing deployment info")
	if !helm.MustRun("deployment", "info", deploymentName).Contains("Deployed") {
		t.Fatal("Could not deploy")
	}
	t.Log("Executing deployment describe")
	helm.MustRun("deployment", "describe", deploymentName)
	t.Log("Executing deployment delete")
	if !helm.MustRun("deployment", "rm", deploymentName).Contains("Deleted") {
		t.Fatal("Could not delete deployment")
	}
}
// conditionFunc is a predicate polled by wait until it reports success.
type conditionFunc func() bool

// wait polls fn every `poll` interval until it returns true or the overall
// `timeout` elapses. It returns nil on success, or an error on timeout.
func wait(fn conditionFunc) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		if fn() {
			return nil
		}
	}
	// Lowercase, informative error per Go convention (was "Polling timeout").
	return fmt.Errorf("polling timed out after %v", timeout)
}
func genName() string {
return fmt.Sprintf("e2e-%d", rand.Uint32())
}
// helmHost resolves the helm server URL: the -host flag wins when set,
// otherwise the HELM_HOST environment variable (possibly empty).
func helmHost() string {
	if flagVal := *host; flagVal != "" {
		return flagVal
	}
	return os.Getenv("HELM_HOST")
}
// install deploys the helm server components via `helm server install`,
// appending any image overrides supplied on the command line, then blocks
// until the server reports itself Running (or the poll times out).
func install(h *HelmContext) {
	args := []string{"server", "install"}
	// NOTE(review): overrides are appended positionally (expandybird,
	// manager, resourcifier) — verify this matches the argument order
	// `helm server install` expects.
	if *expandybirdImage != "" {
		args = append(args, *expandybirdImage)
	}
	if *managerImage != "" {
		args = append(args, *managerImage)
	}
	if *resourcifierImage != "" {
		args = append(args, *resourcifierImage)
	}
	h.MustRun(args...)
	if err := wait(h.Running); err != nil {
		h.t.Fatal(err)
	}
}

@ -0,0 +1,61 @@
// +build e2e
package e2e
import (
"strings"
"testing"
)
// defaultKubectlPath is the kubectl binary name, resolved via PATH.
const defaultKubectlPath = "kubectl"

// KubeContext drives kubectl for cluster assertions in the e2e suite.
type KubeContext struct {
	t    *testing.T
	Path string // kubectl executable to invoke
}
// NewKubeContext returns a KubeContext bound to t using the default
// kubectl binary resolved from PATH.
func NewKubeContext(t *testing.T) *KubeContext {
	ctx := &KubeContext{t: t}
	ctx.Path = defaultKubectlPath
	return ctx
}
// Run invokes kubectl with args and returns the completed Cmd for
// inspection; the exit status is not checked here.
func (k *KubeContext) Run(args ...string) *Cmd {
	c := k.newCmd(args...)
	_ = c.exec() // callers inspect output/status on the returned Cmd
	return c
}
// newCmd constructs (but does not run) a kubectl Cmd.
func (k *KubeContext) newCmd(args ...string) *Cmd {
	c := &Cmd{t: k.t}
	c.path = k.Path
	c.args = args
	return c
}
// getConfigValue queries the flattened, minified kubectl config with a
// jsonpath expression and strips the surrounding single quotes from the
// result.
func (k *KubeContext) getConfigValue(jsonpath string) string {
	cmd := k.Run("config", "view", "--flatten=true", "--minify=true", "-o", "jsonpath="+jsonpath)
	out := cmd.Stdout()
	return strings.Replace(out, "'", "", -1)
}
// Cluster returns the name of the first cluster in the active kubeconfig.
func (k *KubeContext) Cluster() string {
	return k.getConfigValue("'{.clusters[0].name}'")
}

// Server returns the API server URL of the first cluster in the kubeconfig.
func (k *KubeContext) Server() string {
	return k.getConfigValue("'{.clusters[0].cluster.server}'")
}

// CurrentContext returns the kubeconfig current-context value.
func (k *KubeContext) CurrentContext() string {
	return k.getConfigValue("'{.current-context}'")
}
// Running reports whether a Kubernetes cluster is reachable, by checking
// whether `kubectl cluster-info` exits successfully.
func (k *KubeContext) Running() bool {
	// Build the command and run it exactly once. The previous version
	// called k.Run(...).exec(), which executed cluster-info TWICE,
	// since Run itself already calls exec.
	return k.newCmd("cluster-info").exec() == nil
}
// Version returns the output of `kubectl version` as a string.
func (k *KubeContext) Version() string {
	cmd := k.Run("version")
	return cmd.Stdout()
}

@ -0,0 +1,12 @@
// +build !e2e
package e2e
import (
"os"
"testing"
)
// TestMain replaces the standard test entry point for builds WITHOUT the
// e2e tag: it exits immediately so none of the e2e tests in this package
// run during a normal `go test` invocation.
func TestMain(m *testing.M) {
	os.Exit(0) // deliberately skips m.Run(): this file only builds when e2e is absent
}
Loading…
Cancel
Save