mirror of https://github.com/helm/helm
parent
cf79b90867
commit
5d042a89d3
@ -0,0 +1,33 @@
|
|||||||
|
*** Settings ***
Documentation     Verify Helm functionality on multiple Kubernetes versions
Library           lib/Kind.py
Library           lib/Kubectl.py
Library           lib/Helm.py
Suite Setup       Suite Setup
Suite Teardown    Suite Teardown

*** Test Cases ***
Helm works with Kubernetes 1.14.3
    Test Helm on Kubernetes version    1.14.3

Helm works with Kubernetes 1.15.0
    Test Helm on Kubernetes version    1.15.0

*** Keywords ***
Test Helm on Kubernetes version
    [Arguments]    ${kube_version}
    # Spin up a throwaway kind cluster for the requested k8s version.
    Kind.Create test cluster with Kubernetes version    ${kube_version}
    Kind.Wait for cluster

    # Sanity-check cluster access before exercising Helm.
    Kubectl.Get nodes
    Kubectl.Get pods    kube-system

    Helm.List releases

    # Fix: prefix was lowercase "kind." here, inconsistent with other calls.
    Kind.Delete test cluster

Suite Setup
    Kind.Cleanup all test clusters

Suite Teardown
    Kind.Cleanup all test clusters
|
@ -0,0 +1 @@
|
|||||||
|
__pycache__/
|
@ -0,0 +1,7 @@
|
|||||||
|
import common
|
||||||
|
from Kind import kind_auth_wrap
|
||||||
|
|
||||||
|
class Helm(common.CommandRunner):
    """Robot Framework keyword library for running Helm CLI commands
    against the currently active kind test cluster."""

    def list_releases(self):
        """Run `helm list`, authenticated against the last kind cluster."""
        self.run_command(kind_auth_wrap('helm list'))
|
@ -0,0 +1,67 @@
|
|||||||
|
import common
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Docker Hub repository providing kind node images; tags look like 'v<k8s-version>'.
DOCKER_HUB_REPO='kindest/node'
# Name prefix for clusters created by this suite (used to find them for cleanup).
CLUSTER_PREFIX = 'helm-acceptance-test'
# Log level passed to every `kind` invocation.
LOG_LEVEL = 'debug'

# Polling budget while waiting for the cluster node to report Ready.
MAX_WAIT_KIND_NODE_SECONDS = 60
KIND_NODE_INTERVAL_SECONDS = 2

# Polling budget while waiting for kube-system pods to come up.
MAX_WAIT_KIND_POD_SECONDS = 60
KIND_POD_INTERVAL_SECONDS = 2

# Number of kube-system pods expected to reach 1/1 Ready.
KIND_POD_EXPECTED_NUMBER = 8

# Name of the most recently created cluster; assigned by
# Kind.create_test_cluster_with_kubernetes_version and read by kind_auth_wrap.
LAST_CLUSTER_NAME = 'UNSET'
|
||||||
|
|
||||||
|
def kind_auth_wrap(cmd, cluster_name=None):
    """Wrap a shell command with a KUBECONFIG export for a kind cluster.

    Args:
        cmd: Shell command string to run against the cluster.
        cluster_name: Target kind cluster name. Defaults to the module-level
            LAST_CLUSTER_NAME (the cluster created most recently), preserving
            the original behavior for existing callers.

    Returns:
        A shell command string that exports KUBECONFIG via
        `kind get kubeconfig-path` and then runs *cmd*.
    """
    if cluster_name is None:
        cluster_name = LAST_CLUSTER_NAME
    c = 'export KUBECONFIG="$(kind get kubeconfig-path'
    c += ' --name="'+cluster_name+'")"'
    return c+' && '+cmd
|
||||||
|
|
||||||
|
class Kind(common.CommandRunner):
    """Robot Framework keyword library managing kind (Kubernetes-in-Docker)
    test clusters via the `kind` CLI."""

    def create_test_cluster_with_kubernetes_version(self, kube_version):
        """Create a kind cluster running the given Kubernetes version.

        Records the generated name in the module-level LAST_CLUSTER_NAME so
        later keywords (delete, kind_auth_wrap) can target this cluster.
        """
        global LAST_CLUSTER_NAME
        # Unique per run: prefix + suite start timestamp + k8s version.
        LAST_CLUSTER_NAME = CLUSTER_PREFIX+'-'+common.NOW+'-'+kube_version
        cmd = 'kind create cluster --loglevel='+LOG_LEVEL
        cmd += ' --name='+LAST_CLUSTER_NAME
        cmd += ' --image='+DOCKER_HUB_REPO+':v'+kube_version
        self.run_command(cmd)

    def delete_test_cluster(self):
        """Delete the most recently created test cluster."""
        cmd = 'kind delete cluster --loglevel='+LOG_LEVEL
        cmd += ' --name='+LAST_CLUSTER_NAME
        self.run_command(cmd)

    def cleanup_all_test_clusters(self):
        """Delete every kind cluster created by this suite run.

        Clusters are matched by the prefix+timestamp naming scheme; individual
        delete failures are ignored (`|| true`) so cleanup keeps going.
        """
        cmd = 'for i in `kind get clusters| grep ^'+CLUSTER_PREFIX+'-'+common.NOW+'`;'
        cmd += ' do kind delete cluster --loglevel='+LOG_LEVEL+' --name=$i || true; done'
        self.run_command(cmd)

    def wait_for_cluster(self):
        """Block until the node is Ready and kube-system pods are running.

        Raises:
            Exception: if either polling phase exceeds its MAX_WAIT_* budget.
        """
        # Phase 1: poll the node STATUS column (2nd field) until it is "Ready".
        seconds_waited = 0
        while True:
            cmd = 'kubectl get nodes | tail -n1 | awk \'{print $2}\''
            # 'set +x' keeps shell tracing out of the captured output.
            self.run_command('set +x && '+kind_auth_wrap(cmd))
            status = self.stdout.replace('\n', '').strip()
            print('Cluster node status: '+status)
            if status == 'Ready':
                break
            if MAX_WAIT_KIND_NODE_SECONDS <= seconds_waited:
                raise Exception('Max time ('+str(MAX_WAIT_KIND_NODE_SECONDS)+') reached waiting for cluster node')
            time.sleep(KIND_NODE_INTERVAL_SECONDS)
            seconds_waited += KIND_NODE_INTERVAL_SECONDS

        # Phase 2: poll until the expected number of kube-system pods show 1/1.
        seconds_waited = 0
        while True:
            cmd = 'kubectl get pods -n kube-system | grep \'1\/1\' | wc -l'
            self.run_command('set +x && '+kind_auth_wrap(cmd))
            num_ready = int(self.stdout.replace('\n', '').strip())
            print('Num pods ready: '+str(num_ready)+'/'+str(KIND_POD_EXPECTED_NUMBER))
            if KIND_POD_EXPECTED_NUMBER <= num_ready:
                break
            if MAX_WAIT_KIND_POD_SECONDS <= seconds_waited:
                raise Exception('Max time ('+str(MAX_WAIT_KIND_POD_SECONDS)+') reached waiting for kube-system pods')
            time.sleep(KIND_POD_INTERVAL_SECONDS)
            seconds_waited += KIND_POD_INTERVAL_SECONDS
|
@ -0,0 +1,11 @@
|
|||||||
|
import common
|
||||||
|
from Kind import kind_auth_wrap
|
||||||
|
|
||||||
|
class Kubectl(common.CommandRunner):
    """Robot Framework keyword library for kubectl queries against the
    currently active kind test cluster."""

    def get_nodes(self):
        """Run `kubectl get nodes` with kind cluster credentials."""
        self.run_command(kind_auth_wrap('kubectl get nodes'))

    def get_pods(self, namespace):
        """Run `kubectl get pods` in the given namespace."""
        self.run_command(kind_auth_wrap('kubectl get pods --namespace='+namespace))
|
@ -0,0 +1,43 @@
|
|||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Timestamp captured at import time; used to namespace clusters for this run.
NOW = time.strftime('%Y%m%d%H%M%S')
|
||||||
|
|
||||||
|
class CommandRunner(object):
    """Base class for keyword libraries that shell out to external commands.

    Runs commands through `bash -xc` and records the result (return code,
    pid, filtered output) so the assertion methods can inspect it afterwards.
    """

    def __init__(self):
        self.rc = 0        # return code of the last (non-detached) command
        self.pid = 0       # pid of the last spawned process
        self.stdout = ''   # combined stdout/stderr of the last command, minus trace lines
        # Repository root: three directories up from this file.
        self.rootdir = os.path.realpath(os.path.join(__file__, '../../../'))

    def return_code_should_be(self, expected_rc):
        """Raise AssertionError unless the last command exited with expected_rc."""
        if int(expected_rc) != self.rc:
            raise AssertionError('Expected return code to be "%s" but was "%s".'
                                 % (expected_rc, self.rc))

    def return_code_should_not_be(self, expected_rc):
        """Raise AssertionError if the last command exited with expected_rc."""
        if int(expected_rc) == self.rc:
            raise AssertionError('Expected return code not to be "%s".' % expected_rc)

    def output_contains(self, s):
        """Raise AssertionError unless s occurs in the last command's output."""
        if s not in self.stdout:
            raise AssertionError('Output does not contain "%s".' % s)

    def output_does_not_contain(self, s):
        """Raise AssertionError if s occurs in the last command's output."""
        if s in self.stdout:
            raise AssertionError('Output contains "%s".' % s)

    def run_command(self, command, detach=False):
        """Run *command* via `bash -xc`, capturing rc and filtered output.

        With detach=True the process is left running and rc/stdout are not
        updated. stderr is merged into stdout so shell trace lines can be
        filtered alongside regular output.
        """
        process = subprocess.Popen(['/bin/bash', '-xc', command],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        # Fix: self.pid was initialized in __init__ but never updated here.
        self.pid = process.pid
        if not detach:
            stdout = process.communicate()[0].strip().decode()
            self.rc = process.returncode
            tmp = []
            for x in stdout.split('\n'):
                print(x)
                if not x.startswith('+ '):  # Remove debug lines that start with "+ "
                    tmp.append(x)
            self.stdout = '\n'.join(tmp)
|
@ -0,0 +1,44 @@
|
|||||||
|
#!/bin/bash -ex

# Commands that must be present on the host before the suite can run.
REQUIRED_SYSTEM_COMMANDS=(
    "kind"
    "python3"
    "virtualenv"
    "pip"
)

set +x
for WW in ${REQUIRED_SYSTEM_COMMANDS[@]}; do
    if [ ! -x "$(command -v ${WW})" ]; then
        echo "System command missing: $WW"
        exit 1
    fi
done
set -x

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/../

# Acceptance test configurables
ROBOT_PY_REQUIRES="${ROBOT_PY_REQUIRES:-robotframework==3.1.2}"
# Fix: default previously read the unrelated ROBOT_DIR variable.
ROBOT_OUTPUT_DIR="${ROBOT_OUTPUT_DIR:-${PWD}/.acceptance}"
ROBOT_HELM_HOME_DIR="${ROBOT_HELM_HOME_DIR:-${ROBOT_OUTPUT_DIR}/.helm}"
ROBOT_VENV_DIR="${ROBOT_VENV_DIR:-${ROBOT_OUTPUT_DIR}/.venv}"
ROBOT_TEST_ROOT_DIR="${ROBOT_TEST_ROOT_DIR:-${PWD}/acceptance_tests}"

# Setup acceptance test environment:
#
# - fresh Helm Home at .acceptance/.helm/
# - Python virtualenv at .acceptance/.venv/ (cached if already fetched)
#
# Fix: PATH previously referenced the undefined ${VENV_DIR}.
export PATH="${PWD}/bin:${ROBOT_VENV_DIR}/bin:${PATH}"
# Fix: honor the configurable ROBOT_HELM_HOME_DIR declared above.
export HELM_HOME="${ROBOT_HELM_HOME_DIR}"
rm -rf ${HELM_HOME} && mkdir -p ${HELM_HOME}
helm init
if [ ! -d ${ROBOT_VENV_DIR} ]; then
    virtualenv -p $(which python3) ${ROBOT_VENV_DIR}
    # Install into the venv explicitly rather than relying on PATH order.
    ${ROBOT_VENV_DIR}/bin/pip install ${ROBOT_PY_REQUIRES}
fi

# Run Robot Framework, output to ${ROBOT_OUTPUT_DIR}
robot --outputdir=${ROBOT_OUTPUT_DIR} ${ROBOT_TEST_ROOT_DIR}
|
Loading…
Reference in new issue