diff --git a/Makefile b/Makefile
index 4df23a2..5f9a86c 100644
--- a/Makefile
+++ b/Makefile
@@ -150,3 +150,7 @@ $(CONTROLLER_GEN): $(LOCALBIN)
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+
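+# wait-dep blocks until the ingress-nginx controller Deployment finishes rolling out
+# (used as an install step by the e2e config); on timeout it dumps pod status to help debugging.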
+.PHONY: wait-dep
+wait-dep:
+	kubectl -n ingress-nginx rollout status deploy/ingress-nginx-controller -w --timeout=40m || kubectl -n ingress-nginx get pod -o wide
\ No newline at end of file
diff --git a/e2e.xml b/e2e.xml
new file mode 100644
index 0000000..5153682
--- /dev/null
+++ b/e2e.xml
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/test/e2e/config.yaml b/test/e2e/config.yaml
index 7166158..0854ee3 100644
--- a/test/e2e/config.yaml
+++ b/test/e2e/config.yaml
@@ -5,18 +5,49 @@ cluster:
config: |
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
+ containerdConfigPatches:
+ - |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5001"]
+ endpoint = ["http://kind-registry:5000"]
nodes:
- role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 30001
hostPort: 30001
+ protocol: TCP
+ - containerPort: 80
+ hostPort: 80
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 443
+ protocol: TCP
install:
steps:
+ - name: set-registry
+ cmd: kubectl
+ args:
+ - apply
+ - -f
+ - ./test/e2e/yamls/local-registry-cm.yaml
+ path: ../..
+ ignoreFail: false
- name: ingress-controller
cmd: kubectl
args:
- apply
- -f
- - https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.0/deploy/static/provider/kind/deploy.yaml
+ - ./test/e2e/yamls/ingress-nginx-deploy.yaml
+ path: ../..
+ ignoreFail: false
+ - name: wait-dep
+ cmd: make
+ args:
+ - wait-dep
path: ../..
ignoreFail: false
diff --git a/test/e2e/e2e.xml b/test/e2e/e2e.xml
new file mode 100644
index 0000000..83fc8b4
--- /dev/null
+++ b/test/e2e/e2e.xml
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 4952e82..3ce5549 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -1,3 +1,6 @@
+//go:build e2e
+// +build e2e
+
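+// NOTE: with the e2e build tag above, this file is compiled only when the tag is
+// supplied, e.g. "go test -tags e2e ./test/e2e/..." (the exact invocation used by CI may differ).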
package e2e
import (
@@ -13,10 +16,10 @@ var fmw = framework.NewFramework()
// Executed first when running go test
func TestMain(m *testing.M) {
fmw.Flags(). // parse command-line flags
- LoadConfig(ginkgo.GinkgoWriter). // load the configuration
- SynchronizedBeforeSuite(nil). // synchronized setup, run before the test suite
- SynchronizedAfterSuite(nil). // synchronized teardown, run after the test suite
- MRun(m) // run the tests
+ LoadConfig(ginkgo.GinkgoWriter).  // load the configuration
+ SynchronizedBeforeSuite(nil).     // synchronized setup, run before the test suite
+ SynchronizedAfterSuite(nil).      // synchronized teardown, run after the test suite
+ MRun(m)                           // run the tests
}
// Executed after TestMain when running go test, i.e. the regular test cases
diff --git a/test/e2e/yamls/ingress-nginx-deploy.yaml b/test/e2e/yamls/ingress-nginx-deploy.yaml
new file mode 100644
index 0000000..72465b5
--- /dev/null
+++ b/test/e2e/yamls/ingress-nginx-deploy.yaml
@@ -0,0 +1,653 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ name: ingress-nginx
+---
+apiVersion: v1
+automountServiceAccountToken: true
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resourceNames:
+ - ingress-controller-leader
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+- apiGroups:
+ - coordination.k8s.io
+ resourceNames:
+ - ingress-controller-leader
+ resources:
+ - leases
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+rules:
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx
+ namespace: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+ name: ingress-nginx-admission
+ namespace: ingress-nginx
+---
+apiVersion: v1
+data:
+ allow-snippet-annotations: "true"
+kind: ConfigMap
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ ports:
+ - appProtocol: http
+ name: http
+ port: 80
+ protocol: TCP
+ targetPort: http
+ - appProtocol: https
+ name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ type: NodePort
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-controller-admission
+ namespace: ingress-nginx
+spec:
+ ports:
+ - appProtocol: https
+ name: https-webhook
+ port: 443
+ targetPort: webhook
+ selector:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ minReadySeconds: 0
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ spec:
+ containers:
+ - args:
+ - /nginx-ingress-controller
+ - --election-id=ingress-controller-leader
+ - --controller-class=k8s.io/ingress-nginx
+ - --ingress-class=nginx
+ - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+ - --validating-webhook=:8443
+ - --validating-webhook-certificate=/usr/local/certificates/cert
+ - --validating-webhook-key=/usr/local/certificates/key
+ - --watch-ingress-without-class=true
+ - --publish-status-address=localhost
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ image: localhost:5001/ingress-nginx/controller:v1.3.0
+ imagePullPolicy: IfNotPresent
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /wait-shutdown
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ name: controller
+ ports:
+ - containerPort: 80
+ hostPort: 80
+ name: http
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 443
+ name: https
+ protocol: TCP
+ - containerPort: 8443
+ name: webhook
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ resources:
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ securityContext:
+ allowPrivilegeEscalation: true
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+ drop:
+ - ALL
+ runAsUser: 101
+ volumeMounts:
+ - mountPath: /usr/local/certificates/
+ name: webhook-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ nodeSelector:
+ ingress-ready: "true"
+ kubernetes.io/os: linux
+ serviceAccountName: ingress-nginx
+ terminationGracePeriodSeconds: 0
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Equal
+ volumes:
+ - name: webhook-cert
+ secret:
+ secretName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission-create
+ namespace: ingress-nginx
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission-create
+ spec:
+ containers:
+ - args:
+ - create
+ - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
+ - --namespace=$(POD_NAMESPACE)
+ - --secret-name=ingress-nginx-admission
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: localhost:5001/ingress-nginx/kube-webhook-certgen:v1.1.1
+ imagePullPolicy: IfNotPresent
+ name: create
+ securityContext:
+ allowPrivilegeEscalation: false
+ nodeSelector:
+ kubernetes.io/os: linux
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 2000
+ serviceAccountName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission-patch
+ namespace: ingress-nginx
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission-patch
+ spec:
+ containers:
+ - args:
+ - patch
+ - --webhook-name=ingress-nginx-admission
+ - --namespace=$(POD_NAMESPACE)
+ - --patch-mutating=false
+ - --secret-name=ingress-nginx-admission
+ - --patch-failure-policy=Fail
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: localhost:5001/ingress-nginx/kube-webhook-certgen:v1.1.1
+ imagePullPolicy: IfNotPresent
+ name: patch
+ securityContext:
+ allowPrivilegeEscalation: false
+ nodeSelector:
+ kubernetes.io/os: linux
+ restartPolicy: OnFailure
+ securityContext:
+ fsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 2000
+ serviceAccountName: ingress-nginx-admission
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: nginx
+spec:
+ controller: k8s.io/ingress-nginx
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ labels:
+ app.kubernetes.io/component: admission-webhook
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ app.kubernetes.io/version: 1.3.0
+ name: ingress-nginx-admission
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: ingress-nginx-controller-admission
+ namespace: ingress-nginx
+ path: /networking/v1/ingresses
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validate.nginx.ingress.kubernetes.io
+ rules:
+ - apiGroups:
+ - networking.k8s.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - ingresses
+ sideEffects: None
diff --git a/test/e2e/yamls/local-registry-cm.yaml b/test/e2e/yamls/local-registry-cm.yaml
new file mode 100644
index 0000000..001be65
--- /dev/null
+++ b/test/e2e/yamls/local-registry-cm.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: local-registry-hosting
+ namespace: kube-public
+data:
+ localRegistryHosting.v1: |
+ host: "localhost:5001"
+ help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
\ No newline at end of file
diff --git a/test/framework/framework.go b/test/framework/framework.go
index cd0cbad..edfe4e4 100644
--- a/test/framework/framework.go
+++ b/test/framework/framework.go
@@ -1,10 +1,20 @@
package framework
import (
+ "context"
"flag"
+ "fmt"
"io"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"math/rand"
"os"
+ "regexp"
+ "strings"
"testing"
"time"
@@ -14,11 +24,16 @@ import (
)
var DefaultStartTimeout = float64(60 * 60)
+var nsRegex = regexp.MustCompile("[^a-z0-9-]") // matches characters not allowed in the generated namespace prefix
type Framework struct {
Config *Config
ClusterConfig *ClusterConfig
+ factory  Factory              // factory that knows how to create cluster providers
+ provider ClusterProvider      // the provider implementation currently used by this framework
+ client   kubernetes.Interface // client used to talk to the cluster that was created
+
configFile  string  // path to the configuration file
initTimeout float64 // startup timeout, covering cluster and dependency installation as well as this program
}
@@ -119,15 +134,171 @@ func (f *Framework) WithConfig(config *Config) *Framework {
return f
}
-// TODO
-// After creating the environment, populate f.ClusterConfig
+// DeployTestEnvironment creates the test environment and obtains the cluster access config and client
func (f *Framework) DeployTestEnvironment() error {
- return nil
+ // 1. Check f.Config
+ if f.Config == nil {
+ return field.Invalid(
+ field.NewPath("config"),
+ nil,
+ "Not inital config object")
+ }
+ // 2. Create the provider
+ ginkgo.By("Getting env provider")
+ var err error
+ if f.provider, err = f.factory.Provider(f.Config); err != nil {
+ return err
+ }
+ // 3. Run the provider's Validate method to verify the config
+ ginkgo.By("Validate config for provider")
+ if err := f.provider.Validate(f.Config); err != nil {
+ return err
+ }
+ // 4. Run the provider's Deploy method to create the cluster
+ ginkgo.By("Deploying test env")
+ clusterConfig, err := f.provider.Deploy(f.Config)
+ if err != nil {
+ return err
+ }
+ f.ClusterConfig = &clusterConfig
+
+ // 5. Create the client that test cases will use
+ if f.client, err = kubernetes.NewForConfig(f.ClusterConfig.Rest); err != nil {
+ return err
+ }
+
+ return nil
}
-// TODO
+// DestroyTestEnvironment tears down the test environment. It must be called after DeployTestEnvironment.
func (f *Framework) DestroyTestEnvironment() error {
+ // 1. Check f.Config
+ if f.Config == nil {
+ return field.Invalid(
+ field.NewPath("config"),
+ nil,
+ "Not inital config object")
+ }
+
+ // 2. Check the provider
+ if f.provider == nil {
+ return fmt.Errorf("f.provider is nil")
+ }
+
+ // 3. Run the provider's Destroy method to tear down the environment
+ ginkgo.By("Destroying test env")
+ if err := f.provider.Destroy(f.Config); err != nil {
+ return err
+ }
+
+ // 4. Clear f.provider so that calling the destroy function again does not fail
+ f.provider = nil
+
return nil
}
+
+func (f *Framework) Describe(name string, ctxFunc ContextFunc) bool {
+ // This whole function is essentially a wrapper around ginkgo.Describe
+ return ginkgo.Describe(name, func() {
+ // 1. Create the test context
+ ctx, err := f.createTestContext(name, false)
+ if err != nil {
+ ginkgo.Fail("Cannot create test context for " + name)
+ return
+ }
+
+ // 2. Before each test, run the actions we expect; creating the namespace, for example, goes here
+ ginkgo.BeforeEach(func() {
+ ctx2, err := f.createTestContext(name, true)
+ if err != nil {
+ ginkgo.Fail("Cannot create testcontext for " + name +
+ " namespace " + ctx2.Namespace)
+ return
+ }
+ ctx = ctx2
+ })
+
+ // 3. After each test, clean up; here we delete the test context
+ ginkgo.AfterEach(func() {
+ // reclaim the test context
+ _ = f.deleteTestContext(ctx)
+ })
+ // 4. Run the user's test function
+ ctxFunc(&ctx, f)
+ })
+}
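+
+// A minimal usage sketch (the suite name and the It body are illustrative; fmw is
+// the package-level Framework created in the e2e test package):
+//
+//	var _ = fmw.Describe("ingress basic", func(ctx *framework.TestContext, f *framework.Framework) {
+//		ginkgo.It("runs inside a per-test namespace", func() {
+//			// ctx.Namespace is populated by the BeforeEach above via createTestContext
+//			ginkgo.By("using namespace " + ctx.Namespace)
+//		})
+//	})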
+
+func (f *Framework) createTestContext(name string, nsCreate bool) (TestContext, error) {
+ // 1. Create the TestContext object
+ tc := TestContext{}
+ // 2. Check that f has been initialized
+ if f.Config == nil || f.ClusterConfig == nil {
+ return tc, field.Invalid(
+ field.NewPath("config/clusterConfig"),
+ nil,
+ "Not inital config object")
+ }
+
+ // 3. Populate the fields
+ tc.Name = name
+ tc.Config = rest.CopyConfig(f.ClusterConfig.Rest)
+ tc.MasterIP = f.ClusterConfig.MasterIP
+
+ // 4. Decide, based on the flag, whether to create a namespace
+ if nsCreate {
+ // 4.1 If so, create the namespace with f.client
+ // 4.1.1 Normalize the name: replace spaces and underscores with "-"
+ // 4.1.2 Strip any other illegal characters with the regex
+ // 4.1.3 Let the API server generate the namespace name from the prefix (see the example below)
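+ // For example, a test named "Canary Ingress_v2" yields the prefix "canary-ingress-v2",
+ // and the API server appends a random suffix via GenerateName.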
+ prefix := nsRegex.ReplaceAllString(
+ strings.ReplaceAll(
+ strings.ReplaceAll(
+ strings.ToLower(name),
+ " ", "-"),
+ "_", "-"),
+ "")
+ if len(prefix) > 30 {
+ prefix = prefix[:30]
+ }
+ ns, err := f.client.CoreV1().Namespaces().Create(
+ context.TODO(),
+ &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: prefix + "-",
+ },
+ },
+ metav1.CreateOptions{})
+ if err != nil {
+ return tc, err
+ }
+ tc.Namespace = ns.GetName()
+ }
+
+ // 5. ... perform any other setup we want here,
+ // for example creating a ServiceAccount or Secret
+ return tc, nil
+}
+
+func (f *Framework) deleteTestContext(ctx TestContext) error {
+ // Delete the resources we created.
+ // Only a namespace was created here, so that is all we delete
+
+ errs := field.ErrorList{}
+ if ctx.Namespace != "" {
+ // delete the namespace
+ if err := f.client.CoreV1().Namespaces().Delete(
+ context.TODO(),
+ ctx.Namespace,
+ metav1.DeleteOptions{}); err != nil &&
+ !errors.IsNotFound(err) {
+ errs = append(errs, field.InternalError(field.NewPath("testcontext"), err))
+ }
+ }
+ // If more resources were created, delete them below as well.
+ // On any error during cleanup, append it with errs = append(errs, err) just like above
+
+ return errs.ToAggregate()
+}
diff --git a/test/framework/kind.go b/test/framework/kind.go
index 622a02e..c128208 100644
--- a/test/framework/kind.go
+++ b/test/framework/kind.go
@@ -5,7 +5,6 @@ import (
"github.com/spf13/viper"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/clientcmd"
- "net/url"
"os"
"os/exec"
)
@@ -18,6 +17,9 @@ type KindConfig struct {
type KindProvider struct{}
+// Compile-time check that KindProvider implements the ClusterProvider interface.
+// This pattern is recommended for any type that is meant to satisfy an interface.
+var _ ClusterProvider = &KindProvider{}
+
func (k *KindProvider) Validate(config *Config) error {
// 1. 获取配置
if config == nil {
@@ -80,23 +82,25 @@ func (k *KindProvider) Deploy(config *Config) (ClusterConfig, error) {
defer func() { _ = os.Remove(KindConfigTempFile) }()
subCommand = append(subCommand, "--config", KindConfigTempFile)
}
+ subCommand = append(subCommand, "--name", kindConfig.Name)
cmd := exec.Command("kind", subCommand...)
cmd.Stdout = config.Stdout
cmd.Stderr = config.Stderr
if err := cmd.Run(); err != nil {
return clusterConfig, err
}
+ kubeConfigFile = KubeconfigTempFile
}
defer func() { _ = os.Remove(kubeConfigFile) }() // remove the temporary kubeconfig file before returning
// 3. Build the ClusterConfig
clusterConfig.Name = kindConfig.Name
- clusterConfig.Rest, err = clientcmd.BuildConfigFromFlags("", kubeConfigFile)
- host, _ := url.Parse(clusterConfig.Rest.Host)
- if host != nil {
- clusterConfig.MasterIP = host.Host
+ if clusterConfig.Rest, err = clientcmd.BuildConfigFromFlags("", kubeConfigFile); err != nil {
+ return clusterConfig, err
}
+ clusterConfig.MasterIP = clusterConfig.Rest.Host
+
return clusterConfig, nil
}
diff --git a/test/framework/kubectl.go b/test/framework/kubectl.go
index b742363..a137c77 100644
--- a/test/framework/kubectl.go
+++ b/test/framework/kubectl.go
@@ -47,9 +47,6 @@ func (k *KubectlConfig) SetContext(config *ClusterConfig) error {
currentContext := &bytes.Buffer{}
cmd.Stdout = currentContext
err := cmd.Run()
- if err != nil {
- return err
- }
// If the command succeeded, a current context exists; save it so it can be restored in DeleteContext
defer func() {
if err == nil {
diff --git a/test/framework/provider.go b/test/framework/provider.go
index d511ee9..3293249 100644
--- a/test/framework/provider.go
+++ b/test/framework/provider.go
@@ -1,6 +1,10 @@
package framework
-import "k8s.io/client-go/rest"
+import (
+ "fmt"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/client-go/rest"
+)
type ClusterConfig struct {
Name string // cluster name; passed as --name when running kind create cluster
@@ -13,3 +17,35 @@ type ClusterProvider interface {
Deploy(config *Config) (ClusterConfig, error)
Destroy(config *Config) error
}
+
+// 1. Define the factory object
+type Factory struct{}
+
+// 2. The factory creates the concrete provider implementations
+func (f Factory) Provider(config *Config) (ClusterProvider, error) {
+ var clusterProvider ClusterProvider
+ // 1. Check the configuration
+ if config.Viper == nil {
+ return clusterProvider, field.Invalid(
+ field.NewPath("config"),
+ nil,
+ "Not inital config object")
+ }
+ // 2. Check the cluster-related section of the config
+ if config.Sub("cluster") == nil {
+ return clusterProvider, field.Invalid(
+ field.NewPath("cluster"),
+ nil,
+ "Not inital cluster object")
+ }
+ cluster := config.Sub("cluster")
+
+ // 3. Determine which plugin creates the Kubernetes cluster and use it to build the provider
+ switch {
+ case cluster.Sub("kind") != nil:
+ kind := new(KindProvider)
+ return kind, nil
+ default:
+ return clusterProvider, fmt.Errorf("Not support provider: %#v", cluster.AllSettings())
+ }
+}
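+
+// A sketch of the viper config shape this factory expects (illustrative; the exact
+// keys under "kind" are whatever KindConfig defines):
+//
+//	cluster:
+//	  kind:
+//	    config: |
+//	      kind: Cluster
+//	      apiVersion: kind.x-k8s.io/v1alpha4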
diff --git a/test/framework/test-context.go b/test/framework/test-context.go
new file mode 100644
index 0000000..675f736
--- /dev/null
+++ b/test/framework/test-context.go
@@ -0,0 +1,19 @@
+package framework
+
+import "k8s.io/client-go/rest"
+
+// 1. Define a test entry function, Describe, which takes a test description and a ContextFunc
+// 1.1 It calls the context-creation method to build a TestContext
+// 1.2 That context carries the behaviour we expect (for example, a per-test namespace)
+// 2. The ContextFunc signature is func(ctx *TestContext, f *Framework)
+// 3. The body of the ContextFunc is the test itself
+// 4. Because ctx is a parameter, the test body can use anything the context provides
+
+type TestContext struct {
+ Name string
+ Namespace string
+ Config *rest.Config
+ MasterIP string
+}
+
+type ContextFunc func(ctx *TestContext, f *Framework)