From ae487dacd3dfb3323eae0f8e9f667d2e1fe8d96f Mon Sep 17 00:00:00 2001 From: dongming Date: Sat, 7 Jan 2023 14:29:37 +0800 Subject: [PATCH] add webhook --- PROJECT | 10 +- api/v1/msbdeployment_webhook.go | 75 + api/v1/webhook_suite_test.go | 132 + api/v1/zz_generated.deepcopy.go | 2 +- config/certmanager/certificate.yaml | 39 + config/certmanager/kustomization.yaml | 5 + config/certmanager/kustomizeconfig.yaml | 16 + config/default/manager_webhook_patch.yaml | 23 + config/default/webhookcainjection_patch.yaml | 29 + config/webhook/kustomization.yaml | 6 + config/webhook/kustomizeconfig.yaml | 25 + config/webhook/service.yaml | 20 + go.mod | 1 + go.sum | 1 + main.go | 4 + vendor/github.com/go-logr/logr/funcr/funcr.go | 787 ++ vendor/github.com/onsi/ginkgo/v2/.gitignore | 7 + vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 616 ++ .../github.com/onsi/ginkgo/v2/CONTRIBUTING.md | 13 + vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + vendor/github.com/onsi/ginkgo/v2/README.md | 115 + vendor/github.com/onsi/ginkgo/v2/RELEASING.md | 23 + .../onsi/ginkgo/v2/config/deprecated.go | 69 + vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 741 ++ .../onsi/ginkgo/v2/decorator_dsl.go | 133 + .../onsi/ginkgo/v2/deprecated_dsl.go | 135 + .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 ++ .../onsi/ginkgo/v2/formatter/formatter.go | 195 + .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 45 + .../onsi/ginkgo/v2/internal/counter.go | 9 + .../onsi/ginkgo/v2/internal/failer.go | 99 + .../onsi/ginkgo/v2/internal/focus.go | 125 + .../onsi/ginkgo/v2/internal/global/init.go | 17 + .../onsi/ginkgo/v2/internal/group.go | 363 + .../interrupt_handler/interrupt_handler.go | 162 + .../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../onsi/ginkgo/v2/internal/node.go | 895 ++ .../onsi/ginkgo/v2/internal/ordering.go | 121 + .../ginkgo/v2/internal/output_interceptor.go | 250 + .../v2/internal/output_interceptor_unix.go | 62 + .../v2/internal/output_interceptor_win.go | 7 + .../parallel_support/client_server.go | 72 + .../internal/parallel_support/http_client.go | 169 + .../internal/parallel_support/http_server.go | 242 + .../internal/parallel_support/rpc_client.go | 136 + .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 234 + .../ginkgo/v2/internal/progress_report.go | 287 + .../ginkgo/v2/internal/progress_report_bsd.go | 11 + .../v2/internal/progress_report_unix.go | 11 + .../ginkgo/v2/internal/progress_report_win.go | 8 + .../onsi/ginkgo/v2/internal/report_entry.go | 39 + .../onsi/ginkgo/v2/internal/spec.go | 87 + .../onsi/ginkgo/v2/internal/spec_context.go | 90 + .../onsi/ginkgo/v2/internal/suite.go | 993 ++ .../internal/testingtproxy/testing_t_proxy.go | 128 + .../onsi/ginkgo/v2/internal/tree.go | 77 + .../onsi/ginkgo/v2/internal/writer.go | 140 + .../ginkgo/v2/reporters/default_reporter.go | 750 ++ .../v2/reporters/deprecated_reporter.go | 149 + .../onsi/ginkgo/v2/reporters/json_report.go | 60 + .../onsi/ginkgo/v2/reporters/junit_report.go | 359 + .../onsi/ginkgo/v2/reporters/reporter.go | 29 + .../ginkgo/v2/reporters/teamcity_report.go | 101 + .../onsi/ginkgo/v2/reporting_dsl.go | 182 + vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 302 + .../onsi/ginkgo/v2/types/code_location.go | 92 + .../github.com/onsi/ginkgo/v2/types/config.go | 739 ++ .../onsi/ginkgo/v2/types/deprecated_types.go | 141 + .../ginkgo/v2/types/deprecation_support.go | 177 + .../onsi/ginkgo/v2/types/enum_support.go | 43 + 
.../github.com/onsi/ginkgo/v2/types/errors.go | 621 ++ .../onsi/ginkgo/v2/types/file_filter.go | 106 + .../github.com/onsi/ginkgo/v2/types/flags.go | 489 + .../onsi/ginkgo/v2/types/label_filter.go | 347 + .../onsi/ginkgo/v2/types/report_entry.go | 190 + .../github.com/onsi/ginkgo/v2/types/types.go | 913 ++ .../onsi/ginkgo/v2/types/version.go | 3 + .../v1beta1/.import-restrictions | 5 + .../apis/apiextensions/v1beta1/conversion.go | 70 + .../apis/apiextensions/v1beta1/deepcopy.go | 276 + .../apis/apiextensions/v1beta1/defaults.go | 82 + .../pkg/apis/apiextensions/v1beta1/doc.go | 26 + .../apiextensions/v1beta1/generated.pb.go | 9297 +++++++++++++++++ .../apiextensions/v1beta1/generated.proto | 766 ++ .../pkg/apis/apiextensions/v1beta1/marshal.go | 136 + .../apis/apiextensions/v1beta1/register.go | 62 + .../pkg/apis/apiextensions/v1beta1/types.go | 531 + .../apiextensions/v1beta1/types_jsonschema.go | 331 + .../v1beta1/zz_generated.conversion.go | 1366 +++ .../v1beta1/zz_generated.deepcopy.go | 704 ++ .../v1beta1/zz_generated.defaults.go | 56 + .../zz_generated.prerelease-lifecycle.go | 98 + .../client/clientset/clientset/clientset.go | 133 + .../pkg/client/clientset/clientset/doc.go | 20 + .../client/clientset/clientset/scheme/doc.go | 20 + .../clientset/clientset/scheme/register.go | 58 + .../apiextensions/v1/apiextensions_client.go | 107 + .../v1/customresourcedefinition.go | 184 + .../clientset/typed/apiextensions/v1/doc.go | 20 + .../apiextensions/v1/generated_expansion.go | 21 + .../v1beta1/apiextensions_client.go | 107 + .../v1beta1/customresourcedefinition.go | 184 + .../typed/apiextensions/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 21 + vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/client-go/util/retry/util.go | 105 + vendor/modules.txt | 25 + .../controller-runtime/pkg/envtest/crd.go | 458 + .../controller-runtime/pkg/envtest/doc.go | 26 + .../controller-runtime/pkg/envtest/helper.go | 69 + .../controller-runtime/pkg/envtest/server.go | 374 + .../controller-runtime/pkg/envtest/webhook.go | 433 + .../pkg/internal/flock/doc.go | 21 + .../pkg/internal/flock/errors.go | 24 + .../pkg/internal/flock/flock_other.go | 24 + .../pkg/internal/flock/flock_unix.go | 48 + .../pkg/internal/testing/addr/manager.go | 142 + .../pkg/internal/testing/certs/tinyca.go | 224 + .../testing/controlplane/apiserver.go | 468 + .../pkg/internal/testing/controlplane/auth.go | 142 + .../pkg/internal/testing/controlplane/etcd.go | 202 + .../internal/testing/controlplane/kubectl.go | 119 + .../internal/testing/controlplane/plane.go | 259 + .../pkg/internal/testing/process/arguments.go | 340 + .../testing/process/bin_path_finder.go | 70 + .../pkg/internal/testing/process/process.go | 272 + 129 files changed, 33408 insertions(+), 4 deletions(-) create mode 100644 api/v1/msbdeployment_webhook.go create mode 100644 api/v1/webhook_suite_test.go create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/service.yaml create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/.gitignore create mode 100644 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md create mode 
100644 vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/v2/README.md create mode 100644 vendor/github.com/onsi/ginkgo/v2/RELEASING.md create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/core_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/counter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/failer.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/focus.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/global/init.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/group.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/node.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/ordering.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/spec.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/tree.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/writer.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/table_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go create mode 100644 
vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/util.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go diff --git a/PROJECT b/PROJECT index e216a1d..a2838cf 100644 --- a/PROJECT +++ b/PROJECT @@ -5,12 +5,16 @@ projectName: mashibing-deployment repo: mashibing.com/pkg/mashibing-deployment resources: - api: - crdVersion: appsv1 + crdVersion: v1 namespaced: true controller: true domain: mashibing.com group: apps kind: MsbDeployment - path: mashibing.com/pkg/mashibing-deployment/api/appsv1 - version: appsv1 + path: mashibing.com/pkg/mashibing-deployment/api/v1 + version: v1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 version: "3" diff --git a/api/v1/msbdeployment_webhook.go b/api/v1/msbdeployment_webhook.go new file mode 100644 index 0000000..610d595 --- /dev/null +++ b/api/v1/msbdeployment_webhook.go @@ -0,0 +1,75 @@ +/* +Copyright 2022. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var msbdeploymentlog = logf.Log.WithName("msbdeployment-resource") + +func (r *MsbDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-apps-mashibing-com-v1-msbdeployment,mutating=true,failurePolicy=fail,sideEffects=None,groups=apps.mashibing.com,resources=msbdeployments,verbs=create;update,versions=v1,name=mmsbdeployment.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &MsbDeployment{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *MsbDeployment) Default() { + msbdeploymentlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-apps-mashibing-com-v1-msbdeployment,mutating=false,failurePolicy=fail,sideEffects=None,groups=apps.mashibing.com,resources=msbdeployments,verbs=create;update,versions=v1,name=vmsbdeployment.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &MsbDeployment{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *MsbDeployment) ValidateCreate() error { + msbdeploymentlog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *MsbDeployment) ValidateUpdate(old runtime.Object) error { + msbdeploymentlog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *MsbDeployment) ValidateDelete() error { + msbdeploymentlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil +} diff --git a/api/v1/webhook_suite_test.go b/api/v1/webhook_suite_test.go new file mode 100644 index 0000000..65db95e --- /dev/null +++ b/api/v1/webhook_suite_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
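The scaffolded `Default` and `Validate*` methods in `msbdeployment_webhook.go` above are deliberately left as TODOs. A minimal sketch of what filled-in logic could look like, assuming hypothetical `Replicas *int32` and `Image string` fields on `MsbDeploymentSpec` (the spec type is not shown in this patch):

```go
// Sketch only: Replicas and Image are assumed fields on
// MsbDeploymentSpec; adapt to the real spec. Requires "fmt" in the
// import block above.
func (r *MsbDeployment) Default() {
	msbdeploymentlog.Info("default", "name", r.Name)
	if r.Spec.Replicas == nil {
		one := int32(1)
		r.Spec.Replicas = &one // default to one replica when unset
	}
}

func (r *MsbDeployment) ValidateCreate() error {
	msbdeploymentlog.Info("validate create", "name", r.Name)
	if r.Spec.Image == "" {
		return fmt.Errorf("spec.image must not be empty")
	}
	if r.Spec.Replicas != nil && *r.Spec.Replicas < 0 {
		return fmt.Errorf("spec.replicas must be >= 0, got %d", *r.Spec.Replicas)
	}
	return nil
}
```

Returning a non-nil error from a `Validate*` method makes the API server reject the request; production webhooks usually build `field.Error` values and return `apierrors.NewInvalid` for richer client-side output.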
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + //+kubebuilder:scaffold:imports + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := runtime.NewScheme() + err = AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + err = admissionv1beta1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + LeaderElection: false, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&MsbDeployment{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 99a5bed..9df43e7 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + 
"k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 0000000..dbcef7d --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,39 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kuberentes.io/name: issuer + app.kubernetes.io/instance: selfsigned-issuer + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: mashibing-deployment + app.kubernetes.io/part-of: mashibing-deployment + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: mashibing-deployment + app.kubernetes.io/part-of: mashibing-deployment + app.kubernetes.io/managed-by: kustomize + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 0000000..bebea5a --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000..90d7c31 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000..738de35 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 0000000..9797caf --- /dev/null +++ 
b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,29 @@ +# This patch adds annotations to the admission webhook configs; +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: mutatingwebhookconfiguration + app.kubernetes.io/instance: mutating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: mashibing-deployment + app.kubernetes.io/part-of: mashibing-deployment + app.kubernetes.io/managed-by: kustomize + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: validatingwebhookconfiguration + app.kubernetes.io/instance: validating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: mashibing-deployment + app.kubernetes.io/part-of: mashibing-deployment + app.kubernetes.io/managed-by: kustomize + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 0000000..9cf2613 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000..25e21e3 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config teaches kustomize where to look when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly.
+nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 0000000..3cbf80d --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,20 @@ + +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: service + app.kubernetes.io/instance: webhook-service + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: mashibing-deployment + app.kubernetes.io/part-of: mashibing-deployment + app.kubernetes.io/managed-by: kustomize + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/go.mod b/go.mod index 4235532..7b29332 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( github.com/onsi/ginkgo v1.16.5 + github.com/onsi/ginkgo/v2 v2.6.0 github.com/onsi/gomega v1.24.1 github.com/spf13/viper v1.14.0 k8s.io/api v0.26.0 diff --git a/go.sum b/go.sum index f19f203..73c5149 100644 --- a/go.sum +++ b/go.sum @@ -247,6 +247,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= diff --git a/main.go b/main.go index b465017..aa75e1f 100644 --- a/main.go +++ b/main.go @@ -96,6 +96,10 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MsbDeployment") os.Exit(1) } + if err = (&appsv1.MsbDeployment{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "MsbDeployment") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000..7accdb0 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. 
For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. 
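Since the full funcr source is vendored above, a short orientation example may help. This sketch relies only on the `New` signature and the `Options` fields documented in the code (`LogTimestamp`, `Verbosity`, `RenderBuiltinsHook`); the printed output shape is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// New takes an arbitrary write function plus Options, per the docs above.
	log := funcr.New(
		func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{
			LogTimestamp: true, // adds a "ts" key to each line
			Verbosity:    1,    // V(0) and V(1) logs emitted, higher discarded
			// RenderBuiltinsHook lets callers rewrite the builtin
			// key-value pairs; here we prefix every builtin key, the
			// example use the field docs themselves suggest.
			RenderBuiltinsHook: func(kvList []interface{}) []interface{} {
				for i := 0; i < len(kvList); i += 2 {
					if k, ok := kvList[i].(string); ok {
						kvList[i] = "funcr." + k
					}
				}
				return kvList
			},
		},
	)

	log = log.WithName("webhook").WithValues("gvk", "apps/v1.MsbDeployment")
	log.Info("serving", "port", 9443)       // emitted: level 0 <= Verbosity
	log.V(2).Info("chatty internal detail") // discarded: 2 > Verbosity
}
```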
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+ buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `"<max-log-depth-exceeded>"` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = invokeMarshaler(v) + } + + // Handle types that want to format themselves. + switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` +
strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. + continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if i > 0 { + buf.WriteByte(',') + } + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. Unlike Go's standard JSON, we'll + // convert just about anything to a string. + keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations.
+func needsEscape(s string) bool { + for _, r := range s { + if !strconv.IsPrint(r) || r == '\\' || r == '"' { + return true + } + } + return false +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return s.String() +} + +func invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return e.Error() +} + +// Caller represents the original call site for a log line, after considering +// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and +// Line fields will always be provided, while the Func field is optional. +// Users can set the render hook fields in Options to examine logged key-value +// pairs, one of which will be {"caller", Caller} if the Options.LogCaller +// field is enabled for the given MessageClass. +type Caller struct { + // File is the basename of the file for this call site. + File string `json:"file"` + // Line is the line number in the file for this call site. + Line int `json:"line"` + // Func is the function name for this call site, or empty if + // Options.LogCallerFunc is not enabled. + Func string `json:"function,omitempty"` +} + +func (f Formatter) caller() Caller { + // +1 for this frame, +1 for Info/Error. + pc, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return Caller{"", 0, ""} + } + fn := "" + if f.opts.LogCallerFunc { + if fp := runtime.FuncForPC(pc); fp != nil { + fn = fp.Name() + } + } + + return Caller{filepath.Base(file), line, fn} +} + +const noValue = "<no-value>" + +func (f Formatter) nonStringKey(v interface{}) string { + return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) +} + +// snippet produces a short snippet string of an arbitrary value. +func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
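`fnlogger` above is the canonical example of embedding `Formatter`. As a sketch of the "Custom LogSinks" use case from the package comment, a sink that routes funcr's rendered output through the standard library logger might look like the following (the `stdSink` name and wiring are illustrative, not part of this patch):

```go
package main

import (
	"log"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// stdSink embeds funcr.Formatter, which supplies Init, Enabled, and the
// Format* methods; the sink only adds output glue, mirroring fnlogger.
type stdSink struct {
	funcr.Formatter
	out *log.Logger
}

func (s stdSink) WithName(name string) logr.LogSink {
	s.Formatter.AddName(name)
	return &s
}

func (s stdSink) WithValues(kvList ...interface{}) logr.LogSink {
	s.Formatter.AddValues(kvList)
	return &s
}

func (s stdSink) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := s.FormatInfo(level, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	s.out.Println(args)
}

func (s stdSink) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := s.FormatError(err, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	s.out.Println(args)
}

func main() {
	sink := &stdSink{
		Formatter: funcr.NewFormatter(funcr.Options{LogCaller: funcr.All}),
		out:       log.Default(),
	}
	logr.New(sink).WithName("demo").Info("hello", "answer", 42)
}
```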
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore
new file mode 100644
index 0000000..edf0231
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+TODO.md
+tmp/**/*
+*.coverprofile
+.vscode
+.idea/
+*.log
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
new file mode 100644
index 0000000..0b5ef20
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -0,0 +1,616 @@
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
+to build tooling on top of as it has stronger guarantees to be stable from version to version.
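+
+For example, a minimal sketch of wiring a custom JUnit report into a suite via `ReportAfterSuite` (the destination filename and the particular `JunitReportConfig` fields shown are illustrative assumptions):
+
+```go
+import (
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/onsi/ginkgo/v2/reporters"
+)
+
+var _ = ReportAfterSuite("custom junit report", func(report Report) {
+	// Drop spec labels and leaf-node types from the generated spec names
+	// (illustrative settings - see JunitReportConfig for the full set).
+	config := reporters.JunitReportConfig{
+		OmitSpecLabels:   true,
+		OmitLeafNodeType: true,
+	}
+	// Render the report (including each spec's timeline) as JUnit XML.
+	Expect(reporters.GenerateJUnitReportWithConfig(report, "custom-junit.xml", config)).To(Succeed())
+})
+```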
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple return values [5e33c75]
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+### Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+	// do things until `ctx.Done()` is closed, for example:
+	req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = http.DefaultClient.Do(req)
+	Expect(err).NotTo(HaveOccurred())
+
+	Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated.
Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+### Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+### Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+  As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
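+
+As a quick sketch of the new shape (the decorator and threshold chosen here are illustrative assumptions):
+
+```go
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+)
+
+// The decorator rides along *after* the callback, matching the new
+// trailing `args ...interface{}` parameter of the suite setup nodes.
+var _ = BeforeSuite(func() {
+	// expensive shared setup goes here
+}, PollProgressAfter(30*time.Second))
+```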
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update recommended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
+## 2.1.6
+
+### Fixes
+- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
+- chore: remove duplicate word in comments [7373214]
+
+## 2.1.5
+
+### Fixes
+- drop -mod=mod instructions; fixes #1026 [6ad7138]
+- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
+- remove stale importmap gcflags flag test [3cd8b93]
+- Always emit spec summary [5cf23e2] - even when only one spec has failed
+- Fix ReportAfterSuite usage in docs [b1864ad]
+- fixed typo (#997) [219cc00]
+- TrimRight is not designed to trim Suffix [71ebb74]
+- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
+- fix syntax in examples (#975) [b69554f]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
+- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
+- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
+- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
+- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
+- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
+
+## 2.1.4
+
+### Fixes
+- Numerous documentation typos
+- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
+- improve error message when a parallel process fails to report back [a7bd1fe]
+- guard against concurrent map writes in DeprecationTracker [0976569]
+- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
+- Fix ginkgo import circle [f779385]
+
+## 2.1.3
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Calling By in a container node now emits a useful error. [ff12cee]
+
+## 2.1.2
+
+### Fixes
+
+- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
+- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
+- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
+
+## 2.1.1
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
+
+## 2.1.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+2.1.0 is a minor release with a few tweaks:
+
+- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
+- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
+- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
+
+## 2.0.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
+
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
+## 1.16.2
+
+### Fixes
+- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=<version>` environment variable.
+
+## 1.16.1
+
+### Fixes
+- Suppress --stream deprecation warning on windows (#793)
+
+## 1.16.0
+
+### Features
+- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
+  - Update README.md to advertise that Ginkgo 2.0 is coming.
+  - Backport the 2.0 DeprecationTracker and start alerting users
+    about upcoming deprecations.
+
+- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
+
+- Fix accidental reference to 1488 (#784) [9fb7fe4]
+
+## 1.15.2
+
+### Fixes
+- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
+
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
+## 1.15.0
+
+### Features
+- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
+- Add support for using template to generate tests (#752) [efb9e69]
+- Add a Chinese Doc #755 (#756) [5207632]
+- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
+
+### Fixes
+- Add _internal to filename of tests created with internal flag (#751) [43c12da]
+
+## 1.14.2
+
+### Fixes
+- correct handling windows backslash in import path (#721) [97f3d51]
+- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
+
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
+## 1.14.0
+
+### Features
+- Defer running top-level container nodes until RunSpecs is called [d44dedf]
+- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
+- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
+- Print Skip reason in JUnit reporter if one was provided [820dfab]
+
+## 1.13.0
+
+### Features
+- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
+
+### Fixes
+- Ensure integration tests pass in an environment sans GOPATH [606fba2]
+- Add books package (#568) [fc0e44e]
+- doc(readme): installation via "tools package" (#677) [83bb20e]
+- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
+- Import package without dot (#687) [6321024]
+- Fix integration tests to stop require GOPATH (#686) [a912ec5]
+
+## 1.12.3
+
+### Fixes
+- Print correct code location of failing table test (#666) [c6d7afb]
+
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one.
this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo succesful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
+## 1.12.0
+
+### Features
+- Add module definition (#630) [78916ab]
+
+## 1.11.0
+
+### Features
+- Add syscall for riscv64 architecture [f66e896]
+- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
+- teamcity reporter: output newline after every service message (#625) [3cfa02d]
+- Add support for go module when running `generate` command (#578) [9c89e3f]
+
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
+## 1.10.1
+
+### Fixes
+- stack backtrace: fix skipping (#600) [2a4c0bd]
+
+## 1.10.0
+
+### Fixes
+- stack backtrace: fix alignment and skipping [66915d6]
+- fix typo in documentation [8f97b93]
+
+## 1.9.0
+
+### Features
+- Option to print output into report, when tests have passed [0545415]
+
+### Fixes
+- Fixed typos in comments [0ecbc58]
+- gofmt code [a7f8bfb]
+- Simplify code [7454d00]
+- Simplify concatenation, incrementation and function assignment [4825557]
+- Avoid unnecessary conversions [9d9403c]
+- JUnit: include more detailed information about panic [19cca4b]
+- Print help to stdout when the user asks for help [4cb7441]
+
+
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behavior [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile
flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters, register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name field to junit reported testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronize the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify`, a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+    ```golang
+    FDescribe("Some describe", func() {
+        It("A", func() {})
+
+        FIt("B", func() {})
+    })
+    ```
+
+  will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter` (see the sketch after this section's bug fixes).
+- Add support for precompiled tests:
+    - `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
+    - The compiled `package.test` file can be run directly. This runs the tests in series.
+    - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
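+
+A minimal sketch of `By` in use (the spec content is illustrative; `By` is the only API being demonstrated):
+
+```golang
+It("checks a book out of the library", func() {
+    By("adding the book to the catalog")
+    // ... setup steps are logged to the GinkgoWriter ...
+
+    By("checking the book out", func() {
+        // By can also be handed a function, which it runs immediately.
+    })
+})
+```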
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run test *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+ +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Ginkgo's internal to `internal` +- Rename `example` everywhere to `spec` +- Much more! diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md new file mode 100644 index 0000000..1da92fe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to Ginkgo + +Your contributions to Ginkgo are essential for its long-term maintenance and improvement. + +- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code! +- Ensure adequate test coverage: + - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). + - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. +- Make sure all the tests succeed via `ginkgo -r -p` +- Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. + +Thanks for supporting Ginkgo! \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 0000000..9415ee7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md new file mode 100644 index 0000000..d0473a4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -0,0 +1,115 @@ +![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png) + +[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) + +--- + +# Ginkgo + +Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly: + +```go +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + ... 
+)
+
+Describe("Checking books out of the library", Label("library"), func() {
+	var library *libraries.Library
+	var book *books.Book
+	var valjean *users.User
+	BeforeEach(func() {
+		library = libraries.NewClient()
+		book = &books.Book{
+			Title:  "Les Miserables",
+			Author: "Victor Hugo",
+		}
+		valjean = users.NewUser("Jean Valjean")
+	})
+
+	When("the library has the book in question", func() {
+		BeforeEach(func(ctx SpecContext) {
+			Expect(library.Store(ctx, book)).To(Succeed())
+		})
+
+		Context("and the book is available", func() {
+			It("lends it to the reader", func(ctx SpecContext) {
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Books()).To(ContainElement(book))
+				Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+			}, SpecTimeout(time.Second * 5))
+		})
+
+		Context("but the book has already been checked out", func() {
+			var javert *users.User
+			BeforeEach(func(ctx SpecContext) {
+				javert = users.NewUser("Javert")
+				Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+			})
+
+			It("tells the user", func(ctx SpecContext) {
+				err := valjean.Checkout(ctx, library, "Les Miserables")
+				Expect(err).To(MatchError("Les Miserables is currently checked out"))
+			}, SpecTimeout(time.Second * 5))
+
+			It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+				Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Holds(ctx)).To(ContainElement(book))
+
+				By("when Javert returns the book")
+				Expect(javert.Return(ctx, library, book)).To(Succeed())
+
+				By("it eventually informs Valjean")
+				notification := "Les Miserables is ready for pick up"
+				Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
+
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Books(ctx)).To(ContainElement(book))
+				Expect(valjean.Holds(ctx)).To(BeEmpty())
+			}, SpecTimeout(time.Second * 10))
+		})
+	})
+
+	When("the library does not have the book in question", func() {
+		It("tells the reader the book is unavailable", func(ctx SpecContext) {
+			err := valjean.Checkout(ctx, library, "Les Miserables")
+			Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
+		}, SpecTimeout(time.Second * 5))
+	})
+})
+```
+
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
+
+If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
+
+## Capabilities
+
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs.
[`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs). + +At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as + +```bash +ginkgo -p +``` + +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up. + +As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). + +Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development. + +And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers). + +Happy Testing! + +## License + +Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md new file mode 100644 index 0000000..363815d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -0,0 +1,23 @@ +A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. 
+ - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +1. Update `VERSION` in `types/version.go` +1. Commit, push, and release: + ``` + git commit -m "vM.m.p" + git push + gh release create "vM.m.p" + git fetch --tags origin master + ``` \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 0000000..a61021d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
new file mode 100644
index 0000000..5a08574
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -0,0 +1,741 @@
+/*
+Ginkgo is a testing framework for Go designed to help you write expressive tests.
+https://github.com/onsi/ginkgo
+MIT-Licensed
+
+The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
+build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
+You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
+
+Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
+
+You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
+that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
+or by running 'ginkgo help'.
+*/
+package ginkgo
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-logr/logr"
+	"github.com/onsi/ginkgo/v2/formatter"
+	"github.com/onsi/ginkgo/v2/internal"
+	"github.com/onsi/ginkgo/v2/internal/global"
+	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+	"github.com/onsi/ginkgo/v2/internal/parallel_support"
+	"github.com/onsi/ginkgo/v2/reporters"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+const GINKGO_VERSION = types.VERSION
+
+var flagSet types.GinkgoFlagSet
+var deprecationTracker = types.NewDeprecationTracker()
+var suiteConfig = types.NewDefaultSuiteConfig()
+var reporterConfig = types.NewDefaultReporterConfig()
+var suiteDidRun = false
+var outputInterceptor internal.OutputInterceptor
+var client parallel_support.Client
+
+func init() {
+	var err error
+	flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+	exitIfErr(err)
+	writer := internal.NewWriter(os.Stdout)
+	GinkgoWriter = writer
+	GinkgoLogr = internal.GinkgoLogrFunc(writer)
+}
+
+func exitIfErr(err error) {
+	if err != nil {
+		if outputInterceptor != nil {
+			outputInterceptor.Shutdown()
+		}
+		if client != nil {
+			client.Close()
+		}
+		fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+		os.Exit(1)
+	}
+}
+
+func exitIfErrors(errors []error) {
+	if len(errors) > 0 {
+		if outputInterceptor != nil {
+			outputInterceptor.Shutdown()
+		}
+		if client != nil {
+			client.Close()
+		}
+		for _, err := range errors {
+			fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+		}
+		os.Exit(1)
+	}
+}
+
+// The interface implemented by GinkgoWriter
+type GinkgoWriterInterface interface {
+	io.Writer
+
+	Print(a ...interface{})
+	Printf(format string, a ...interface{})
+	Println(a ...interface{})
+
+	TeeTo(writer io.Writer)
+	ClearTeeWriters()
+}
+
+/*
+SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
+
+You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
+
+Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
+*/
+type SpecContext = internal.SpecContext
+
+/*
+GinkgoWriter implements a GinkgoWriterInterface and io.Writer
+
+When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
+to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+only if the current test fails.
+
+GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
+Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers.
You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters() + +You can learn more at https://onsi.github.io/ginkgo/#logging-output +*/ +var GinkgoWriter GinkgoWriterInterface + +/* +GinkgoLogr is a logr.Logger that writes to GinkgoWriter +*/ +var GinkgoLogr logr.Logger + +// The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +/* +GinkgoConfiguration returns the configuration of the current suite. + +The first return value is the SuiteConfig which controls aspects of how the suite runs, +the second return value is the ReporterConfig which controls aspects of how Ginkgo's default +reporter emits output. + +Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need +to pass in your mutated copies into RunSpecs(). + +You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite +*/ +func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) { + return suiteConfig, reporterConfig +} + +/* +GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +useful for seeding your own pseudorandom number generators to ensure +consistent executions from run to run, where your tests contain variability (for +example, when selecting random spec data). + +You can learn more at https://onsi.github.io/ginkgo/#spec-randomization +*/ +func GinkgoRandomSeed() int64 { + return suiteConfig.RandomSeed +} + +/* +GinkgoParallelProcess returns the parallel process number for the current ginkgo process +The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared +resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs + +For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization +*/ +func GinkgoParallelProcess() int { + return suiteConfig.ParallelProcess +} + +/* +PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant +when running in parallel and output to stdout/stderr is being intercepted. You generally +don't need to call this function - however there are cases when Ginkgo's output interception +mechanisms can interfere with external processes launched by the test process. + +In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr +then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter. +If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process +then ResumeOutputInterception() after starting it. + +Note that PauseOutputInterception() does not cause stdout writes to print to the console - +this simply stops intercepting and storing stdout writes to an internal buffer. +*/ +func PauseOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.PauseIntercepting() +} + +// ResumeOutputInterception() - see docs for PauseOutputInterception() +func ResumeOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.ResumeIntercepting() +} + +/* +RunSpecs is the entry point for the Ginkgo spec runner. + +You must call this within a Golang testing TestX(t *testing.T) function. +If you bootstrapped your suite with "ginkgo bootstrap" this is already +done for you. + +Ginkgo is typically configured via command-line flags. 
This configuration +can be overridden, however, and passed into RunSpecs as optional arguments: + + func TestMySuite(t *testing.T) { + RegisterFailHandler(gomega.Fail) + // fetch the current config + suiteConfig, reporterConfig := GinkgoConfiguration() + // adjust it + suiteConfig.SkipStrings = []string{"NEVER-RUN"} + reporterConfig.FullTrace = true + // pass it in to RunSpecs + RunSpecs(t, "My Suite", suiteConfig, reporterConfig) + } + +Note that some configuration changes can lead to undefined behavior. For example, +you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible +for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization +for more on how specs are parallelized in Ginkgo. + +You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite. +*/ +func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { + if suiteDidRun { + exitIfErr(types.GinkgoErrors.RerunningSuite()) + } + suiteDidRun = true + + suiteLabels := Labels{} + configErrors := []error{} + for _, arg := range args { + switch arg := arg.(type) { + case types.SuiteConfig: + suiteConfig = arg + case types.ReporterConfig: + reporterConfig = arg + case Labels: + suiteLabels = append(suiteLabels, arg...) + default: + configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) + } + } + exitIfErrors(configErrors) + + configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig) + if len(configErrors) > 0 { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n")) + for _, err := range configErrors { + fmt.Fprintf(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } + + var reporter reporters.Reporter + if suiteConfig.ParallelTotal == 1 { + reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut) + outputInterceptor = internal.NoopOutputInterceptor{} + client = nil + } else { + reporter = reporters.NoopReporter{} + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "swap": + outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor() + case "none": + outputInterceptor = internal.NoopOutputInterceptor{} + default: + outputInterceptor = internal.NewOutputInterceptor() + } + client = parallel_support.NewClient(suiteConfig.ParallelHost) + if !client.Connect() { + client = nil + exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost)) + } + defer client.Close() + } + + writer := GinkgoWriter.(*internal.Writer) + if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 { + writer.SetMode(internal.WriterModeStreamAndBuffer) + } else { + writer.SetMode(internal.WriterModeBufferOnly) + } + + if reporterConfig.WillGenerateReport() { + registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig) + } + + err := global.Suite.BuildTree() + exitIfErr(err) + + suitePath, err := os.Getwd() + exitIfErr(err) + suitePath, err = filepath.Abs(suitePath) + exitIfErr(err) + + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + outputInterceptor.Shutdown() + + flagSet.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + 
fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport()) + } + + if !passed { + t.Fail() + } + + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +/* +Skip instructs Ginkgo to skip the current spec + +You can call Skip in any Setup or Subject node closure. + +For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs +*/ +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Skip(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) + +Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with +the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must +add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail. + +You can call Fail in any Setup or Subject node closure. + +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Fail(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite. + +You can call AbortSuite in any Setup or Subject node closure. + +You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites +*/ +func AbortSuite(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.AbortSuite(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +calls out to Gomega + +Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you, +however if a panic originates on a goroutine *launched* from one of your specs there's no +way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. 
+ +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func GinkgoRecover() { + e := recover() + if e != nil { + global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + } +} + +// pushNode is used by the various test construction DSL methods to push nodes onto the suite. +// It handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits. +func pushNode(node internal.Node, errors []error) bool { + exitIfErrors(errors) + exitIfErr(global.Suite.PushNode(node)) + return true +} + +/* +Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of +Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It). + +Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic +to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens. + +You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes +In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Describe(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +FDescribe focuses specs within the Describe block. +*/ +func FDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +PDescribe marks specs within the Describe block as pending. +*/ +func PDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +XDescribe marks specs within the Describe block as pending. + +XDescribe is an alias for PDescribe. +*/ +var XDescribe = PDescribe + +/* Context is an alias for Describe - it generates the exact same kind of Container node */ +var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func When(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* FWhen is an alias for FDescribe - it generates the exact same kind of focused Container node */ +func FWhen(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* PWhen is an alias for PDescribe - it generates the exact same kind of pending Container node */ +func PWhen(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +var XWhen = PWhen + +/* +It nodes are Subject nodes that contain your spec code and assertions. + +Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
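+
+For example, a minimal sketch (Add is an arbitrary function under test):
+
+	Describe("Add", func() {
+		It("sums two integers", func() {
+			Expect(Add(1, 2)).To(Equal(3))
+		})
+	})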
+ +You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal. + +You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it +In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func It(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +FIt allows you to focus an individual It. +*/ +func FIt(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +PIt allows you to mark an individual It as pending. +*/ +func PIt(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +XIt allows you to mark an individual It as pending. + +XIt is an alias for PIt. +*/ +var XIt = PIt + +/* +Specify is an alias for It - it can allow for more natural wording in some contexts. +*/ +var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt + +/* +By allows you to better document complex Specs. + +Generally you should try to keep your Its short and to the point. This is not always possible, however, +especially in the context of integration tests that capture complex or lengthy workflows. + +By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...) +and will simply log the passed-in text to the GinkgoWriter. If By is handed a function it will immediately run the function. + +By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports. + +Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry. +You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by +*/ +func By(text string, callback ...func()) { + exitIfErr(global.Suite.By(text, callback...)) +} + +/* +BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run. +When running in parallel, each parallel process will call BeforeSuite. + +You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. + +BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func BeforeSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) +} + +/* +AfterSuite nodes are suite-level Setup nodes that run after all specs have finished - regardless of whether specs have passed or failed.
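+
+For example, a typical pairing with BeforeSuite (a sketch; tmpDir is illustrative):
+
+	var tmpDir string
+
+	var _ = BeforeSuite(func() {
+		var err error
+		tmpDir, err = os.MkdirTemp("", "suite")
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	var _ = AfterSuite(func() {
+		os.RemoveAll(tmpDir) // clean up what BeforeSuite created
+	})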
+AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs. + +When running in parallel, each parallel process will call AfterSuite. + +You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. + +AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func AfterSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) +} + +/* +SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information +from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing +information from that setup to all parallel processes. + +SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. +The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. + +The first function (which only runs on process #1) can have any of the following signatures: + + func() + func(ctx context.Context) + func(ctx SpecContext) + func() []byte + func(ctx context.Context) []byte + func(ctx SpecContext) []byte + +The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signatures: + + func() + func(ctx context.Context) + func(ctx SpecContext) + func(data []byte) + func(ctx context.Context, data []byte) + func(ctx SpecContext, data []byte) + +If either function receives a context.Context/SpecContext it is considered interruptible. + +You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure. +You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{process1Body, allProcessBody} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) +} + +/* +SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting cleanup into a piece that runs on all processes +and a piece that must only run once - on process #1. + +SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1 +and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until +all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext)). + +Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
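+
+For example, a sketch that shares a singleton's address across processes (startServer, stopServer, connect, and Client are illustrative):
+
+	var client *Client
+
+	var _ = SynchronizedBeforeSuite(func() []byte {
+		// runs once, on process #1: boot the shared server
+		return []byte(startServer())
+	}, func(addr []byte) {
+		// runs on every process: connect to the shared server
+		client = connect(string(addr))
+	})
+
+	var _ = SynchronizedAfterSuite(func() {
+		client.Close() // every process disconnects first...
+	}, func() {
+		stopServer() // ...then process #1 tears the server down
+	})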
+ +You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure. +You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{allProcessBody, process1Body} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) +} + +/* +BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes +are defined in nested Container nodes, the outermost BeforeEach node closures are run first. + +BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach +*/ +func BeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) +} + +/* +JustBeforeEach nodes are similar to BeforeEach nodes; however, they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure. +This can allow you to separate configuration from creation of resources for a spec. + +JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach +*/ +func JustBeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) +} + +/* +AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes +are defined in nested Container nodes, the innermost AfterEach node closures are run first. + +Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. + +AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within an AfterEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup +*/ +func AfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) +} + +/* +JustAfterEach nodes are similar to AfterEach nodes; however, they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. + +JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach +*/ +func JustAfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) +} + +/* +BeforeAll nodes are Setup nodes that can occur inside Ordered containers.
They run just once before any specs in the Ordered container run. + +Multiple BeforeAll nodes can be defined in a given Ordered container; however, they cannot be nested inside any other container. + +BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func BeforeAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) +} + +/* +AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run. + +Multiple AfterAll nodes can be defined in a given Ordered container; however, they cannot be nested inside any other container. + +Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. + +AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + +You cannot nest any other Ginkgo nodes within an AfterAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func AfterAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) +} + +/* +DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to clean up after the spec. + +DeferCleanup can be passed: +1. A function that takes no arguments and returns no values. +2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error, `DeferCleanup` will fail the spec. +3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt. +4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. +5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. + +For example: + + BeforeEach(func() { + DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO")) + os.Setenv("FOO", "BAR") + }) + +will register a cleanup handler that restores the environment variable "FOO" to its original value (captured by os.Getenv("FOO") when the handler is registered) after the spec runs, and then sets the environment variable "FOO" to "BAR" for the current spec. + +Similarly: + + BeforeEach(func() { + DeferCleanup(func(ctx SpecContext, path string) { + req, err := http.NewRequestWithContext(ctx, "POST", path, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = http.DefaultClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + }, "example.com/cleanup", NodeTimeout(time.Second*3)) + }) + +will register a cleanup handler that will have three seconds to successfully complete a request to the specified path.
Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)). + +When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node) +When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node) +When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node) + +Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node. +You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup +*/ +func DeferCleanup(args ...interface{}) { + fail := func(message string, cl types.CodeLocation) { + global.Failer.Fail(message, cl) + } + pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...)) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go new file mode 100644 index 0000000..e43d9cb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -0,0 +1,133 @@ +package ginkgo + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +/* +Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question. + +You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type Offset = internal.Offset + +/* +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. + +You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type FlakeAttempts = internal.FlakeAttempts + +/* +MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail. + +You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type MustPassRepeatedly = internal.MustPassRepeatedly + +/* +Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. + +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Focus = internal.Focus + +/* +Pending is a decorator that allows you to mark a spec or container as pending. 
Identical to PIt and PDescribe. + +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Pending = internal.Pending + +/* +Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. +Tests in ordered containers cannot be marked as serial - mark the ordered container instead. + +You can learn more here: https://onsi.github.io/ginkgo/#serial-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Serial = internal.Serial + +/* +Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear. +They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs. + +You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Ordered = internal.Ordered + +/* +OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once +per ordered context. Normally these setup nodes run around each individual spec; with OncePerOrdered they will run once around the set of specs in an ordered container. +The behavior for non-Ordered containers/specs is unchanged. + +You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const OncePerOrdered = internal.OncePerOrdered + +/* +Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/". +Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels are the union of all labels in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Label(labels ...string) Labels { + return Labels(labels) +} + +/* +Labels are the type for spec Label decorators. Use Label(...) to construct Labels. +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +*/ +type Labels = internal.Labels + +/* +PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. + +Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec. +*/ +type PollProgressAfter = internal.PollProgressAfter + +/* +PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node. + +Once a node has been running for longer than PollProgressAfter, Ginkgo will emit node progress periodically at an interval of PollProgressInterval. +*/ +type PollProgressInterval = internal.PollProgressInterval + +/* +NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
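+
+For example, a sketch (fetchStatus is an arbitrary function under test):
+
+	It("talks to the server", func(ctx SpecContext) {
+		// ctx is cancelled if this node runs longer than 500ms
+		Expect(fetchStatus(ctx)).To(Equal("ok"))
+	}, NodeTimeout(500*time.Millisecond))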
+ +If the node does not exit within the specified NodeTimeout, its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod, Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type NodeTimeout = internal.NodeTimeout + +/* +SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes. + +All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout. + +If the spec does not complete within the specified SpecTimeout, the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod, Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type SpecTimeout = internal.SpecTimeout + +/* +GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period CLI flag and a GracePeriod decorator are specified, the value in the decorator will take precedence. + +Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user. +*/ +type GracePeriod = internal.GracePeriod + +/* +SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise, particularly +if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. +*/ +const SuppressProgressReporting = internal.SuppressProgressReporting diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go new file mode 100644 index 0000000..f912bbe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -0,0 +1,135 @@ +package ginkgo + +import ( + "time" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Deprecated: Done Channel for asynchronous testing + +The Done channel pattern is no longer supported in Ginkgo 2.0. +See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing + +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing +*/ +type Done = internal.Done + +/* +Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0. + +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +type Reporter = reporters.DeprecatedReporter + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0.
RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs(). + +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs(). + +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: GinkgoTestDescription has been replaced with SpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type DeprecatedGinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + FileName string + LineNumber int + + Failed bool + Duration time.Duration +} +type GinkgoTestDescription = DeprecatedGinkgoTestDescription + +/* +Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription { + deprecationTracker.TrackDeprecation( + types.Deprecations.CurrentGinkgoTestDescription(), + types.NewCodeLocation(1), + ) + report := global.Suite.CurrentSpecReport() + if report.State == types.SpecStateInvalid { + return GinkgoTestDescription{} + } + componentTexts := []string{} + componentTexts = append(componentTexts, report.ContainerHierarchyTexts...) + componentTexts = append(componentTexts, report.LeafNodeText) + + return DeprecatedGinkgoTestDescription{ + ComponentTexts: componentTexts, + FullTestText: report.FullText(), + TestText: report.LeafNodeText, + FileName: report.LeafNodeLocation.FileName, + LineNumber: report.LeafNodeLocation.LineNumber, + Failed: report.State.Is(types.SpecStateFailureStates), + Duration: report.RunTime, + } +} + +/* +Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess(). +*/ +func GinkgoParallelNode() int { + deprecationTracker.TrackDeprecation( + types.Deprecations.ParallelNode(), + types.NewCodeLocation(1), + ) + return GinkgoParallelProcess() +} + +/* +Deprecated: Benchmarker has been removed from Ginkgo 2.0. + +Use Gomega's gmeasure package instead.
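+
+For example, a sketch of a gmeasure experiment (sortSomething is an arbitrary function to measure):
+
+	It("measures sorting", func() {
+		experiment := gmeasure.NewExperiment("sorting")
+		AddReportEntry(experiment.Name, experiment)
+		experiment.MeasureDuration("sort", func() {
+			sortSomething()
+		})
+	})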
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +/* +Deprecated: Measure() has been removed from Ginkgo 2.0 + +Use Gomega's gmeasure package instead. +You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +func Measure(_ ...interface{}) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1)) + return true +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 0000000..778bfd7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 0000000..dd1d143 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 
60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := 
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + 
case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var 
n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 0000000..43b1621 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,195 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) +} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": "\x1b[38;5;9m", + "orange": "\x1b[38;5;214m", + "coral": "\x1b[38;5;204m", + "magenta": "\x1b[38;5;13m", + "green": "\x1b[38;5;10m", + "dark-green": "\x1b[38;5;28m", + "yellow": "\x1b[38;5;11m", + "light-yellow": "\x1b[38;5;228m", + "cyan": "\x1b[38;5;14m", + "gray": "\x1b[38;5;243m", + "light-gray": "\x1b[38;5;246m", + "blue": "\x1b[38;5;12m", + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := fmt.Sprintf(f.style(format), args...) 
+ + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go new file mode 100644 index 0000000..1beeb11 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -0,0 +1,45 @@ +package ginkgo + +import "github.com/onsi/ginkgo/v2/internal/testingtproxy" + +/* +GinkgoT() implements an interface analogous to *testing.T and can be used with +third-party libraries that accept *testing.T through an interface. + +GinkgoT() takes an optional offset argument that can be used to get the +correct line number associated with the failure. + +You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries +*/ +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset) +} + +/* +The interface returned by GinkgoT(). This covers most of the methods in the testing package's T. 
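+
+For example, GinkgoT() can be handed to a library that accepts *testing.T through an interface (a sketch, assuming the gomock library):
+
+	ctrl := gomock.NewController(GinkgoT())
+	defer ctrl.Finish()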
+*/
+type GinkgoTInterface interface {
+ Cleanup(func())
+ Setenv(key, value string)
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Helper()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+ Name() string
+ Parallel()
+ Skip(args ...interface{})
+ SkipNow()
+ Skipf(format string, args ...interface{})
+ Skipped() bool
+ TempDir() string
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go new file mode 100644 index 0000000..712d85a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go @@ -0,0 +1,9 @@ +package internal
+
+func MakeIncrementingIndexCounter() func() (int, error) {
+ idx := -1
+ return func() (int, error) {
+ idx += 1
+ return idx, nil
+ }
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go new file mode 100644 index 0000000..e9bd956 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go @@ -0,0 +1,99 @@ +package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Failer struct {
+ lock *sync.Mutex
+ failure types.Failure
+ state types.SpecState
+}
+
+func NewFailer() *Failer {
+ return &Failer{
+ lock: &sync.Mutex{},
+ state: types.SpecStatePassed,
+ }
+}
+
+func (f *Failer) GetState() types.SpecState {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.state
+}
+
+func (f *Failer) GetFailure() types.Failure {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.failure
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStatePanicked
+ f.failure = types.Failure{
+ Message: "Test Panicked",
+ Location: location,
+ ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+ }
+ }
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateFailed
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateSkipped
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateAborted
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Drain() (types.SpecState, types.Failure) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ failure := f.failure
+ outcome := f.state
+
+ f.state = types.SpecStatePassed
+ f.failure = types.Failure{}
+
+ return outcome, failure
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go new file mode 100644 index 0000000..966ea0c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -0,0 +1,125 @@ +package internal
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+ If a container marked as focused has a descendant that is also marked as focused, Ginkgo's policy is to
+ unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
+ It is common to focus a container to run just a subset of specs, and then to narrow the focus to specific specs within that container. This policy lets the developer simply focus those specific specs without having to go back and remove the focus from the container:
+
+ As a common example, consider:
+
+ FDescribe("something to debug", func() {
+ It("works", func() {...})
+ It("works", func() {...})
+ FIt("doesn't work", func() {...})
+ It("works", func() {...})
+ })
+
+ Here the developer's intent is to focus on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
+ The nested policy applied by this function enables this behavior.
+*/
+func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
+ var walkTree func(tree *TreeNode) bool
+ walkTree = func(tree *TreeNode) bool {
+ if tree.Node.MarkedPending {
+ return false
+ }
+ hasFocusedDescendant := false
+ for _, child := range tree.Children {
+ childHasFocus := walkTree(child)
+ hasFocusedDescendant = hasFocusedDescendant || childHasFocus
+ }
+ tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
+ return tree.Node.MarkedFocus || hasFocusedDescendant
+ }
+
+ walkTree(tree)
+}
+
+/*
+ Ginkgo supports focusing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus".
+ It also supports focusing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text,
+ and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
+
+ If any of the CLI flags are provided they take precedence. The file filters run first, followed by the regex filters.
+
+ This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
+ - If there are no CLI arguments and no programmatic focus, do nothing.
+ - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
+ - If there are CLI arguments, parse them and skip any specs that either don't match the focus filters or do match the skip filters.
+
+ *Note:* specs with pending nodes are Skipped when created by NewSpec.
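+
+ For example, an illustrative run (flag names as above, not taken from this file):
+
+ ginkgo -focus-files=integration -focus="dog" -skip="cat"
+
+ first keeps only specs declared in files matching "integration", then keeps specs whose text matches "dog", and finally skips any spec whose text matches "cat".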
+*/ +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { + focusString := strings.Join(suiteConfig.FocusStrings, "|") + skipString := strings.Join(suiteConfig.SkipStrings, "|") + + hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" + + type SkipCheck func(spec Spec) bool + + // by default, skip any specs marked pending + skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} + hasProgrammaticFocus := false + + if !hasFocusCLIFlags { + // check for programmatic focus + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + hasProgrammaticFocus = true + break + } + } + } + + if suiteConfig.LabelFilter != "" { + labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + }) + } + + if len(suiteConfig.FocusFiles) > 0 { + focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if len(suiteConfig.SkipFiles) > 0 { + skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if focusString != "" { + // skip specs that don't match the focus string + re := regexp.MustCompile(focusString) + skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) }) + } + + if skipString != "" { + // skip specs that match the skip string + re := regexp.MustCompile(skipString) + skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) }) + } + + // skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status + processedSpecs := Specs{} + for _, spec := range specs { + for _, skipCheck := range skipChecks { + if skipCheck(spec) { + spec.Skip = true + break + } + } + processedSpecs = append(processedSpecs, spec) + } + + return processedSpecs, hasProgrammaticFocus +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go new file mode 100644 index 0000000..f2c0fd8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go @@ -0,0 +1,17 @@ +package global + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +var Suite *internal.Suite +var Failer *internal.Failer + +func init() { + InitializeGlobals() +} + +func InitializeGlobals() { + Failer = internal.NewFailer() + Suite = internal.NewSuite() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go new file mode 100644 index 0000000..5c782d3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -0,0 +1,363 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type runOncePair struct { + //nodeId should only run once... 
+ nodeID uint + nodeType types.NodeType + //...for specs in a hierarchy that includes this context + containerID uint +} + +func (pair runOncePair) isZero() bool { + return pair.nodeID == 0 +} + +func runOncePairForNode(node Node, containerID uint) runOncePair { + return runOncePair{ + nodeID: node.ID, + nodeType: node.NodeType, + containerID: containerID, + } +} + +type runOncePairs []runOncePair + +func runOncePairsForSpec(spec Spec) runOncePairs { + pairs := runOncePairs{} + + containers := spec.Nodes.WithType(types.NodeTypeContainer) + for _, node := range spec.Nodes { + if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID)) + } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered { + passedIntoAnOrderedContainer := false + firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool { + passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered + return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer + }) + if firstOrderedContainerDeeperThanNode.IsZero() { + continue + } + pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID)) + } + } + + return pairs +} + +func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair { + for i := range pairs { + if pairs[i].nodeID == nodeID { + return pairs[i] + } + } + return runOncePair{} +} + +func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool { + for i := range pairs { + if pairs[i] == pair { + return true + } + } + return false +} + +func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs { + count := 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(runOncePairs, count), 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + out[j] = pairs[i] + j++ + } + } + return out +} + +type group struct { + suite *Suite + specs Specs + runOncePairs map[uint]runOncePairs + runOnceTracker map[runOncePair]types.SpecState + + succeeded bool +} + +func newGroup(suite *Suite) *group { + return &group{ + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + } +} + +func (g *group) initialReportForSpec(spec Spec) types.SpecReport { + return types.SpecReport{ + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + } +} + +func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { + if spec.Nodes.HasNodeMarkedPending() { + return 
types.SpecStatePending, types.Failure{} + } + if spec.Skip { + return types.SpecStateSkipped, types.Failure{} + } + if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll { + return types.SpecStateSkipped, types.Failure{} + } + if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { + return types.SpecStateSkipped, types.Failure{} + } + if !g.succeeded { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because an earlier spec in an ordered container failed") + } + beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) + for _, pair := range beforeOncePairs { + if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType)) + } + } + if g.suite.config.DryRun { + return types.SpecStatePassed, types.Failure{} + } + return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure +} + +func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { + lastSpecID := uint(0) + for idx := range g.specs { + if g.specs[idx].Skip { + continue + } + sID := g.specs[idx].SubjectID() + if g.runOncePairs[sID].hasRunOncePair(pair) { + lastSpecID = sID + } + } + return lastSpecID == specID +} + +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { + pairs := g.runOncePairs[spec.SubjectID()] + + nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) + nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) + terminatingNode, terminatingPair := Node{}, runOncePair{} + + deadline := time.Time{} + if spec.SpecTimeout() > 0 { + deadline = time.Now().Add(spec.SpecTimeout()) + } + + for _, node := range nodes { + oncePair := pairs.runOncePairFor(node.ID) + if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { + continue + } + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if !oncePair.isZero() { + g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State + } + if g.suite.currentSpecReport.State != types.SpecStatePassed { + terminatingNode, terminatingPair = node, oncePair + break + } + } + + afterNodeWasRun := map[uint]bool{} + includeDeferCleanups := false + for { + nodes := spec.Nodes.WithType(types.NodeTypeAfterEach) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel() + nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...) + if !terminatingNode.IsZero() { + nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel) + } + if includeDeferCleanups { + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...) + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...) 
+ }
+ nodes = nodes.Filter(func(node Node) bool {
+ if afterNodeWasRun[node.ID] {
+ //this node has already been run on this attempt, don't rerun it
+ return false
+ }
+ pair := runOncePair{}
+ switch node.NodeType {
+ case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
+ // check if we were generated in an AfterNode that has already run
+ if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
+ return true // we were, so we should definitely run this cleanup now
+ }
+ // looks like this cleanup node was generated by a before node or an It.
+ // the run-once status of a cleanup node is governed by the run-once status of its generator
+ pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
+ default:
+ pair = pairs.runOncePairFor(node.ID)
+ }
+ if pair.isZero() {
+ // this node is not governed by any run-once policy, we should run it
+ return true
+ }
+ // it's our last chance to run if we're the last spec for our oncePair
+ isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
+
+ switch g.suite.currentSpecReport.State {
+ case types.SpecStatePassed: //this attempt is passing...
+ return isLastSpecWithPair //...we should run once if this is our last chance
+ case types.SpecStateSkipped: //the spec was skipped by the user...
+ if isLastSpecWithPair {
+ return true //...we're the last spec, so we should run the AfterNode
+ }
+ if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
+ return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
+ }
+ case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
+ if isFinalAttempt {
+ return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
+ }
+ if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be running again
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
+ return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
+ } else {
+ return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
+ }
+ }
+ case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
+ return true //...that means the test run is over and we should clean up the stack.
Run the AfterNode + } + return false + }) + + if len(nodes) == 0 && includeDeferCleanups { + break + } + + for _, node := range nodes { + afterNodeWasRun[node.ID] = true + state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { + g.suite.currentSpecReport.State = state + g.suite.currentSpecReport.Failure = failure + } else if state.Is(types.SpecStateFailureStates) { + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure}) + } + } + includeDeferCleanups = true + } + +} + +func (g *group) run(specs Specs) { + g.specs = specs + for _, spec := range g.specs { + g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) + } + + for _, spec := range g.specs { + g.suite.selectiveLock.Lock() + g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.selectiveLock.Unlock() + + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) + g.suite.reporter.WillRun(g.suite.currentSpecReport) + g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) + + skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) + + g.suite.currentSpecReport.StartTime = time.Now() + if !skip { + + var maxAttempts = 1 + + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + maxAttempts = max(1, spec.MustPassRepeatedly()) + } else if g.suite.config.FlakeAttempts > 0 { + maxAttempts = g.suite.config.FlakeAttempts + g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts + } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + maxAttempts = max(1, spec.FlakeAttempts()) + } + + for attempt := 0; attempt < maxAttempts; attempt++ { + g.suite.currentSpecReport.NumAttempts = attempt + 1 + g.suite.writer.Truncate() + g.suite.outputInterceptor.StartInterceptingOutput() + if attempt > 0 { + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) + } + } + + g.attemptSpec(attempt == maxAttempts-1, spec) + + g.suite.currentSpecReport.EndTime = time.Now() + g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) + g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) + g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() + + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) { + break + } + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } else if attempt < maxAttempts-1 { + af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} + af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) + 
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) + } + } + } + } + + g.suite.reportEach(spec, types.NodeTypeReportAfterEach) + g.suite.processCurrentSpecReport() + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + g.succeeded = false + } + g.suite.selectiveLock.Lock() + g.suite.currentSpecReport = types.SpecReport{} + g.suite.selectiveLock.Unlock() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 0000000..ac6f510 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,162 @@ +package interrupt_handler + +import ( + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +const ABORT_POLLING_INTERVAL = 500 * time.Millisecond + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + InterruptCauseSignal + InterruptCauseAbortByOtherProcess +) + +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal +} + +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + client: client, + signals: signals, + } + handler.registerForInterrupts() + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts() { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, handler.signals...) 
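+ // Each successive signal escalates the interrupt level in the goroutine below (cleanup-and-report, then report-only, then bail-out), so e.g. a second Ctrl-C while cleanup is running skips straight to reporting.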
+
+ // cross-process abort handling
+ var abortChannel chan interface{}
+ if handler.client != nil {
+ abortChannel = make(chan interface{})
+ go func() {
+ pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
+ for {
+ select {
+ case <-pollTicker.C:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.stop:
+ pollTicker.Stop()
+ return
+ }
+ }
+ }()
+ }
+
+ go func(abortChannel chan interface{}) {
+ var interruptCause InterruptCause
+ for {
+ select {
+ case <-signalChannel:
+ interruptCause = InterruptCauseSignal
+ case <-abortChannel:
+ interruptCause = InterruptCauseAbortByOtherProcess
+ case <-handler.stop:
+ signal.Stop(signalChannel)
+ return
+ }
+ abortChannel = nil
+
+ handler.lock.Lock()
+ oldLevel := handler.level
+ handler.cause = interruptCause
+ if handler.level == InterruptLevelUninterrupted {
+ handler.level = InterruptLevelCleanupAndReport
+ } else if handler.level == InterruptLevelCleanupAndReport {
+ handler.level = InterruptLevelReportOnly
+ } else if handler.level == InterruptLevelReportOnly {
+ handler.level = InterruptLevelBailOut
+ }
+ if handler.level != oldLevel {
+ close(handler.c)
+ handler.c = make(chan interface{})
+ }
+ handler.lock.Unlock()
+ }
+ }(abortChannel)
+}
+
+func (handler *InterruptHandler) Status() InterruptStatus {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ return InterruptStatus{
+ Level: handler.level,
+ Channel: handler.c,
+ Cause: handler.cause,
+ }
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 0000000..bf0de49 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func SwallowSigQuit() {
+ c := make(chan os.Signal, 1024)
+ signal.Notify(c, syscall.SIGQUIT)
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 0000000..fcf8da8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows
+// +build windows
+
+package interrupt_handler
+
+func SwallowSigQuit() {
+ //noop
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go new file mode 100644 index 0000000..69928ee --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -0,0 +1,895 @@ +package internal
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "time"
+
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _global_node_id_counter = uint(0)
+var _global_id_mutex = &sync.Mutex{}
+
+func UniqueNodeID() uint {
+ //There's a race in the internal integration tests if we don't make
+ //accessing _global_node_id_counter safe across goroutines.
+ _global_id_mutex.Lock() + defer _global_id_mutex.Unlock() + _global_node_id_counter += 1 + return _global_node_id_counter +} + +type Node struct { + ID uint + NodeType types.NodeType + + Text string + Body func(SpecContext) + CodeLocation types.CodeLocation + NestingLevel int + HasContext bool + + SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte + SynchronizedBeforeSuiteProc1BodyHasContext bool + SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte) + SynchronizedBeforeSuiteAllProcsBodyHasContext bool + + SynchronizedAfterSuiteAllProcsBody func(SpecContext) + SynchronizedAfterSuiteAllProcsBodyHasContext bool + SynchronizedAfterSuiteProc1Body func(SpecContext) + SynchronizedAfterSuiteProc1BodyHasContext bool + + ReportEachBody func(types.SpecReport) + ReportSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration + + NodeIDWhereCleanupWasGenerated uint +} + +// Decoration Types +type focusType bool +type pendingType bool +type serialType bool +type orderedType bool +type honorsOrderedType bool +type suppressProgressReporting bool + +const Focus = focusType(true) +const Pending = pendingType(true) +const Serial = serialType(true) +const Ordered = orderedType(true) +const OncePerOrdered = honorsOrderedType(true) +const SuppressProgressReporting = suppressProgressReporting(true) + +type FlakeAttempts uint +type MustPassRepeatedly uint +type Offset uint +type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing +type Labels []string +type PollProgressInterval time.Duration +type PollProgressAfter time.Duration +type NodeTimeout time.Duration +type SpecTimeout time.Duration +type GracePeriod time.Duration + +func UnionOfLabels(labels ...Labels) Labels { + out := Labels{} + seen := map[string]bool{} + for _, labelSet := range labels { + for _, label := range labelSet { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { + decorations := []interface{}{} + remainingArgs := []interface{}{} + for _, arg := range args { + if isDecoration(arg) { + decorations = append(decorations, arg) + } else { + remainingArgs = append(remainingArgs, arg) + } + } + return decorations, remainingArgs +} + +func isDecoration(arg interface{}) bool { + switch t := reflect.TypeOf(arg); { + case t == nil: + return false + case t == reflect.TypeOf(Offset(0)): + return true + case t == reflect.TypeOf(types.CodeLocation{}): + return true + case t == reflect.TypeOf(Focus): + return true + case t == reflect.TypeOf(Pending): + return true + case t == reflect.TypeOf(Serial): + return true + case t == reflect.TypeOf(Ordered): + return true + case t == reflect.TypeOf(OncePerOrdered): + return true + case t == reflect.TypeOf(SuppressProgressReporting): + return true + case t == reflect.TypeOf(FlakeAttempts(0)): + return true + case t == reflect.TypeOf(MustPassRepeatedly(0)): + return true + case t == reflect.TypeOf(Labels{}): + return true + case t == reflect.TypeOf(PollProgressInterval(0)): + return true + case t == reflect.TypeOf(PollProgressAfter(0)): + return true + case t == reflect.TypeOf(NodeTimeout(0)): + return true + case t == reflect.TypeOf(SpecTimeout(0)): + 
return true + case t == reflect.TypeOf(GracePeriod(0)): + return true + case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): + return true + default: + return false + } +} + +func isSliceOfDecorations(slice interface{}) bool { + vSlice := reflect.ValueOf(slice) + if vSlice.Len() == 0 { + return false + } + for i := 0; i < vSlice.Len(); i++ { + if !isDecoration(vSlice.Index(i).Interface()) { + return false + } + } + return true +} + +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + +func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { + baseOffset := 2 + node := Node{ + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + PollProgressAfter: -1, + PollProgressInterval: -1, + GracePeriod: -1, + } + + errors := []error{} + appendError := func(err error) { + if err != nil { + errors = append(errors, err) + } + } + + args = unrollInterfaceSlice(args) + + remainingArgs := []interface{}{} + //First get the CodeLocation up-to-date + for _, arg := range args { + switch v := arg.(type) { + case Offset: + node.CodeLocation = types.NewCodeLocation(baseOffset + int(v)) + case types.CodeLocation: + node.CodeLocation = v + default: + remainingArgs = append(remainingArgs, arg) + } + } + + labelsSeen := map[string]bool{} + trackedFunctionError := false + args = remainingArgs + remainingArgs = []interface{}{} + //now process the rest of the args + for _, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(float64(0)): + break //ignore deprecated timeouts + case t == reflect.TypeOf(Focus): + node.MarkedFocus = bool(arg.(focusType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus")) + } + case t == reflect.TypeOf(Pending): + node.MarkedPending = bool(arg.(pendingType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending")) + } + case t == reflect.TypeOf(Serial): + node.MarkedSerial = bool(arg.(serialType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial")) + } + case t == reflect.TypeOf(Ordered): + node.MarkedOrdered = bool(arg.(orderedType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) + } + case t == reflect.TypeOf(OncePerOrdered): + node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) + if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) + } + case t == reflect.TypeOf(SuppressProgressReporting): + deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) + case t == reflect.TypeOf(FlakeAttempts(0)): + node.FlakeAttempts = int(arg.(FlakeAttempts)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) + } + case t == reflect.TypeOf(MustPassRepeatedly(0)): + node.MustPassRepeatedly = 
int(arg.(MustPassRepeatedly)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly")) + } + case t == reflect.TypeOf(PollProgressAfter(0)): + node.PollProgressAfter = time.Duration(arg.(PollProgressAfter)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter")) + } + case t == reflect.TypeOf(PollProgressInterval(0)): + node.PollProgressInterval = time.Duration(arg.(PollProgressInterval)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval")) + } + case t == reflect.TypeOf(NodeTimeout(0)): + node.NodeTimeout = time.Duration(arg.(NodeTimeout)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout")) + } + case t == reflect.TypeOf(SpecTimeout(0)): + node.SpecTimeout = time.Duration(arg.(SpecTimeout)) + if !nodeType.Is(types.NodeTypeIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout")) + } + case t == reflect.TypeOf(GracePeriod(0)): + node.GracePeriod = time.Duration(arg.(GracePeriod)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) + } + case t == reflect.TypeOf(Labels{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) + } + for _, label := range arg.(Labels) { + if !labelsSeen[label] { + labelsSeen[label] = true + label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation) + node.Labels = append(node.Labels, label) + appendError(err) + } + } + case t.Kind() == reflect.Func: + if nodeType.Is(types.NodeTypeContainer) { + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if t.NumOut() > 0 || t.NumIn() > 0 { + appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body := arg.(func()) + node.Body = func(SpecContext) { body() } + } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { + if node.ReportEachBody == nil { + node.ReportEachBody = arg.(func(types.SpecReport)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { + if node.ReportSuiteBody == nil { + node.ReportSuiteBody = arg.(func(types.Report)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) { + if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedBeforeSuiteProc1Body == nil { + body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg) + if body == nil { + 
appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext + } else if node.SynchronizedBeforeSuiteAllProcsBody == nil { + body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext + } + } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) { + if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedAfterSuiteAllProcsBody == nil { + node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext + } else if node.SynchronizedAfterSuiteProc1Body == nil { + node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext + } + } else { + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if node.Body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } + default: + remainingArgs = append(remainingArgs, arg) + } + } + + //validations + if node.MarkedPending && node.MarkedFocus { + appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) + } + + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext + + if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { + appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) + } + + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + + if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + + if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + 
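+
+ // Illustrative examples of declarations these validations reject (not from the original source):
+ // It("x", Focus, Pending, func() {}) -- both focused and pending
+ // It("x") -- missing body function (unless marked Pending)
+ // SynchronizedBeforeSuite(func() []byte { ... }) -- only one of its two required functions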
+ for _, arg := range remainingArgs { + appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) + } + + if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 { + appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType)) + } + + if len(errors) > 0 { + return Node{}, errors + } + + return node, errors +} + +var doneType = reflect.TypeOf(make(Done)) + +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { + t := reflect.TypeOf(arg) + if t.NumOut() > 0 || t.NumIn() > 1 { + return nil, false + } + if t.NumIn() == 1 { + if t.In(0) == doneType { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) + deprecatedAsyncBody := arg.(func(Done)) + return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false + } else if t.In(0).Implements(specContextType) { + return arg.(func(SpecContext)), true + } else if t.In(0).Implements(contextType) { + body := arg.(func(context.Context)) + return func(c SpecContext) { body(c) }, true + } + + return nil, false + } + + body := arg.(func()) + return func(SpecContext) { body() }, false +} + +var byteType = reflect.TypeOf([]byte{}) + +func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + + if t.NumOut() > 1 || t.NumIn() > 1 { + return nil, false + } else if t.NumOut() == 1 && t.Out(0) != byteType { + return nil, false + } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) { + return nil, false + } + hasContext := t.NumIn() == 1 + + return func(c SpecContext) []byte { + var out []reflect.Value + if hasContext { + out = v.Call([]reflect.Value{reflect.ValueOf(c)}) + } else { + out = v.Call([]reflect.Value{}) + } + if len(out) == 1 { + return (out[0].Interface()).([]byte) + } else { + return []byte{} + } + }, hasContext +} + +func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + hasContext, hasByte := false, false + + if t.NumOut() > 0 || t.NumIn() > 2 { + return nil, false + } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType { + hasContext, hasByte = true, true + } else if t.NumIn() == 1 && t.In(0).Implements(contextType) { + hasContext = true + } else if t.NumIn() == 1 && t.In(0) == byteType { + hasByte = true + } else if t.NumIn() != 0 { + return nil, false + } + + return func(c SpecContext, b []byte) { + in := []reflect.Value{} + if hasContext { + in = append(in, reflect.ValueOf(c)) + } + if hasByte { + in = append(in, reflect.ValueOf(b)) + } + v.Call(in) + }, hasContext +} + +var errInterface = reflect.TypeOf((*error)(nil)).Elem() + +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + decorations, remainingArgs := PartitionDecorations(args...) 
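+ // NewCleanupNode backs DeferCleanup. A sketch of the call shapes it handles (illustrative, not from the original source):
+ // DeferCleanup(func() { os.RemoveAll(tmpDir) })
+ // DeferCleanup(os.Setenv, "MODE", originalMode) -- function plus arguments, invoked at cleanup time
+ // DeferCleanup(func(ctx SpecContext) error { return conn.Close() }, NodeTimeout(time.Second))
+ // A trailing non-nil error returned by the callback fails the spec (see handleFailure below).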
+ baseOffset := 2 + cl := types.NewCodeLocation(baseOffset) + finalArgs := []interface{}{} + for _, arg := range decorations { + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(Offset(0)): + cl = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + case t == reflect.TypeOf(types.CodeLocation{}): + cl = arg.(types.CodeLocation) + default: + finalArgs = append(finalArgs, arg) + } + } + finalArgs = append(finalArgs, cl) + + if len(remainingArgs) == 0 { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} + } + + callback := reflect.ValueOf(remainingArgs[0]) + if !(callback.Kind() == reflect.Func) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} + } + + callArgs := []reflect.Value{} + for _, arg := range remainingArgs[1:] { + callArgs = append(callArgs, reflect.ValueOf(arg)) + } + + hasContext := false + t := callback.Type() + if t.NumIn() > 0 { + if t.In(0).Implements(specContextType) { + hasContext = true + } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) { + hasContext = true + } + } + + handleFailure := func(out []reflect.Value) { + if len(out) == 0 { + return + } + last := out[len(out)-1] + if last.Type().Implements(errInterface) && !last.IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl) + } + } + + if hasContext { + finalArgs = append(finalArgs, func(c SpecContext) { + out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...)) + handleFailure(out) + }) + } else { + finalArgs = append(finalArgs, func() { + out := callback.Call(callArgs) + handleFailure(out) + }) + } + + return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) +} + +func (n Node) IsZero() bool { + return n.ID == 0 +} + +/* Nodes */ +type Nodes []Node + +func (n Nodes) CopyAppend(nodes ...Node) Nodes { + numN := len(n) + out := make(Nodes, numN+len(nodes)) + for i, node := range n { + out[i] = node + } + for j, node := range nodes { + out[numN+j] = node + } + return out +} + +func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) { + pivotIdx := len(n) + for i := range n { + if n[i].ID == pivot.ID { + pivotIdx = i + break + } + } + left := n[:pivotIdx] + right := Nodes{} + if pivotIdx+1 < len(n) { + right = n[pivotIdx+1:] + } + + return left, right +} + +func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node { + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutNode(nodeToExclude Node) Nodes { + idxToExclude := len(n) + for i := range n { + if n[i].ID == nodeToExclude.ID { + idxToExclude = i + break + } + } + if idxToExclude == len(n) { + return n + } + out, j := make(Nodes, len(n)-1), 0 + for i := range n { + if i == idxToExclude { + continue + } + out[j] = n[i] + j++ + } + return out +} + +func (n Nodes) Filter(filter func(Node) bool) Nodes { + trufa, count := 
make([]bool, len(n)), 0 + for i := range n { + if filter(n[i]) { + trufa[i] = true + count += 1 + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if trufa[i] { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) FirstSatisfying(filter func(Node) bool) Node { + for i := range n { + if filter(n[i]) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes { + count := 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + count++ + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) SortedByDescendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel > out[j].NestingLevel + }) + + return out +} + +func (n Nodes) SortedByAscendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel < out[j].NestingLevel + }) + + return out +} + +func (n Nodes) FirstWithNestingLevel(level int) Node { + for i := range n { + if n[i].NestingLevel == level { + return n[i] + } + } + return Node{} +} + +func (n Nodes) Reverse() Nodes { + out := make(Nodes, len(n)) + for i := range n { + out[len(n)-1-i] = n[i] + } + return out +} + +func (n Nodes) Texts() []string { + out := make([]string, len(n)) + for i := range n { + out[i] = n[i].Text + } + return out +} + +func (n Nodes) Labels() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].Labels == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].Labels) + } + } + return out +} + +func (n Nodes) UnionOfLabels() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, label := range n[i].Labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func (n Nodes) CodeLocations() []types.CodeLocation { + out := make([]types.CodeLocation, len(n)) + for i := range n { + out[i] = n[i].CodeLocation + } + return out +} + +func (n Nodes) BestTextFor(node Node) string { + if node.Text != "" { + return node.Text + } + parentNestingLevel := node.NestingLevel - 1 + for i := range n { + if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel { + return n[i].Text + } + } + + return "" +} + +func (n Nodes) ContainsNodeID(id uint) bool { + for i := range n { + if n[i].ID == id { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedPending() bool { + for i := range n { + if n[i].MarkedPending { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedFocus() bool { + for i := range n { + if n[i].MarkedFocus { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedSerial() bool { + for i := range n { + if n[i].MarkedSerial { + return true + } + } + return false +} + +func (n Nodes) FirstNodeMarkedOrdered() Node { + for i := range n { + if n[i].MarkedOrdered { + return n[i] + } + } + return Node{} +} + +func (n Nodes) GetMaxFlakeAttempts() int { + maxFlakeAttempts := 0 + for i := range n { + if n[i].FlakeAttempts > 0 { + maxFlakeAttempts = n[i].FlakeAttempts + } + } + return maxFlakeAttempts +} + +func (n Nodes) GetMaxMustPassRepeatedly() int { + maxMustPassRepeatedly := 0 + for i := range n { + if n[i].MustPassRepeatedly > 0 { + maxMustPassRepeatedly = n[i].MustPassRepeatedly + } + } + return maxMustPassRepeatedly +} + 
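+// unrollInterfaceSlice flattens nested argument slices so grouped decorators can be passed around as one value. An illustrative sketch (not from the original source):
+// unrollInterfaceSlice([]interface{}{Focus, []interface{}{FlakeAttempts(3), Labels{"slow"}}})
+// returns []interface{}{Focus, FlakeAttempts(3), Labels{"slow"}}; Labels survives intact because, although it is a slice, it is explicitly special-cased below.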
+func unrollInterfaceSlice(args interface{}) []interface{} {
+ v := reflect.ValueOf(args)
+ if v.Kind() != reflect.Slice {
+ return []interface{}{args}
+ }
+ out := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ el := reflect.ValueOf(v.Index(i).Interface())
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
+ out = append(out, unrollInterfaceSlice(el.Interface())...)
+ } else {
+ out = append(out, v.Index(i).Interface())
+ }
+ }
+ return out
+} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go new file mode 100644 index 0000000..161be82 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -0,0 +1,121 @@ +package internal
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type GroupedSpecIndices []SpecIndices
+type SpecIndices []int
+
+func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
+ /*
+ Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
+ order for a given seed across test runs.
+
+ By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
+ experience - specs within a given container run in the order they appear in the file.
+
+ Developers can set -randomizeAllSpecs to shuffle _all_ specs.
+
+ In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
+
+ Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
+ */
+
+ // Seed a new random source based on the configured random seed.
+ r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
+
+ // first break things into execution groups
+ // a group represents a single unit of execution and is a collection of SpecIndices
+ // usually a group is just a single spec, however ordered containers must be preserved as a single group
+ executionGroupIDs := []uint{}
+ executionGroups := map[uint]SpecIndices{}
+ for idx, spec := range specs {
+ groupNode := spec.Nodes.FirstNodeMarkedOrdered()
+ if groupNode.IsZero() {
+ groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
+ }
+ executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
+ if len(executionGroups[groupNode.ID]) == 1 {
+ executionGroupIDs = append(executionGroupIDs, groupNode.ID)
+ }
+ }
+
+ // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
+ // we shuffle outermost containers.
+ // so we need to form shufflable groupings of GroupIDs
+ shufflableGroupingIDs := []uint{}
+ shufflableGroupingIDToGroupIDs := map[uint][]uint{}
+ shufflableGroupingsIDToSortKeys := map[uint]string{}
+
+ // for each execution group we're going to have to pick a node to represent how the
+ // execution group is grouped for shuffling:
+ nodeTypesToShuffle := types.NodeTypesForContainerAndIt
+ if suiteConfig.RandomizeAllSpecs {
+ nodeTypesToShuffle = types.NodeTypeIt
+ }
+
+ //so, for each execution group:
+ for _, groupID := range executionGroupIDs {
+ // pick out a representative spec
+ representativeSpec := specs[executionGroups[groupID][0]]
+
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
+ shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
+
+ //add the execution group to its shufflable group
+ shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
+
+ //and if it's the first one in
+ if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
+ // record the shufflable group ID
+ shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
+ // and record the sort key to use
+ shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
+ }
+ }
+
+ // now we sort the shufflable groups by the sort key. We use the shufflable group node's code location and break ties using its node id
+ sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
+ keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
+ keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
+ if keyA == keyB {
+ return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
+ } else {
+ return keyA < keyB
+ }
+ })
+
+ // now we permute the sorted shufflable grouping IDs and build the ordered Groups
+ orderedGroups := GroupedSpecIndices{}
+ permutation := r.Perm(len(shufflableGroupingIDs))
+ for _, j := range permutation {
+ //let's get the execution group IDs for this shufflable group:
+ executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
+ // and we'll add their associated spec indices to the orderedGroups slice:
+ for _, executionGroupID := range executionGroupIDsForJ {
+ orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
+ }
+ }
+
+ // If we're running in series, we're done.
+ if suiteConfig.ParallelTotal == 1 {
+ return orderedGroups, GroupedSpecIndices{}
+ }
+
+ // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
+ // The parallelizable groups will run across all Ginkgo processes...
+ // ...the serial groups will only run on Process #1 after all other processes have exited.
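+ // An illustrative sketch (not from the original source): given
+ // Describe("A", Ordered, func() { It("a1", ...); It("a2", ...) })
+ // Describe("B", Serial, func() { It("b1", ...) })
+ // a1 and a2 form one execution group that always runs a1 before a2, while b1 lands in the serial set below and runs last on Process #1.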
+	parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
+	for _, specIndices := range orderedGroups {
+		if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
+			serialGroups = append(serialGroups, specIndices)
+		} else {
+			parallelizableGroups = append(parallelizableGroups, specIndices)
+		}
+	}
+
+	return parallelizableGroups, serialGroups
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
new file mode 100644
index 0000000..4a1c094
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -0,0 +1,250 @@
+package internal
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"time"
+)
+
+const BAILOUT_TIME = 1 * time.Second
+const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
+
+When running in parallel, Ginkgo captures stdout and stderr output
+and attaches it to the running spec. It looks like that process is getting
+stuck for this suite.
+
+This usually happens if you, or a library you are using, spin up an external
+process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
+causes the external process to keep Ginkgo's output interceptor pipe open and
+causes output interception to hang.
+
+Ginkgo has detected this and short-circuited the capture process. The specs
+will continue running after this message however output from the external
+process that caused this issue will not be captured.
+
+You have several options to fix this. In preferred order they are:
+
+1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
+2. Ensure your process exits before the current spec completes. If your
+process is long-lived and must cross spec boundaries, this option won't
+work for you.
+3. Pause Ginkgo's output interceptor before starting your process and then
+resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
+to do this.
+4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
+turn off all output interception but allow specs to run in parallel without this
+issue. You may miss important output if you do this including output from Go's
+race detector.
+
+More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
+`
+
+/*
+The OutputInterceptor is used to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+	StartInterceptingOutput()
+	StartInterceptingOutputAndForwardTo(io.Writer)
+	StopInterceptingAndReturnOutput() string
+
+	PauseIntercepting()
+	ResumeIntercepting()
+
+	Shutdown()
+}
+
+type NoopOutputInterceptor struct{}
+
+func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
+func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
+func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
+func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
+func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
+func (interceptor NoopOutputInterceptor) Shutdown() {}
+
+type pipePair struct {
+	reader *os.File
+	writer *os.File
+}
+
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+	for {
+		//make the next pipe...
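+		// (a pipePair is simply both ends of an os.Pipe; preparing the next pair ahead of time
+		// lets ResumeIntercepting grab a fresh pipe from pipeChannel without waiting on the syscall)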
+		pair := pipePair{}
+		pair.reader, pair.writer, _ = os.Pipe()
+		select {
+		//...and provide it to the next consumer (they are responsible for closing the files)
+		case pipeChannel <- pair:
+			continue
+		//...or close the files if we were told to shutdown
+		case <-shutdown:
+			pair.reader.Close()
+			pair.writer.Close()
+			return
+		}
+	}
+}
+
+type interceptorImplementation interface {
+	CreateStdoutStderrClones() (*os.File, *os.File)
+	ConnectPipeToStdoutStderr(*os.File)
+	RestoreStdoutStderrFromClones(*os.File, *os.File)
+	ShutdownClones(*os.File, *os.File)
+}
+
+type genericOutputInterceptor struct {
+	intercepting bool
+
+	stdoutClone *os.File
+	stderrClone *os.File
+	pipe        pipePair
+
+	shutdown           chan interface{}
+	emergencyBailout   chan interface{}
+	pipeChannel        chan pipePair
+	interceptedContent chan string
+
+	forwardTo         io.Writer
+	accumulatedOutput string
+
+	implementation interceptorImplementation
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
+	interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
+	if interceptor.intercepting {
+		return
+	}
+	interceptor.accumulatedOutput = ""
+	interceptor.forwardTo = w
+	interceptor.ResumeIntercepting()
+}
+
+func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
+	if interceptor.intercepting {
+		interceptor.PauseIntercepting()
+	}
+	return interceptor.accumulatedOutput
+}
+
+func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
+	if interceptor.intercepting {
+		return
+	}
+	interceptor.intercepting = true
+	if interceptor.stdoutClone == nil {
+		interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
+		interceptor.shutdown = make(chan interface{})
+		go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
+	}
+
+	// Now we make a pipe, we'll use this to redirect the output sent to the 1 and 2 file descriptors (this is how everything else in the world is trying to log to stdout and stderr)
+	// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
+	interceptor.pipe = <-interceptor.pipeChannel
+
+	interceptor.emergencyBailout = make(chan interface{})
+
+	//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
+	go func() {
+		buffer := &bytes.Buffer{}
+		destination := io.MultiWriter(buffer, interceptor.forwardTo)
+		copyFinished := make(chan interface{})
+		reader := interceptor.pipe.reader
+		go func() {
+			io.Copy(destination, reader)
+			reader.Close() // close the read end of the pipe so we don't leak a file descriptor
+			close(copyFinished)
+		}()
+		select {
+		case <-copyFinished:
+			interceptor.interceptedContent <- buffer.String()
+		case <-interceptor.emergencyBailout:
+			interceptor.interceptedContent <- ""
+		}
+	}()
+
+	interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
+}
+
+func (interceptor *genericOutputInterceptor) PauseIntercepting() {
+	if !interceptor.intercepting {
+		return
+	}
+	// first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
+	// to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
+	interceptor.pipe.writer.Close() // the pipewriter itself
+
+	// we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
+	// this also closes #1 and #2 before it points them at their original stdout and stderr file descriptions
+	interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
+
+	var content string
+	select {
+	case content = <-interceptor.interceptedContent:
+	case <-time.After(BAILOUT_TIME):
+		/*
+			By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader
+			should eventually receive an EOF and exit.
+
+			**However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
+			will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
+
+			That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
+			content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
+			and file descriptor (though these will be cleaned up when the process exits).
+
+			We tack on a message to notify the user that they've hit this edge case and encourage them to address it.
+		*/
+		close(interceptor.emergencyBailout)
+		content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
+	}
+
+	interceptor.accumulatedOutput += content
+	interceptor.intercepting = false
+}
+
+func (interceptor *genericOutputInterceptor) Shutdown() {
+	interceptor.PauseIntercepting()
+
+	if interceptor.stdoutClone != nil {
+		close(interceptor.shutdown)
+		interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
+		interceptor.stdoutClone = nil
+		interceptor.stderrClone = nil
+	}
+}
+
+/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
+func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
+	return &genericOutputInterceptor{
+		interceptedContent: make(chan string),
+		pipeChannel:        make(chan pipePair),
+		shutdown:           make(chan interface{}),
+		implementation:     &osGlobalReassigningOutputInterceptorImpl{},
+	}
+}
+
+type osGlobalReassigningOutputInterceptorImpl struct{}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+	return os.Stdout, os.Stderr
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+	os.Stdout = pipeWriter
+	os.Stderr = pipeWriter
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+	os.Stdout = stdoutClone
+	os.Stderr = stderrClone
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
+	//noop
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
new file mode 100644
index 0000000..f5ae15b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -0,0 +1,62 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package internal
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &genericOutputInterceptor{
+		interceptedContent: make(chan string),
+		pipeChannel:        make(chan pipePair),
+		shutdown:           make(chan interface{}),
+		implementation:     &dupSyscallOutputInterceptorImpl{},
+	}
+}
+
+type dupSyscallOutputInterceptorImpl struct{}
+
+func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+	// To clone stdout and stderr we:
+	// First, create two clone file descriptors that point to the stdout and stderr file descriptions
+	stdoutCloneFD, _ := unix.Dup(1)
+	stderrCloneFD, _ := unix.Dup(2)
+
+	// And then wrap the clone file descriptors in files.
+	// One benefit of this (that we don't use yet) is that we can actually write
+	// to these files to emit output to the console even though we're intercepting output
+	stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
+	stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
+
+	//these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
+	//this speeds things up a bit, actually.
+	return stdoutClone, stderrClone
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+	// To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
+	// to the write end of the pipe.
+	// We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
+	// This effectively shunts data written to stdout and stderr to the write end of our pipe
+	unix.Dup2(int(pipeWriter.Fd()), 1)
+	unix.Dup2(int(pipeWriter.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+	// To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
+	// point to the original file descriptions that we saved off in the clones.
+	// This has the added benefit of closing the connection between these descriptors and the write end of the pipe
+	// which is important to cause the io.Copy on the pipe.Reader to end.
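+	// Sketch of the file-descriptor dance: while intercepting, fds 1 and 2 point at the pipe's
+	// write end; after the Dup2 calls below they point at the saved clones again, dropping this
+	// process's last references to the pipe writer so the reader side can see EOF.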
+ unix.Dup2(int(stdoutClone.Fd()), 1) + unix.Dup2(int(stderrClone.Fd()), 2) +} + +func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) { + // We're done with the clones so we can close them to clean up after ourselves + stdoutClone.Close() + stderrClone.Close() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go new file mode 100644 index 0000000..30c2851 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go @@ -0,0 +1,7 @@ +// +build windows + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return NewOSGlobalReassigningOutputInterceptor() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 0000000..b3cd642 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,72 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 0000000..6547c7a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,169 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: 
serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, 
error) {
+	var counter ParallelIndexCounter
+	err := client.poll("/counter", &counter)
+	return counter.Index, err
+}
+
+func (client *httpClient) PostAbort() error {
+	return client.post("/abort", nil)
+}
+
+func (client *httpClient) ShouldAbort() bool {
+	err := client.poll("/abort", nil)
+	if err == ErrorGone {
+		return true
+	}
+	return false
+}
+
+func (client *httpClient) Write(p []byte) (int, error) {
+	resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p))
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return 0, fmt.Errorf("failed to emit output")
+	}
+	return len(p), err
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
new file mode 100644
index 0000000..d2c71ab
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -0,0 +1,242 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+	"encoding/json"
+	"io"
+	"net"
+	"net/http"
+
+	"github.com/onsi/ginkgo/v2/reporters"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type httpServer struct {
+	listener net.Listener
+	handler  *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, err
+	}
+	return &httpServer{
+		listener: listener,
+		handler:  newServerHandler(parallelTotal, reporter),
+	}, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *httpServer) Start() {
+	httpServer := &http.Server{}
+	mux := http.NewServeMux()
+	httpServer.Handler = mux
+
+	//streaming endpoints
+	mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
+	mux.HandleFunc("/did-run", server.didRun)
+	mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
+	mux.HandleFunc("/emit-output", server.emitOutput)
+	mux.HandleFunc("/progress-report", server.emitProgressReport)
+
+	//synchronization endpoints
+	mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+	mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
+	mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
+	mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
+	mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
+	mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
+	mux.HandleFunc("/counter", server.handleCounter)
+	mux.HandleFunc("/up", server.handleUp)
+	mux.HandleFunc("/abort", server.handleAbort)
+
+	go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *httpServer) Close() {
+	server.listener.Close()
+}
+
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
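+// A usage sketch (assuming a constructed and started server): pass Address() to NewClient, e.g.
+// `client := NewClient(server.Address()); client.Connect()`.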
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, 
&beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 0000000..59e8e6f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,136 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return 
client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
+}
+
+func (client *rpcClient) Write(p []byte) (int, error) {
+	var n int
+	err := client.client.Call("Server.EmitOutput", p, &n)
+	return n, err
+}
+
+func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
+	return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
+}
+
+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+	return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+	var state types.SpecState
+	err := client.poll("Server.ReportBeforeSuiteState", &state)
+	if err == ErrorGone {
+		return types.SpecStateFailed, nil
+	}
+	return state, err
+}
+
+func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+	beforeSuiteState := BeforeSuiteState{
+		State: state,
+		Data:  data,
+	}
+	return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+	var beforeSuiteState BeforeSuiteState
+	err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
+	if err == ErrorGone {
+		return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+	}
+	return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
+	return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+	var report types.Report
+	err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
+	if err == ErrorGone {
+		return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+	}
+	return report, err
+}
+
+func (client *rpcClient) FetchNextCounter() (int, error) {
+	var counter int
+	err := client.client.Call("Server.Counter", voidSender, &counter)
+	return counter, err
+}
+
+func (client *rpcClient) PostAbort() error {
+	return client.client.Call("Server.Abort", voidSender, voidReceiver)
+}
+
+func (client *rpcClient) ShouldAbort() bool {
+	var shouldAbort bool
+	client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
+	return shouldAbort
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
new file mode 100644
index 0000000..2620fd5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -0,0 +1,75 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+	"io"
+	"net"
+	"net/http"
+	"net/rpc"
+
+	"github.com/onsi/ginkgo/v2/reporters"
+)
+
+/*
+RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type RPCServer struct {
+	listener net.Listener
+	handler  *ServerHandler
+}
+
+//Create a new server, automatically selecting a port
+func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, err
+	}
+	return &RPCServer{
+		listener: listener,
+		handler:  newServerHandler(parallelTotal, reporter),
+	}, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *RPCServer) Start() {
+	rpcServer := rpc.NewServer()
+	rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
+
+	httpServer := &http.Server{}
+	httpServer.Handler = rpcServer
+
+	go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *RPCServer) Close() {
+	server.listener.Close()
+}
+
+//The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *RPCServer) Address() string {
+	return server.listener.Addr().String()
+}
+
+func (server *RPCServer) GetSuiteDone() chan interface{} {
+	return server.handler.done
+}
+
+func (server *RPCServer) GetOutputDestination() io.Writer {
+	return server.handler.outputDestination
+}
+
+func (server *RPCServer) SetOutputDestination(w io.Writer) {
+	server.handler.outputDestination = w
+}
+
+func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
+	server.handler.registerAlive(node, alive)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
new file mode 100644
index 0000000..a6d9879
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -0,0 +1,234 @@
+package parallel_support
+
+import (
+	"io"
+	"os"
+	"sync"
+
+	"github.com/onsi/ginkgo/v2/reporters"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+type Void struct{}
+
+var voidReceiver *Void = &Void{}
+var voidSender Void
+
+// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
+// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) 
ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go new file mode 100644 index 0000000..11269cf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -0,0 +1,287 @@ +package internal + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +var _SOURCE_CACHE = map[string][]string{} + +type ProgressSignalRegistrar func(func()) context.CancelFunc + +func RegisterForProgressSignal(handler func()) context.CancelFunc { + signalChannel := make(chan os.Signal, 1) + if len(PROGRESS_SIGNALS) > 0 { + signal.Notify(signalChannel, PROGRESS_SIGNALS...) 
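+		// (the signal list is platform-specific: SIGUSR1 on linux/solaris, SIGINFO and SIGUSR1 on
+		// the BSDs and darwin, and empty on windows - see the progress_report_*.go variants below)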
+ } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + for { + select { + case <-signalChannel: + handler() + case <-ctx.Done(): + signal.Stop(signalChannel) + return + } + } + }() + + return cancel +} + +type ProgressStepCursor struct { + Text string + CodeLocation types.CodeLocation + StartTime time.Time +} + +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { + pr := types.ProgressReport{ + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + LeafNodeText: report.LeafNodeText, + LeafNodeLocation: report.LeafNodeLocation, + SpecStartTime: report.StartTime, + + CurrentNodeType: currentNode.NodeType, + CurrentNodeText: currentNode.Text, + CurrentNodeLocation: currentNode.CodeLocation, + CurrentNodeStartTime: currentNodeStartTime, + + CurrentStepText: currentStep.Message, + CurrentStepLocation: currentStep.CodeLocation, + CurrentStepStartTime: currentStep.TimelineLocation.Time, + + AdditionalReports: additionalReports, + + CapturedGinkgoWriterOutput: gwOutput, + TimelineLocation: timelineLocation, + } + + goroutines, err := extractRunningGoroutines() + if err != nil { + return pr, err + } + pr.Goroutines = goroutines + + // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest: + packagesOfInterest := map[string]bool{} + packageFromFilename := func(filename string) string { + return filepath.Dir(filename) + } + addPackageFor := func(filename string) { + if filename != "" { + packagesOfInterest[packageFromFilename(filename)] = true + } + } + isPackageOfInterest := func(filename string) bool { + stackPackage := packageFromFilename(filename) + for packageOfInterest := range packagesOfInterest { + if strings.HasPrefix(stackPackage, packageOfInterest) { + return true + } + } + return false + } + for _, location := range report.ContainerHierarchyLocations { + addPackageFor(location.FileName) + } + addPackageFor(report.LeafNodeLocation.FileName) + addPackageFor(currentNode.CodeLocation.FileName) + addPackageFor(currentStep.CodeLocation.FileName) + + //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode` + specGoRoutineIdx := -1 + runNodeFunctionCallIdx := -1 +OUTER: + for goroutineIdx, goroutine := range pr.Goroutines { + for functionCallIdx, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") { + specGoRoutineIdx = goroutineIdx + runNodeFunctionCallIdx = functionCallIdx + break OUTER + } + } + } + + //Now, we find the first non-Ginkgo function call + if specGoRoutineIdx > -1 { + for runNodeFunctionCallIdx >= 0 { + fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function + file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename + // these are all things that could potentially happen from within ginkgo + if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") { + runNodeFunctionCallIdx-- + continue + } + if 
strings.Contains(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function, "ginkgo/table_dsl") { + + } + //found it! lets add its package of interest + addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename) + break + } + } + + ginkgoEntryPointIdx := -1 +OUTER_GINKGO_ENTRY_POINT: + for goroutineIdx, goroutine := range pr.Goroutines { + for _, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") { + ginkgoEntryPointIdx = goroutineIdx + break OUTER_GINKGO_ENTRY_POINT + } + } + } + + // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest` + // Any goroutines with highlighted lines end up in the HighlightGoRoutines + for goroutineIdx, goroutine := range pr.Goroutines { + if goroutineIdx == ginkgoEntryPointIdx { + continue + } + if goroutineIdx == specGoRoutineIdx { + pr.Goroutines[goroutineIdx].IsSpecGoroutine = true + } + for functionCallIdx, functionCall := range goroutine.Stack { + if isPackageOfInterest(functionCall.Filename) { + goroutine.Stack[functionCallIdx].Highlight = true + goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots) + } + } + } + + if !includeAll { + goroutines := []types.Goroutine{pr.SpecGoroutine()} + goroutines = append(goroutines, pr.HighlightedGoroutines()...) + pr.Goroutines = goroutines + } + + return pr, nil +} + +func extractRunningGoroutines() ([]types.Goroutine, error) { + var stack []byte + for size := 64 * 1024; ; size *= 2 { + stack = make([]byte, size) + if n := runtime.Stack(stack, true); n < size { + stack = stack[:n] + break + } + } + r := bufio.NewReader(bytes.NewReader(stack)) + out := []types.Goroutine{} + idx := -1 + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } + + line = strings.TrimSuffix(line, "\n") + + //skip blank lines + if line == "" { + continue + } + + //parse headers for new goroutine frames + if strings.HasPrefix(line, "goroutine") { + out = append(out, types.Goroutine{}) + idx = len(out) - 1 + + line = strings.TrimPrefix(line, "goroutine ") + line = strings.TrimSuffix(line, ":") + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line)) + } + out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1])) + } + + out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]") + continue + } + + //if we are here we must be at a function call entry in the stack + functionCall := types.FunctionCall{ + Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by' + } + + line, err = r.ReadString('\n') + line = strings.TrimSuffix(line, "\n") + if err == io.EOF { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) + } + line = strings.TrimLeft(line, " \t") + delimiterIdx := strings.LastIndex(line, ":") + if delimiterIdx == -1 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line)) + } + functionCall.Filename = line[:delimiterIdx] + line = strings.Split(line[delimiterIdx+1:], " ")[0] + lineNumber, err := strconv.ParseInt(line, 10, 64) + 
functionCall.Line = int(lineNumber) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) + } + out[idx].Stack = append(out[idx].Stack, functionCall) + } + + return out, nil +} + +func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) { + if filename == "" { + return []string{}, 0 + } + + var lines []string + var ok bool + if lines, ok = _SOURCE_CACHE[filename]; !ok { + sourceRoots := []string{""} + sourceRoots = append(sourceRoots, configuredSourceRoots...) + var data []byte + var err error + var found bool + for _, root := range sourceRoots { + data, err = os.ReadFile(filepath.Join(root, filename)) + if err == nil { + found = true + break + } + } + if !found { + return []string{}, 0 + } + lines = strings.Split(string(data), "\n") + _SOURCE_CACHE[filename] = lines + } + + startIndex := lineNumber - span - 1 + endIndex := startIndex + span + span + 1 + if startIndex < 0 { + startIndex = 0 + } + if endIndex > len(lines) { + endIndex = len(lines) + } + highlightIndex := lineNumber - 1 - startIndex + return lines[startIndex:endIndex], highlightIndex +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go new file mode 100644 index 0000000..61e0ed3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go @@ -0,0 +1,11 @@ +//go:build freebsd || openbsd || netbsd || darwin || dragonfly +// +build freebsd openbsd netbsd darwin dragonfly + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go new file mode 100644 index 0000000..ad30de4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go @@ -0,0 +1,11 @@ +//go:build linux || solaris +// +build linux solaris + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go new file mode 100644 index 0000000..0eca251 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package internal + +import "os" + +var PROGRESS_SIGNALS = []os.Signal{} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go new file mode 100644 index 0000000..cc351a3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -0,0 +1,39 @@ +package internal + +import ( + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type ReportEntry = types.ReportEntry + +func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { + out := ReportEntry{ + Visibility: types.ReportEntryVisibilityAlways, + Name: name, + Location: cl, + Time: time.Now(), + } + var didSetValue = false + for _, arg := range args { + switch x := arg.(type) { + case types.ReportEntryVisibility: + out.Visibility = x + case types.CodeLocation: + out.Location = x + case Offset: + out.Location = types.NewCodeLocation(2 + int(x)) + case time.Time: + out.Time = x + default: + if didSetValue { + return 
ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) + } + out.Value = types.WrapEntryValue(arg) + didSetValue = true + } + } + + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go new file mode 100644 index 0000000..7c4ee5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -0,0 +1,87 @@ +package internal + +import ( + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type Spec struct { + Nodes Nodes + Skip bool +} + +func (s Spec) SubjectID() uint { + return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID +} + +func (s Spec) Text() string { + texts := []string{} + for i := range s.Nodes { + if s.Nodes[i].Text != "" { + texts = append(texts, s.Nodes[i].Text) + } + } + return strings.Join(texts, " ") +} + +func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node { + return s.Nodes.FirstNodeWithType(nodeTypes) +} + +func (s Spec) FlakeAttempts() int { + flakeAttempts := 0 + for i := range s.Nodes { + if s.Nodes[i].FlakeAttempts > 0 { + flakeAttempts = s.Nodes[i].FlakeAttempts + } + } + + return flakeAttempts +} + +func (s Spec) MustPassRepeatedly() int { + mustPassRepeatedly := 0 + for i := range s.Nodes { + if s.Nodes[i].MustPassRepeatedly > 0 { + mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly + } + } + + return mustPassRepeatedly +} + +func (s Spec) SpecTimeout() time.Duration { + return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout +} + +type Specs []Spec + +func (s Specs) HasAnySpecsMarkedPending() bool { + for i := range s { + if s[i].Nodes.HasNodeMarkedPending() { + return true + } + } + + return false +} + +func (s Specs) CountWithoutSkip() int { + n := 0 + for i := range s { + if !s[i].Skip { + n += 1 + } + } + return n +} + +func (s Specs) AtIndices(indices SpecIndices) Specs { + out := make(Specs, len(indices)) + for i, idx := range indices { + out[i] = s[idx] + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go new file mode 100644 index 0000000..8f569dd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -0,0 +1,90 @@ +package internal + +import ( + "context" + "sort" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type SpecContext interface { + context.Context + + SpecReport() types.SpecReport + AttachProgressReporter(func() string) func() +} + +type specContext struct { + context.Context + + cancel context.CancelFunc + lock *sync.Mutex + progressReporters map[int]func() string + prCounter int + + suite *Suite +} + +/* +SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context. + +Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses. + +This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. 
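+
+As an illustrative sketch (assumed usage): a consumer holding any context derived from a SpecContext can recover it via ctx.Value("GINKGO_SPEC_CONTEXT"), which is how the value set in NewSpecContext below is intended to be found by downstream libraries such as Gomega.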
+*/
+func NewSpecContext(suite *Suite) *specContext {
+	ctx, cancel := context.WithCancel(context.Background())
+	sc := &specContext{
+		cancel:            cancel,
+		suite:             suite,
+		lock:              &sync.Mutex{},
+		prCounter:         0,
+		progressReporters: map[int]func() string{},
+	}
+	ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
+	sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
+
+	return sc
+}
+
+func (sc *specContext) SpecReport() types.SpecReport {
+	return sc.suite.CurrentSpecReport()
+}
+
+func (sc *specContext) AttachProgressReporter(reporter func() string) func() {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+	sc.prCounter += 1
+	prCounter := sc.prCounter
+	sc.progressReporters[prCounter] = reporter
+
+	return func() {
+		sc.lock.Lock()
+		defer sc.lock.Unlock()
+		delete(sc.progressReporters, prCounter)
+	}
+}
+
+func (sc *specContext) QueryProgressReporters() []string {
+	sc.lock.Lock()
+	keys := []int{}
+	for key := range sc.progressReporters {
+		keys = append(keys, key)
+	}
+	sort.Ints(keys)
+	reporters := []func() string{}
+	for _, key := range keys {
+		reporters = append(reporters, sc.progressReporters[key])
+	}
+	sc.lock.Unlock()
+
+	if len(reporters) == 0 {
+		return nil
+	}
+	out := []string{}
+	for _, reporter := range reporters {
+		out = append(out, reporter())
+	}
+	return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
new file mode 100644
index 0000000..f470641
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -0,0 +1,993 @@
+package internal
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+	"github.com/onsi/ginkgo/v2/internal/parallel_support"
+	"github.com/onsi/ginkgo/v2/reporters"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+type Phase uint
+
+const (
+	PhaseBuildTopLevel Phase = iota
+	PhaseBuildTree
+	PhaseRun
+)
+
+type Suite struct {
+	tree               *TreeNode
+	topLevelContainers Nodes
+
+	phase Phase
+
+	suiteNodes   Nodes
+	cleanupNodes Nodes
+
+	failer            *Failer
+	reporter          reporters.Reporter
+	writer            WriterInterface
+	outputInterceptor OutputInterceptor
+	interruptHandler  interrupt_handler.InterruptHandlerInterface
+	config            types.SuiteConfig
+	deadline          time.Time
+
+	skipAll              bool
+	report               types.Report
+	currentSpecReport    types.SpecReport
+	currentNode          Node
+	currentNodeStartTime time.Time
+
+	currentSpecContext *specContext
+
+	currentByStep types.SpecEvent
+	timelineOrder int
+
+	/*
+		We don't need to lock around all operations. Just those that *could* happen concurrently.
+
+		Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
+
+		However, there are some operations that can happen concurrently:
+
+		- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
+		- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+		To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+	*/
+	selectiveLock *sync.Mutex
+
+	client parallel_support.Client
+}
+
+func NewSuite() *Suite {
+	return &Suite{
+		tree:  &TreeNode{},
+		phase: PhaseBuildTopLevel,
+
+		selectiveLock: &sync.Mutex{},
+	}
+}
+
+func (suite *Suite) BuildTree() error {
+	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers without being entered
+	// We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree
+	suite.phase = PhaseBuildTree
+	for _, topLevelContainer := range suite.topLevelContainers {
+		err := suite.PushNode(topLevelContainer)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+	if suite.phase != PhaseBuildTree {
+		panic("cannot run before building the tree - call suite.BuildTree() first")
+	}
+	ApplyNestedFocusPolicyToTree(suite.tree)
+	specs := GenerateSpecsFromTreeRoot(suite.tree)
+	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+
+	suite.phase = PhaseRun
+	suite.client = client
+	suite.failer = failer
+	suite.reporter = reporter
+	suite.writer = writer
+	suite.outputInterceptor = outputInterceptor
+	suite.interruptHandler = interruptHandler
+	suite.config = suiteConfig
+
+	if suite.config.Timeout > 0 {
+		suite.deadline = time.Now().Add(suite.config.Timeout)
+	}
+
+	cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
+
+	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+
+	cancelProgressHandler()
+
+	return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) InRunPhase() bool {
+	return suite.phase == PhaseRun
+}
+
+/*
+	Tree Construction methods
+
+	PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
+*/
+
+func (suite *Suite) PushNode(node Node) error {
+	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+		return suite.pushCleanupNode(node)
+	}
+
+	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+		return suite.pushSuiteNode(node)
+	}
+
+	if suite.phase == PhaseRun {
+		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
+	}
+
+	if node.MarkedSerial {
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
+			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
+		}
+	}
+
+	if 
node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if firstOrderedNode.IsZero() { + return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType) + } + } + + if node.NodeType == types.NodeTypeContainer { + // During PhaseBuildTopLevel we only track the top level containers without entering them + // We only enter the top level container nodes during PhaseBuildTree + // + // This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives + // the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs` + // is invoked. This makes the lifecycle easier to reason about and solves issues like #693. + if suite.phase == PhaseBuildTopLevel { + suite.topLevelContainers = append(suite.topLevelContainers, node) + return nil + } + if suite.phase == PhaseBuildTree { + parentTree := suite.tree + suite.tree = &TreeNode{Node: node} + parentTree.AppendChild(suite.tree) + err := func() (err error) { + defer func() { + if e := recover(); e != nil { + err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) + } + }() + node.Body(nil) + return err + }() + suite.tree = parentTree + return err + } + } else { + suite.tree.AppendChild(&TreeNode{Node: node}) + return nil + } + + return nil +} + +func (suite *Suite) pushSuiteNode(node Node) error { + if suite.phase == PhaseBuildTree { + return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation) + } + + if suite.phase == PhaseRun { + return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation) + } + + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if len(existingBefores) > 0 { + return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation) + } + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if len(existingAfters) > 0 { + return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation) + } + } + + suite.suiteNodes = append(suite.suiteNodes, node) + return nil +} + +func (suite *Suite) pushCleanupNode(node Node) error { + if suite.phase != PhaseRun || suite.currentNode.IsZero() { + return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation) + } + + switch suite.currentNode.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + node.NodeType = types.NodeTypeCleanupAfterSuite + case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: + node.NodeType = types.NodeTypeCleanupAfterAll + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite: + return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) + case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: + return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) + 
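+	// Everything else - cleanup registered from a setup or subject node such as BeforeEach
+	// or It - becomes an after-each cleanup (the default branch below). Illustrative,
+	// hypothetical spec code:
+	//
+	//	BeforeEach(func() {
+	//		f, _ := os.CreateTemp("", "scratch")
+	//		DeferCleanup(os.Remove, f.Name()) // pushed here as a NodeTypeCleanupAfterEach node
+	//	})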
default: + node.NodeType = types.NodeTypeCleanupAfterEach + } + + node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID + node.NestingLevel = suite.currentNode.NestingLevel + suite.cleanupNodes = append(suite.cleanupNodes, node) + + return nil +} + +func (suite *Suite) generateTimelineLocation() types.TimelineLocation { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + suite.timelineOrder += 1 + return types.TimelineLocation{ + Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), + Order: suite.timelineOrder, + Time: time.Now(), + } +} + +func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent { + event.TimelineLocation = suite.generateTimelineLocation() + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) + return event +} + +func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { + event := startEvent + event.SpecEventType = eventType + event.TimelineLocation = suite.generateTimelineLocation() + event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time) + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) +} + +func (suite *Suite) By(text string, callback ...func()) error { + cl := types.NewCodeLocation(2) + if suite.phase != PhaseRun { + return types.GinkgoErrors.ByNotDuringRunPhase(cl) + } + + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventByStart, + CodeLocation: cl, + Message: text, + }) + suite.selectiveLock.Lock() + suite.currentByStep = event + suite.selectiveLock.Unlock() + + if len(callback) == 1 { + defer func() { + suite.selectiveLock.Lock() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + suite.handleSpecEventEnd(types.SpecEventByEnd, event) + }() + callback[0]() + } else if len(callback) > 1 { + panic("just one callback per By, please") + } + return nil +} + +/* +Spec Running methods - used during PhaseRun +*/ +func (suite *Suite) CurrentSpecReport() types.SpecReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + report := suite.currentSpecReport + if suite.writer != nil { + report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + } + report.ReportEntries = make([]ReportEntry, len(report.ReportEntries)) + copy(report.ReportEntries, suite.currentSpecReport.ReportEntries) + return report +} + +func (suite *Suite) AddReportEntry(entry ReportEntry) error { + if suite.phase != PhaseRun { + return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) + } + entry.TimelineLocation = suite.generateTimelineLocation() + entry.Time = entry.TimelineLocation.Time + suite.selectiveLock.Lock() + suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + suite.selectiveLock.Unlock() + suite.reporter.EmitReportEntry(entry) + return nil +} + +func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + timelineLocation := suite.generateTimelineLocation() + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + var additionalReports []string + if suite.currentSpecContext != nil { + additionalReports = suite.currentSpecContext.QueryProgressReporters() + } + gwOutput := 
suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) + + if err != nil { + fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) + } + return pr +} + +func (suite *Suite) handleProgressSignal() { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}You've requested a progress report:{{/}}" + suite.emitProgressReport(report) +} + +func (suite *Suite) emitProgressReport(report types.ProgressReport) { + suite.selectiveLock.Lock() + suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) + suite.selectiveLock.Unlock() + + suite.reporter.EmitProgressReport(report) + if suite.isRunningInParallel() { + err := suite.client.PostEmitProgressReport(report) + if err != nil { + fmt.Println(err.Error()) + } + } +} + +func (suite *Suite) isRunningInParallel() bool { + return suite.config.ParallelTotal > 1 +} + +func (suite *Suite) processCurrentSpecReport() { + suite.reporter.DidRun(suite.currentSpecReport) + if suite.isRunningInParallel() { + suite.client.PostDidRun(suite.currentSpecReport) + } + suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport) + + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.report.SuiteSucceeded = false + if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) { + suite.skipAll = true + if suite.isRunningInParallel() { + suite.client.PostAbort() + } + } + } +} + +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { + numSpecsThatWillBeRun := specs.CountWithoutSkip() + + suite.report = types.Report{ + SuitePath: suitePath, + SuiteDescription: description, + SuiteLabels: suiteLabels, + SuiteConfig: suite.config, + SuiteHasProgrammaticFocus: hasProgrammaticFocus, + PreRunStats: types.PreRunStats{ + TotalSpecs: len(specs), + SpecsThatWillRun: numSpecsThatWillBeRun, + }, + StartTime: time.Now(), + } + + suite.reporter.SuiteWillBegin(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteWillBegin(suite.report) + } + + suite.report.SuiteSucceeded = true + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) + + ranBeforeSuite := suite.report.SuiteSucceeded + if suite.report.SuiteSucceeded { + suite.runBeforeSuite(numSpecsThatWillBeRun) + } + + if suite.report.SuiteSucceeded { + groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) + nextIndex := MakeIncrementingIndexCounter() + if suite.isRunningInParallel() { + nextIndex = suite.client.FetchNextCounter + } + + for { + groupedSpecIdx, err := nextIndex() + if err != nil { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error())) + suite.report.SuiteSucceeded = false + break + } + + if groupedSpecIdx >= len(groupedSpecIndices) { + if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 { + groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter() + suite.client.BlockUntilNonprimaryProcsHaveFinished() + continue + } + break + } + + 
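+			// Work-distribution note: when running in parallel, nextIndex is backed by the
+			// parallel-support server, so each process claims the next unclaimed spec group;
+			// in serial mode it is a plain in-memory counter. A minimal sketch of one possible
+			// counter implementation (an assumed shape; the real MakeIncrementingIndexCounter
+			// lives elsewhere in this package):
+			//
+			//	func MakeIncrementingIndexCounter() func() (int, error) {
+			//		index := 0
+			//		return func() (int, error) { index += 1; return index - 1, nil }
+			//	}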
// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts + // we encapsulate that complexity in the notion of a Group that can run + // Group is really just an extension of suite so it gets passed a suite and has access to all its internals + // Note that group is stateful and intended for single use! + newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) + } + + if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") + suite.report.SuiteSucceeded = false + } + } + + if ranBeforeSuite { + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + } + + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Interrupted() { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) + suite.report.SuiteSucceeded = false + } + suite.report.EndTime = time.Now() + suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) + if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed") + suite.report.SuiteSucceeded = false + } + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) + suite.reporter.SuiteDidEnd(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteDidEnd(suite.report) + } + + return suite.report.SuiteSucceeded +} + +func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { + beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(beforeSuiteNode) + if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") + suite.skipAll = true + } + suite.processCurrentSpecReport() + } +} + +func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { + afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(afterSuiteNode) + suite.processCurrentSpecReport() + } + + afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() + if len(afterSuiteCleanup) > 0 { + for _, cleanupNode := range afterSuiteCleanup { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + 
ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(cleanupNode) + suite.processCurrentSpecReport() + } + } +} + +func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { + nodes := spec.Nodes.WithType(nodeType) + if nodeType == types.NodeTypeReportAfterEach { + nodes = nodes.SortedByDescendingNestingLevel() + } + if nodeType == types.NodeTypeReportBeforeEach { + nodes = nodes.SortedByAscendingNestingLevel() + } + if len(nodes) == 0 { + return + } + + for i := range nodes { + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + report := suite.currentSpecReport + nodes[i].Body = func(SpecContext) { + nodes[i].ReportEachBody(report) + } + state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) + + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. + // Also, if the reporter is every aborted - always override the state to propagate the abort + if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { + suite.currentSpecReport.State = state + suite.currentSpecReport.Failure = failure + } + suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + } +} + +func (suite *Suite) runSuiteNode(node Node) { + if suite.config.DryRun { + suite.currentSpecReport.State = types.SpecStatePassed + return + } + + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + var err error + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + case types.NodeTypeCleanupAfterSuite: + if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + } + case types.NodeTypeSynchronizedBeforeSuite: + var data []byte + var runAllProcs bool + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) } + node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutput() + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data) + } else { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil) + } + } + runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil + } else { + var proc1State types.SpecState + proc1State, data, err = 
suite.client.BlockUntilSynchronizedBeforeSuiteData() + switch proc1State { + case types.SpecStatePassed: + runAllProcs = true + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: + err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() + case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: + suite.currentSpecReport.State = proc1State + } + } + if runAllProcs { + node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) } + node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + } + case types.NodeTypeSynchronizedAfterSuite: + node.Body = node.SynchronizedAfterSuiteAllProcsBody + node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + + node.Body = node.SynchronizedAfterSuiteProc1Body + node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext + state, failure := suite.runNode(node, time.Time{}, "") + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure + } + } + } + } + + if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) + } + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() +} + +func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { + nodes := suite.suiteNodes.WithType(nodeType) + // only run ReportAfterSuite on proc 1 + if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 { + return + } + // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 { + state, err := suite.client.BlockUntilReportBeforeSuiteCompleted() + if err != nil || state.Is(types.SpecStateFailed) { + suite.report.SuiteSucceeded = false + } + return + } + + for _, node := range nodes { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } + + // if we're running ReportBeforeSuite and we're running in 
parallel - we shuld tell the other procs that we're done + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 { + if suite.report.SuiteSucceeded { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed) + } else { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed) + } + } +} + +func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and + // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it + if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() { + aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() + if err != nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) + return + } + report = report.Add(aggregatedReport) + } + + node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() +} + +func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) { + if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { + suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) + } + + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + + suite.selectiveLock.Lock() + suite.currentNode = node + suite.currentNodeStartTime = time.Now() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + defer func() { + suite.selectiveLock.Lock() + suite.currentNode = Node{} + suite.currentNodeStartTime = time.Time{} + suite.selectiveLock.Unlock() + }() + + if text == "" { + text = "TOP-LEVEL" + } + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventNodeStart, + NodeType: node.NodeType, + Message: text, + CodeLocation: node.CodeLocation, + }) + defer func() { + suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) + }() + + var failure types.Failure + failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation + if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) { + 
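+		// Classify where a potential failure sits so reporters can point at the right part
+		// of the spec tree: a leaf node (an It or a suite-level node), a top-level setup
+		// node, or a setup node nested in a container (recorded with its container index).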
failure.FailureNodeContext = types.FailureNodeIsLeafNode + } else if node.NestingLevel <= 0 { + failure.FailureNodeContext = types.FailureNodeAtTopLevel + } else { + failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 + } + var outcome types.SpecState + + gracePeriod := suite.config.GracePeriod + if node.GracePeriod >= 0 { + gracePeriod = node.GracePeriod + } + + now := time.Now() + deadline := suite.deadline + timeoutInPlay := "suite" + if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { + deadline = specDeadline + timeoutInPlay = "spec" + } + if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" + } + if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { + //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + if node.NodeTimeout > 0 { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" + } else { + deadline = now.Add(gracePeriod) + timeoutInPlay = "grace period" + } + } + + if !node.HasContext { + // this maps onto the pre-context behavior: + // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt + // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted + gracePeriod = 0 + } + + sc := NewSpecContext(suite) + defer sc.cancel() + + suite.selectiveLock.Lock() + suite.currentSpecContext = sc + suite.selectiveLock.Unlock() + + var deadlineChannel <-chan time.Time + if !deadline.IsZero() { + deadlineChannel = time.After(deadline.Sub(now)) + } + var gracePeriodChannel <-chan time.Time + + outcomeC := make(chan types.SpecState) + failureC := make(chan types.Failure) + + go func() { + finished := false + defer func() { + if e := recover(); e != nil || !finished { + suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) + } + + outcomeFromRun, failureFromRun := suite.failer.Drain() + failureFromRun.TimelineLocation = suite.generateTimelineLocation() + outcomeC <- outcomeFromRun + failureC <- failureFromRun + }() + + node.Body(sc) + finished = true + }() + + // progress polling timer and channel + var emitProgressNow <-chan time.Time + var progressPoller *time.Timer + var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval + if node.PollProgressAfter >= 0 { + pollProgressAfter = node.PollProgressAfter + } + if node.PollProgressInterval >= 0 { + pollProgressInterval = node.PollProgressInterval + } + if pollProgressAfter > 0 { + progressPoller = time.NewTimer(pollProgressAfter) + emitProgressNow = progressPoller.C + defer progressPoller.Stop() + } + + // now we wait for an outcome, an interrupt, a timeout, or a progress poll + for { + select { + case outcomeFromRun := <-outcomeC: + failureFromRun := <-failureC + if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) { + // we've already been interrupted/timed out. 
we just managed to actually exit + // before the grace period elapsed + // if we have a failure message we attach it as an additional failure + if outcomeFromRun != types.SpecStatePassed { + additionalFailure := types.AdditionalFailure{ + State: outcomeFromRun, + Failure: failure, //we make a copy - this will include all the configuration set up above... + } + //...and then we update the failure with the details from failureFromRun + additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + additionalFailure.Failure.ProgressReport = types.ProgressReport{} + if outcome == types.SpecStateTimedout { + additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message) + } else { + additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message) + } + suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure) + failure.AdditionalFailure = &additionalFailure + } + return outcome, failure + } + if outcomeFromRun.Is(types.SpecStatePassed) { + return outcomeFromRun, types.Failure{} + } else { + failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + suite.reporter.EmitFailure(outcomeFromRun, failure) + return outcomeFromRun, failure + } + case <-gracePeriodChannel: + if node.HasContext && outcome.Is(types.SpecStateTimedout) { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:" + suite.emitProgressReport(report) + } + return outcome, failure + case <-deadlineChannel: + // we're out of time - the outcome is a timeout and we capture the failure and progress report + outcome = types.SpecStateTimedout + failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation() + failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay) + deadlineChannel = nil + suite.reporter.EmitFailure(outcome, failure) + + // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where + // the spec is actually stuck + sc.cancel() + //and now we wait for the grace period + gracePeriodChannel = time.After(gracePeriod) + case <-interruptStatus.Channel: + interruptStatus = suite.interruptHandler.Status() + deadlineChannel = nil // don't worry about deadlines, time's up now + + failureTimelineLocation := suite.generateTimelineLocation() + progressReport := suite.generateProgressReport(true) + + if outcome == types.SpecStateInvalid { + outcome = types.SpecStateInterrupted + failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation + if interruptStatus.ShouldIncludeProgressReport() { + failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" + } + suite.reporter.EmitFailure(outcome, failure) + } + + progressReport = progressReport.WithoutOtherGoroutines() + sc.cancel() + + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + if interruptStatus.ShouldIncludeProgressReport() { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(progressReport) + } + return outcome, failure + } + if interruptStatus.ShouldIncludeProgressReport() { + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } + suite.emitProgressReport(progressReport) + } + + if gracePeriodChannel == nil { + // we haven't given grace yet... so let's + gracePeriodChannel = time.After(gracePeriod) + } else { + // we've already given grace. time's up. now. 
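+				// (the node's goroutine is abandoned at this point and may keep running in the
+				// background - a deliberate leak, since Go provides no way to kill a goroutine)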
+ return outcome, failure + } + case <-emitProgressNow: + report := suite.generateProgressReport(false) + report.Message = "{{bold}}Automatically polling progress:{{/}}" + suite.emitProgressReport(report) + if pollProgressInterval > 0 { + progressPoller.Reset(pollProgressInterval) + } + } + } +} + +// TODO: search for usages and consider if reporter.EmitFailure() is necessary +func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { + return types.Failure{ + Message: message, + Location: node.CodeLocation, + TimelineLocation: suite.generateTimelineLocation(), + FailureNodeContext: types.FailureNodeIsLeafNode, + FailureNodeType: node.NodeType, + FailureNodeLocation: node.CodeLocation, + } +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 0000000..2f42b26 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,128 @@ +package testingtproxy + +import ( + "fmt" + "io" + "os" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" +) + +type failFunc func(message string, callerSkip ...int) +type skipFunc func(message string, callerSkip ...int) +type cleanupFunc func(args ...interface{}) +type reportFunc func() types.SpecReport + +func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Cleanup(f func()) { + t.cleanup(f, internal.Offset(1)) +} + +func (t *ginkgoTestingTProxy) Setenv(key, value string) { + originalValue, exists := os.LookupEnv(key) + if exists { + t.cleanup(os.Setenv, key, originalValue, internal.Offset(1)) + } else { + t.cleanup(os.Unsetenv, key, internal.Offset(1)) + } + + err := os.Setenv(key, value) + if err != nil { + t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1) + } +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return t.report().Failed() +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Helper() { + // No-op +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) 
+} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + t.Log(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) Name() string { + return t.report().FullText() +} + +func (t *ginkgoTestingTProxy) Parallel() { + // No-op +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + t.skip(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) SkipNow() { + t.skip("skip", t.offset) +} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.skip(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return t.report().State.Is(types.SpecStateSkipped) +} + +func (t *ginkgoTestingTProxy) TempDir() string { + tmpDir, err := os.MkdirTemp("", "ginkgo") + if err != nil { + t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1) + return "" + } + t.cleanup(os.RemoveAll, tmpDir) + + return tmpDir +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go new file mode 100644 index 0000000..f9d1eeb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go @@ -0,0 +1,77 @@ +package internal + +import "github.com/onsi/ginkgo/v2/types" + +type TreeNode struct { + Node Node + Parent *TreeNode + Children TreeNodes +} + +func (tn *TreeNode) AppendChild(child *TreeNode) { + tn.Children = append(tn.Children, child) + child.Parent = tn +} + +func (tn *TreeNode) AncestorNodeChain() Nodes { + if tn.Parent == nil || tn.Parent.Node.IsZero() { + return Nodes{tn.Node} + } + return append(tn.Parent.AncestorNodeChain(), tn.Node) +} + +type TreeNodes []*TreeNode + +func (tn TreeNodes) Nodes() Nodes { + out := make(Nodes, len(tn)) + for i := range tn { + out[i] = tn[i].Node + } + return out +} + +func (tn TreeNodes) WithID(id uint) *TreeNode { + for i := range tn { + if tn[i].Node.ID == id { + return tn[i] + } + } + + return nil +} + +func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs { + var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs + walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs { + tests := Specs{} + + nodes := make(Nodes, len(trees)) + for i := range trees { + nodes[i] = trees[i].Node + nodes[i].NestingLevel = nestingLevel + } + + for i := range nodes { + if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) { + continue + } + leftNodes, rightNodes := nodes.SplitAround(nodes[i]) + leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt) + rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt) + + leftNodes = lNodes.CopyAppend(leftNodes...) + rightNodes = rightNodes.CopyAppend(rNodes...) + + if nodes[i].NodeType.Is(types.NodeTypeIt) { + tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)}) + } else { + treeNode := trees.WithID(nodes[i].ID) + tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...) 
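+				// Sketch of this walk: for a tree Describe(A){ BeforeEach(B); It(X); It(Y) }
+				// the recursion emits two Specs with Nodes [A, B, X] and [A, B, Y] - every spec
+				// carries its complete chain of container and setup nodes so it can run on its own.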
+ } + } + + return tests + } + + return walkTree(0, Nodes{}, Nodes{}, tree.Children) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go new file mode 100644 index 0000000..28a45b0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -0,0 +1,140 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +type WriterMode uint + +const ( + WriterModeStreamAndBuffer WriterMode = iota + WriterModeBufferOnly +) + +type WriterInterface interface { + io.Writer + + Truncate() + Bytes() []byte + Len() int +} + +// Writer implements WriterInterface and GinkgoWriterInterface +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + mode WriterMode + + streamIndent []byte + indentNext bool + + teeWriters []io.Writer +} + +func NewWriter(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + streamIndent: []byte(" "), + indentNext: true, + } +} + +func (w *Writer) SetMode(mode WriterMode) { + w.lock.Lock() + defer w.lock.Unlock() + w.mode = mode +} + +func (w *Writer) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + return w.buffer.Len() +} + +var newline = []byte("\n") + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + for _, teeWriter := range w.teeWriters { + teeWriter.Write(b) + } + + if w.mode == WriterModeStreamAndBuffer { + line, remaining, found := []byte{}, b, false + for len(remaining) > 0 { + line, remaining, found = bytes.Cut(remaining, newline) + if len(line) > 0 { + if w.indentNext { + w.outWriter.Write(w.streamIndent) + w.indentNext = false + } + w.outWriter.Write(line) + } + if found { + w.outWriter.Write(newline) + w.indentNext = true + } + } + } + return w.buffer.Write(b) +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +// GinkgoWriterInterface +func (w *Writer) TeeTo(writer io.Writer) { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = append(w.teeWriters, writer) +} + +func (w *Writer) ClearTeeWriters() { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = []io.Writer{} +} + +func (w *Writer) Print(a ...interface{}) { + fmt.Fprint(w, a...) +} + +func (w *Writer) Printf(format string, a ...interface{}) { + fmt.Fprintf(w, format, a...) +} + +func (w *Writer) Println(a ...interface{}) { + fmt.Fprintln(w, a...) +} + +func GinkgoLogrFunc(writer *Writer) logr.Logger { + return funcr.New(func(prefix, args string) { + writer.Printf("%s", args) + }, funcr.Options{}) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 0000000..7a27220 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,750 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
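+
+For example, `ginkgo -v` and `ginkgo -vv` raise the verbosity level, while `ginkgo --no-color`
+disables color codes (an illustrative subset, not the full list of flags).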
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastChar string + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter + + runningInParallel bool +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastChar: "\n", + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock 
:= r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + v := r.conf.Verbosity() + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + header := r.specDenoter + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
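+	// (a timeline interleaves captured GinkgoWriter output with failures, report entries,
+	// progress reports, and spec events, ordered by TimelineLocation - see emitTimeline)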
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } + } + + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + + switch report.State { + case types.SpecStatePassed: + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("%s PASSED", header) + } + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true + } + case types.SpecStatePending: + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true + } + case types.SpecStateSkipped: + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) + } + } + + // If we have no content to show, jsut emit the header and return + if !reportHasContent { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } + + //Emit Stdout/Stderr Output + if showSeparateStdSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + } + + if showSeparateVisibilityAlwaysReportsSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + } + + if showTimeline { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { + r.emitBlock("\n") + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. 
To view them in detail run {{bold}}ginkgo -vv{{/}}")) + } + } + + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) + } +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if 
!failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } + + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + 
if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + if len(s) > 0 { + r.lastChar = s[len(s)-1:] + r.lastEmissionWasDelimiter = false + r.writer.Write([]byte(s)) + } +} + +func (r *DefaultReporter) emitBlock(s string) { + if len(s) > 0 { + if r.lastChar != "\n" { + r.emit("\n") + } + r.emit(s) + if r.lastChar != "\n" { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + if r.lastEmissionWasDelimiter { + return + } + r.emitBlock(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30))) + r.lastEmissionWasDelimiter = true +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
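+	// the slices are seeded with the container hierarchy; the leaf node's text,
+	// labels, and location are appended next, so the last index is always the leaf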
+
+	if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+		texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
+	} else {
+		texts = append(texts, r.f(report.LeafNodeText))
+	}
+	labels = append(labels, report.LeafNodeLabels)
+	locations = append(locations, report.LeafNodeLocation)
+
+	failureLocation := report.Failure.FailureNodeLocation
+	if usePreciseFailureLocation {
+		failureLocation = report.Failure.Location
+	}
+
+	highlightIndex := -1
+	switch report.Failure.FailureNodeContext {
+	case types.FailureNodeAtTopLevel:
+		texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
+		locations = append([]types.CodeLocation{failureLocation}, locations...)
+		labels = append([][]string{{}}, labels...)
+		highlightIndex = 0
+	case types.FailureNodeInContainer:
+		i := report.Failure.FailureNodeContainerIndex
+		texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
+		locations[i] = failureLocation
+		highlightIndex = i
+	case types.FailureNodeIsLeafNode:
+		i := len(texts) - 1
+		texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
+		locations[i] = failureLocation
+		highlightIndex = i
+	default:
+		//there is no failure, so we highlight the leaf node
+		highlightIndex = len(texts) - 1
+	}
+
+	out := ""
+	if veryVerbose {
+		for i := range texts {
+			if i == highlightIndex {
+				out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
+			} else {
+				out += r.fi(uint(i), "%s", texts[i])
+			}
+			if len(labels[i]) > 0 {
+				out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+			}
+			out += "\n"
+			out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
+		}
+	} else {
+		for i := range texts {
+			style := "{{/}}"
+			if i%2 == 1 {
+				style = "{{gray}}"
+			}
+			if i == highlightIndex {
+				style = highlightColor + "{{bold}}"
+			}
+			out += r.f(style+"%s", texts[i])
+			if i < len(texts)-1 {
+				out += " "
+			} else {
+				out += r.f("{{/}}")
+			}
+		}
+		flattenedLabels := report.Labels()
+		if len(flattenedLabels) > 0 {
+			out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
+		}
+		out += "\n"
+		if usePreciseFailureLocation {
+			out += r.f("{{gray}}%s{{/}}", failureLocation)
+		} else {
+			leafLocation := locations[len(locations)-1]
+			if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
+				out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
+				out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
+			} else {
+				out += r.f("{{gray}}%s{{/}}", leafLocation)
+			}
+		}
+
+	}
+	return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
new file mode 100644
index 0000000..613072e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -0,0 +1,149 @@
+package reporters
+
+import (
+	"github.com/onsi/ginkgo/v2/config"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters;
+// this has been removed in V2.
+// Please read the documentation at:
+// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+// for Ginkgo's new behavior and for a migration path.
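Since V1 custom reporters no longer run automatically, the documented bridge is to invoke them from a ReportAfterSuite node. A minimal sketch (editorial illustration, not part of the vendored diff; `legacyReporter` is a placeholder for any value implementing the DeprecatedReporter interface below):

```go
package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// legacyReporter is assumed to satisfy reporters.DeprecatedReporter.
var _ = ReportAfterSuite("bridge to V1 custom reporter", func(report Report) {
	// Replays the aggregated V2 report through the V1 reporter callbacks.
	reporters.ReportViaDeprecatedReporter(legacyReporter, report)
})
```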
+type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. +func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: false, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
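+			// preserve V1's ComponentTexts layout: container descriptions first,
+			// then the leaf node's text and location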
+ componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 0000000..7f96c45 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,60 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/types" +) + +//GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + f, err := os.Create(destination) + if err != nil { + return err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return f.Close() +} + +//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +//It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = append(allReports, reports...) 
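The Generate*Report helpers are ordinary functions over types.Report, so a suite can emit machine-readable output itself instead of relying on the CLI flags. A minimal sketch (editorial illustration, not part of the vendored diff; the output paths are assumptions, and GenerateJUnitReportWithConfig is defined a little further down in this same patch):

```go
package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// Mirrors what the --json-report and --junit-report flags register automatically.
var _ = ReportAfterSuite("write machine-readable reports", func(report Report) {
	if err := reporters.GenerateJSONReport(report, "suite-report.json"); err != nil {
		Fail("failed to write JSON report: " + err.Error())
	}
	cfg := reporters.JunitReportConfig{OmitLeafNodeType: true}
	if err := reporters.GenerateJUnitReportWithConfig(report, "suite-report.xml", cfg); err != nil {
		Fail("failed to write JUnit report: " + err.Error())
	}
})
```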
+	}
+
+	f, err := os.Create(destination)
+	if err != nil {
+		return messages, err
+	}
+	enc := json.NewEncoder(f)
+	enc.SetIndent("", " ")
+	err = enc.Encode(allReports)
+	if err != nil {
+		return messages, err
+	}
+	return messages, f.Close()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 0000000..fb87e24
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,359 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+	"encoding/xml"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/onsi/ginkgo/v2/config"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+type JunitReportConfig struct {
+	// Spec States for which no timeline should be emitted for system-err
+	// set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
+	OmitTimelinesForSpecState types.SpecState
+
+	// Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
+	OmitFailureMessageAttr bool
+
+	//Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
+	OmitCapturedStdOutErr bool
+
+	// Enable OmitSpecLabels to prevent labels from appearing in the spec name
+	OmitSpecLabels bool
+
+	// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+	OmitLeafNodeType bool
+}
+
+type JUnitTestSuites struct {
+	XMLName xml.Name `xml:"testsuites"`
+	// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+	Tests int `xml:"tests,attr"`
+	// Disabled maps onto specs that are pending and/or skipped
+	Disabled int `xml:"disabled,attr"`
+	// Errors maps onto specs that panicked or were interrupted
+	Errors int `xml:"errors,attr"`
+	// Failures maps onto specs that failed
+	Failures int `xml:"failures,attr"`
+	// Time is the time in seconds to execute all test suites
+	Time float64 `xml:"time,attr"`
+
+	//The set of all test suites
+	TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+	// Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+	Name string `xml:"name,attr"`
+	// Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+	Package string `xml:"package,attr"`
+	// Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+	Tests int `xml:"tests,attr"`
+	// Disabled maps onto specs that are pending
+	Disabled int `xml:"disabled,attr"`
+	// Skipped maps onto specs that are skipped
+	Skipped int `xml:"skipped,attr"`
+	// Errors maps onto specs that panicked or were interrupted
+	Errors int `xml:"errors,attr"`
+	// Failures maps onto specs that failed
+	Failures int `xml:"failures,attr"`
+	// Time is the time in seconds to execute all the test suite - maps onto Report.RunTime
+	Time float64 `xml:"time,attr"`
+	// Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+	Timestamp string `xml:"timestamp,attr"`
+
+	//Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+	Properties JUnitProperties `xml:"properties"`
+
+
//TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func (jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string `xml:"classname,attr"` + // Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemOut maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + Package: report.SuitePath, + Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", 
report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 && !config.OmitSpecLabels { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + name = strings.TrimSpace(name) + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: spec.Failure.Message, + Type: "interrupted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + f, err 
:= os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) + } + + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n") + } + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 0000000..5e726c4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,29 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report 
types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 0000000..c186349 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,101 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + f, err := os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateInterrupted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateAborted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' 
message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) + } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go new file mode 100644 index 0000000..f33786a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -0,0 +1,182 @@ +package ginkgo + +import ( + "fmt" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Report represents the report for a Suite. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report +*/ +type Report = types.Report + +/* +Report represents the report for a Spec. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type SpecReport = types.SpecReport + +/* +CurrentSpecReport returns information about the current running spec. +The returned object is a types.SpecReport which includes helper methods +to make extracting information about the spec easier. + +You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport +You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +*/ +func CurrentSpecReport() SpecReport { + return global.Suite.CurrentSpecReport() +} + +/* + ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + +- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. +- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior). +- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`). + +You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports +*/ +type ReportEntryVisibility = types.ReportEntryVisibility + +const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever + +/* +AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. +It can take any of the following arguments: + - A single arbitrary object to attach as the Value of the ReportEntry. 
This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+  - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+  - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+
+If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
+
+AddReportEntry() must be called within a Subject or Setup node - not in a Container node.
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+func AddReportEntry(name string, args ...interface{}) {
+	cl := types.NewCodeLocation(1)
+	reportEntry, err := internal.NewReportEntry(name, cl, args...)
+	if err != nil {
+		Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
+	}
+	err = global.Suite.AddReportEntry(reportEntry)
+	if err != nil {
+		Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
+	}
+}
+
+/*
+ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
+receives a SpecReport. They are called before the spec starts.
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
+You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+*/
+func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+}
+
+/*
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
+receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+*/
+func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+*/
+func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
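A quick illustration of the AddReportEntry API documented above (editorial sketch, not part of the vendored diff; the spec, entry name, and value are invented):

```go
var _ = Describe("database migrations", func() {
	It("applies every migration", func() {
		// Shown only on failure or when running with -v; the value may be any
		// object, and a fmt.Stringer's String() output is used on the console.
		AddReportEntry("schema-version", ReportEntryVisibilityFailureOrVerbose, 42)
	})
})
```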
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) +} + +/* +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. + +They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. +ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) + +When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across +all parallel nodes + +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. + +You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. +You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports +*/ +func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) +} + +func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { + body := func(report Report) { + if reporterConfig.JSONReport != "" { + err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) + } + } + if reporterConfig.JUnitReport != "" { + err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error())) + } + } + if reporterConfig.TeamcityReport != "" { + err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error())) + } + } + } + + flags := []string{} + if reporterConfig.JSONReport != "" { + flags = append(flags, "--json-report") + } + if reporterConfig.JUnitReport != "" { + flags = append(flags, "--junit-report") + } + if reporterConfig.TeamcityReport != "" { + flags = append(flags, "--teamcity-report") + } + pushNode(internal.NewNode( + deprecationTracker, types.NodeTypeReportAfterSuite, + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + )) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go new file mode 100644 index 0000000..6836744 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -0,0 +1,302 @@ +package ginkgo + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" +) + +/* +The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: + + fmt.Sprintf(formatString, parameters...) 
+ +where parameters are the parameters passed into the entry. + +When passed into an Entry the EntryDescription is used to generate the name or that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions. + +You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions +*/ +type EntryDescription string + +func (ed EntryDescription) render(args ...interface{}) string { + return fmt.Sprintf(string(ed), args...) +} + +/* +DescribeTable describes a table-driven spec. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTable(description string, args ...interface{}) bool { + generateTable(description, args...) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Focus) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Pending) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. +*/ +var XDescribeTable = PDescribeTable + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + description interface{} + decorations []interface{} + parameters []interface{} + codeLocation types.CodeLocation +} + +/* +Entry constructs a TableEntry. + +The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description. +Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table. + +Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. + +If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators. + +You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs +*/ +func Entry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) 
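To make the EntryDescription mechanics above concrete: entries with a nil description fall back to the table-level format string, which is rendered against each entry's parameters. A short sketch (editorial illustration, not part of the vendored diff; Gomega's Expect/Equal are assumed to be dot-imported alongside Ginkgo as usual):

```go
var _ = DescribeTable("integer comparisons",
	func(x, y int, expected bool) {
		Expect(x > y).To(Equal(expected))
	},
	EntryDescription("%d > %d is %t"), // names every entry with a nil description
	Entry(nil, 1, 0, true),            // reported as "1 > 0 is true"
	Entry(nil, 0, 1, false),           // reported as "0 > 1 is false"
)
```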
+ decorations = append(decorations, internal.Focus) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. +*/ +func PEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + decorations = append(decorations, internal.Pending) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. +*/ +var XEntry = PEntry + +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + +func generateTable(description string, args ...interface{}) { + cl := types.NewCodeLocation(2) + containerNodeArgs := []interface{}{cl} + + entries := []TableEntry{} + var itBody interface{} + var itBodyType reflect.Type + + var tableLevelEntryDescription interface{} + tableLevelEntryDescription = func(args ...interface{}) string { + out := []string{} + for _, arg := range args { + out = append(out, fmt.Sprint(arg)) + } + return "Entry: " + strings.Join(out, ", ") + } + + if len(args) == 1 { + exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl)) + } + + for i, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == nil: + exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl)) + case t == reflect.TypeOf(TableEntry{}): + entries = append(entries, arg.(TableEntry)) + case t == reflect.TypeOf([]TableEntry{}): + entries = append(entries, arg.([]TableEntry)...) + case t == reflect.TypeOf(EntryDescription("")): + tableLevelEntryDescription = arg.(EntryDescription).render + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + tableLevelEntryDescription = arg + case t.Kind() == reflect.Func: + if itBody != nil { + exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) + } + itBody = arg + itBodyType = reflect.TypeOf(itBody) + default: + containerNodeArgs = append(containerNodeArgs, arg) + } + } + + containerNodeArgs = append(containerNodeArgs, func() { + for _, entry := range entries { + var err error + entry := entry + var description string + switch t := reflect.TypeOf(entry.description); { + case t == nil: + err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false) + if err == nil { + description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String() + } + case t == reflect.TypeOf(EntryDescription("")): + description = entry.description.(EntryDescription).render(entry.parameters...) + case t == reflect.TypeOf(""): + description = entry.description.(string) + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false) + if err == nil { + description = invokeFunction(entry.description, entry.parameters)[0].String() + } + default: + err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) + } + + itNodeArgs := []interface{}{entry.codeLocation} + itNodeArgs = append(itNodeArgs, entry.decorations...) + + hasContext := false + if itBodyType.NumIn() > 0. 
{ + if itBodyType.In(0).Implements(specContextType) { + hasContext = true + } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + hasContext = true + } + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + } + + if hasContext { + itNodeArgs = append(itNodeArgs, func(c SpecContext) { + if err != nil { + panic(err) + } + invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + }) + } else { + itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + } + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + } + }) + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) +} + +func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value { + inValues := make([]reflect.Value, len(parameters)) + + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + if funcType.IsVariadic() { + limit = limit - 1 + } + + for i := 0; i < limit && i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], funcType.In(i)) + } + + if funcType.IsVariadic() { + variadicType := funcType.In(limit).Elem() + for i := limit; i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], variadicType) + } + } + + return reflect.ValueOf(function).Call(inValues) +} + +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + offset := 0 + if hasContext { + limit = limit - 1 + offset = 1 + } + if funcType.IsVariadic() { + limit = limit - 1 + } + if len(parameters) < limit { + return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl) + } + if len(parameters) > limit && !funcType.IsVariadic() { + return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl) + } + var i = 0 + for ; i < limit; i++ { + actual := reflect.TypeOf(parameters[i]) + expected := funcType.In(i + offset) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) + } + } + if funcType.IsVariadic() { + expected := funcType.In(limit + offset).Elem() + for ; i < len(parameters); i++ { + actual := reflect.TypeOf(parameters[i]) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl) + } + } + } + + return nil +} + +func computeValue(parameter interface{}, t reflect.Type) reflect.Value { + if parameter == nil { + return reflect.Zero(t) + } else { + return reflect.ValueOf(parameter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 0000000..1291091 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,92 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation 
CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func NewCodeLocation(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + return CodeLocation{FileName: file, LineNumber: line} +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip+1) + return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.Match([]byte(stack[i*2+1])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 0000000..4ec636e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,739 @@ +/* +Ginkgo accepts a number of configuration options. 
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration + Timeout time.Duration + OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + GracePeriod: 30 * time.Second, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{} +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). 
+// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. 
Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite."}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. 
'(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. 
For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. 
You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + 
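+ // Each entry below binds a CLI flag to a configuration field through its KeyPath + // (e.g. "C.Recurse" resolves to CLIConfig.Recurse via the bindings map supplied when + // a flag set is built) and carries usage text plus any deprecation metadata. + // A sketch of these flags on the command line (illustrative values): ginkgo -r --skip-package=integration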
{KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIWatchFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis.
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to race or, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed.
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run 'cmd args /path/to/asm <arguments for asm>'."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting.
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction(). Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound. +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs( + GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
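+ // With the "ginkgo" and "test" prefixes applied in this function, the generated + // arguments take forms like --ginkgo.seed=12345, --ginkgo.parallel.process=2, or + // --test.coverprofile=cover.out (values illustrative).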
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
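+ // The resulting flag set spans the shared CLI flags plus go's build-time flags, + // so an invocation like `ginkgo build -r --race` is accepted (illustrative).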
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 0000000..1792230 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s 
DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 0000000..f267bde --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,177 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to reference objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information mean that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 0000000..1d96ae0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import
"encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 0000000..a0f59fb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,621 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. 
If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. + +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. 
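// Aside (illustrative, not part of the vendored file): every constructor on
// ginkgoErrors follows the shape below - fill in a GinkgoError and let its
// Error() method render the bold heading, the offending source line (when
// CodeLocation is non-zero), the message wrapped to formatter.COLS, and the
// doc link. The field values and function name here are hypothetical.
func exampleGinkgoErrorShape(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Short, bolded summary",
		Message:      "Longer explanation of what went wrong and how to fix it.",
		DocLink:      "mental-model-how-ginkgo-handles-failure", // anchor under onsi.github.io/ginkgo
		CodeLocation: cl,
	}
}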
+ +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. + +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. 
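// User-side sketch of the decorator rules policed above (assumes Ginkgo's DSL
// is dot-imported): either decorator alone is fine; the commented pairs are
// rejected at tree-construction time.
var _ = Describe("decorator conflicts", func() {
	It("can be focused", Focus, func() {})
	It("can be pending", Pending, func() {})
	// It("cannot be both", Focus, Pending, func() {})
	//   -> InvalidDeclarationOfFocusedAndPending
	// It("cannot retry both ways", FlakeAttempts(2), MustPassRepeatedly(2), func() {})
	//   -> InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly
})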
You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed `+mustGet+`. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
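// For reference, a user-side sketch of the signatures InvalidBodyType above
// accepts for non-container nodes (assumes dot-imported Ginkgo plus the
// context and time imports):
var _ = Describe("accepted body signatures", func() {
	It("takes nothing", func() {})
	// accepting a context makes the node interruptible and unlocks NodeTimeout:
	It("takes a SpecContext", func(ctx SpecContext) {}, NodeTimeout(time.Second))
	It("takes a context.Context", func(ctx context.Context) {})
})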
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddReportEntry{{/}} can only be given one value. 
Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddReportEntry{{/}} outside of a running spec. Make sure you call {{bold}}AddReportEntry{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. 
This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), + Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. 
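// User-side sketch of the table checks above (assumes Ginkgo's table DSL and
// Gomega are dot-imported): exactly one body function, and every Entry must
// match that function's parameter count and types.
var _ = DescribeTable("addition",
	func(a, b, sum int) { Expect(a + b).To(Equal(sum)) }, // the single body function
	Entry("small numbers", 1, 2, 3), // ok: three ints
	// Entry("too few", 1, 2)         -> TooFewParametersToTableFunction
	// Entry("wrong type", 1, 2, "3") -> IncorrectParameterTypeToTableFunction
)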
This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. 
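// User-side sketch of what UnknownTypePassedToRunSpecs above allows: RunSpecs
// accepts labels plus values of type types.SuiteConfig and/or
// types.ReporterConfig, typically obtained from GinkgoConfiguration().
func TestBooks(t *testing.T) {
	suiteConfig, reporterConfig := GinkgoConfiguration()
	suiteConfig.LabelFilter = "fast && !network" // parsed by label_filter.go, below
	RunSpecs(t, "Books Suite", Label("books"), suiteConfig, reporterConfig)
}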
Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", + } +} + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 0000000..cc21df7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 0000000..9186ae8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,489 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + 
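// Sketch (hypothetical helper name) of the file-filter semantics above: a bare
// line N is stored as the half-open LineFilter{N, N+1}, while "A-B" is stored
// as {A, B}, so "21-25" matches lines 21 through 24 but not line 25 itself.
func exampleFileFilter() (bool, error) {
	ffs, err := ParseFileFilters([]string{`books_test\.go:17,21-25`})
	if err != nil {
		return false, err
	}
	loc := CodeLocation{FileName: "pkg/books/books_test.go", LineNumber: 24}
	return ffs.Matches([]CodeLocation{loc}), nil // true: 21 <= 24 < 25
}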
"github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + 
f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + 
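// Sketch (hypothetical names) tying the binding machinery above together:
// bindings must be a pointer so value.Addr() works in bindFlagSet, KeyPath
// selects the field to bind, and Parse fills it in while returning the
// positional leftovers.
func exampleFlagRoundTrip() ([]string, bool, error) {
	bindings := &struct{ Verbose bool }{}
	flags := GinkgoFlags{{Name: "verbose", KeyPath: "Verbose", Usage: "emit more output"}}
	fs, err := NewGinkgoFlagSet(flags, bindings, GinkgoFlagSections{})
	if err != nil {
		return nil, false, err
	}
	rest, err := fs.Parse([]string{"--verbose", "./some/package"})
	return rest, bindings.Verbose, err // ["./some/package"], true, nil
}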
groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type 
stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 0000000..0403f9e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,347 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + 
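// Sketch of the parser these tokens feed (ParseLabelFilter appears a bit
// further down): '!' binds tighter than '&&', which binds tighter than '||',
// and ',' is an alias for '||'. The function name here is hypothetical.
func exampleLabelFilter() (bool, error) {
	filter, err := ParseLabelFilter("(fast || smoke) && !network")
	if err != nil {
		return false, err
	}
	return filter([]string{"fast", "integration"}), nil // true: has fast, lacks network
}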
switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" + case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) 
&& runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 0000000..7b1524b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,190 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Location captures the location of the AddReportEntry call + Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
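// Aside (illustrative, not vendored): how a value flows through the
// ReportEntryValue machinery above. Representation is captured at encoding
// time, so a pointer reported early still serializes with its final,
// colorable form. The temperature type is hypothetical.
type temperature float64

func (t temperature) ColorableString() string {
	return fmt.Sprintf("{{red}}%.1fC{{/}}", float64(t))
}

func exampleReportEntryValue() ([]byte, error) {
	rev := WrapEntryValue(temperature(98.6))
	// rev.String() prefers ColorableString(), yielding "{{red}}98.6C{{/}}";
	// MarshalJSON stores that in Representation alongside AsJSON ("98.6").
	return json.Marshal(rev)
}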
+type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. 
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 0000000..3e979ba --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,913 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
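// Aside: revEnumSupport above is the canonical EnumSupport usage from
// enum_support.go. A sketch of the resulting JSON behavior (hypothetical
// function name); note that the zero value (ReportEntryVisibilityAlways)
// deliberately marshals to null in MarshJSON.
func exampleVisibilityJSON() error {
	b, err := json.Marshal(ReportEntryVisibilityFailureOrVerbose)
	if err != nil {
		return err
	}
	// b == []byte(`"failure-or-verbose"`)
	var v ReportEntryVisibility
	return json.Unmarshal(b, &v) // restores ReportEntryVisibilityFailureOrVerbose
}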
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite + SpecReports SpecReports +} + +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +// to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + for i := range report.SpecReports { + reports[i] = report.SpecReports[i] + } + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. 
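// Aside sketching Report.Add above: parallel-process reports merge by ANDing
// SuiteSucceeded, widening the Start/End window, deduplicating
// SpecialSuiteFailureReasons, and concatenating SpecReports.
func exampleAggregate(proc1, proc2 Report) Report {
	merged := proc1.Add(proc2)
	// merged.SuiteSucceeded == proc1.SuiteSucceeded && proc2.SuiteSucceeded
	// merged.RunTime == merged.EndTime.Sub(merged.StartTime)
	// len(merged.SpecReports) == len(proc1.SpecReports) + len(proc2.SpecReports)
	return merged
}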
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. + // Flaky specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator + NumAttempts int + + // MaxFlakeAttempts captures the maximum number of attempts allowed when the spec is retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures the number of runs required when the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
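+ // (For example: a failure raised in a DeferCleanup callback after the spec body has already failed is recorded here rather than in Failure.)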
+ AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. +func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +// Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted, SpecStateTimedout) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels.
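+// (Hypothetical example: container labels [["slow"], ["slow", "integration"]] plus leaf labels ["integration", "flaky"] yield ["slow", "integration", "flaky"] - the first occurrence wins and later duplicates are dropped.)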
+func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +// MatchesLabelFilter returns true if the spec satisfies the passed-in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +// FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +// LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + +type SpecReports []SpecReport + +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + +// CountOfFlakedSpecs returns the number of SpecReports for specs that ultimately passed, but only after more than one attempt (i.e. specs that flaked).
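+// (Concretely: a spec declared with FlakeAttempts(3) that fails twice and then passes ends with State == SpecStatePassed and NumAttempts == 3, so it is counted here.)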
+func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// CountOfRepeatedSpecs returns the number of SpecReports for specs that were run repeatedly with the MustPassRepeatedly decorator and ultimately failed after more than one attempt +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represents an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + TimelineLocation TimelineLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container.
FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` +} + +func (f Failure) IsZero() bool { + return f.Message == "" && (f.Location == CodeLocation{}) +} + +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// AdditionalFailure captures any additional failures that occur after the initial failure of a spec +// these typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted + SpecStateTimedout +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr 
ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportBeforeSuite + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return 
ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} + +/* +SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events. +*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 0000000..b874ca0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.6.0" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions new file mode 100644 index 0000000..7408dd1 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions
@@ -0,0 +1,5 @@ +inverseRules: + # Allow use of this package in all k8s.io packages. + - selectorRegexp: k8s[.]io + allowedPrefixes: + - '' diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go new file mode 100644 index 0000000..eed3fde --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "bytes" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" +) + +func Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + if err := autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in, out, s); err != nil { + return err + } + if in.Default != nil && *(in.Default) == nil { + out.Default = nil + } + if in.Example != nil && *(in.Example) == nil { + out.Example = nil + } + return nil +} + +var nullLiteral = []byte(`null`) + +func Convert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + raw, err := json.Marshal(*in) + if err != nil { + return err + } + if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { + // match JSON#UnmarshalJSON treatment of literal nulls + out.Raw = nil + } else { + out.Raw = raw + } + return nil +} + +func Convert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + if in != nil { + var i interface{} + if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } + } + *out = i + } else { + out = nil + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go new file mode 100644 index 0000000..9f64585 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -0,0 +1,276 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + *out = *in + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + (*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := 
range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.XMapType != nil { + in, out := &in.XMapType, &out.XMapType + *out = new(string) + **out = **in + } + + if in.XValidations != nil { + in, out := &in.XValidations, &out.XValidations + *out = make([]ValidationRule, len(*in)) + copy(*out, *in) + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go new file mode 100644 index 0000000..1a9c223 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } +} + +func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { + if len(obj.Scope) == 0 { + obj.Scope = NamespaceScoped + } + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + // If there is no list of versions, create one using the deprecated Version field.
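+ // (e.g. a spec carrying only the deprecated Version: "v1" is expanded to Versions: []CustomResourceDefinitionVersion{{Name: "v1", Served: true, Storage: true}}, mirroring the branch below.)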
+ if len(obj.Versions) == 0 && len(obj.Version) != 0 { + obj.Versions = []CustomResourceDefinitionVersion{{ + Name: obj.Version, + Storage: true, + Served: true, + }} + } + // For backward compatibility, set the version field to the first item in the versions list. + if len(obj.Version) == 0 && len(obj.Versions) != 0 { + obj.Version = obj.Versions[0].Name + } + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } + if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { + obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} + } + if obj.PreserveUnknownFields == nil { + obj.PreserveUnknownFields = utilpointer.BoolPtr(true) + } +} + +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go new file mode 100644 index 0000000..7a92cb8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1beta1 is the v1beta1 version of the API. +package v1beta1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go new file mode 100644 index 0000000..6ab5006 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -0,0 +1,9297 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto + +package v1beta1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo + +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } +func (*CustomResourceDefinitionNames) ProtoMessage() {} +func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + +func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } +func (*CustomResourceDefinitionSpec) ProtoMessage() {} +func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo + +func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } +func (*CustomResourceDefinitionStatus) ProtoMessage() {} +func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + +func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } +func (*CustomResourceSubresourceScale) ProtoMessage() {} +func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + +func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } +func (*CustomResourceSubresourceStatus) ProtoMessage() {} +func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + +func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } +func (*CustomResourceSubresources) ProtoMessage() {} +func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + +func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } +func (*CustomResourceValidation) ProtoMessage() {} +func (*CustomResourceValidation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m 
= JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
+func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidationRule) Reset() { *m = ValidationRule{} } +func (*ValidationRule) ProtoMessage() {} +func (*ValidationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{23} +} +func (m *ValidationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidationRule.Merge(m, src) +} +func (m *ValidationRule) XXX_Size() int { + return m.Size() +} +func (m *ValidationRule) XXX_DiscardUnknown() { + xxx_messageInfo_ValidationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidationRule proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{24} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ConversionRequest)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionReview") + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceConversion") + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion") + proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale") + proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceStatus") + proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresources") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") + proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") + proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ValidationRule") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) +} + +var fileDescriptor_98a4cc6918394e53 = []byte{ + // 3079 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, + 0xf5, 0xdf, 0x91, 0x2d, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0x56, + 0xbe, 0xd9, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x24, 0x24, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, 0x4e, + 0xd6, 0x3f, 0x68, 0xed, 0x26, 0x86, 0xfc, 0x6c, 0x6b, 0xda, 0xf2, 0xc4, 0xa3, 0x99, 0xd9, 0xe9, + 0x19, 0xd9, 0xae, 0x00, 0xc5, 0x8f, 0x4a, 0x41, 0x51, 0x40, 0x28, 0x92, 0x0b, 0x05, 0x1c, 0x02, + 0xc5, 0x85, 0x03, 0x1c, 0xe0, 0x06, 0x7f, 0x40, 0x8e, 0x29, 0x8a, 0x43, 0x0e, 0x94, 0x20, 0xe2, + 0xca, 0x91, 0x2a, 0xaa, 0x7c, 0xa2, 0xfa, 0xc7, 0xf4, 0xb4, 0x46, 0xd2, 0xee, 0x56, 0x2c, 0x65, + 0xb9, 0x69, 0xde, 0xaf, 0xcf, 0x9b, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x23, 0xb0, 0x7f, 0xf8, 0x0c, + 0x2d, 0xda, 0xde, 0xd2, 0x61, 0xb4, 0x47, 0x02, 0x97, 0x84, 0x84, 0x2e, 0xd5, 0x89, 0x6b, 0x79, + 0xc1, 0x92, 0x64, 0x60, 0xdf, 0x26, 0xc7, 0x21, 0x71, 0xa9, 0xed, 0xb9, 0xf4, 0x31, 0xec, 0xdb, + 0x94, 0x04, 0x75, 0x12, 0x2c, 0xf9, 0x87, 0x55, 0xc6, 0xa3, 0xad, 0x02, 0x4b, 0xf5, 0x27, 0xf6, + 0x48, 0x88, 0x9f, 0x58, 0xaa, 0x12, 0x97, 0x04, 0x38, 0x24, 0x56, 0xd1, 0x0f, 0xbc, 0xd0, 0x83, + 0xcf, 0x09, 0x73, 0xc5, 0x16, 0xe9, 0x37, 0x94, 0xb9, 0xa2, 0x7f, 0x58, 0x65, 0x3c, 0xda, 0x2a, + 0x50, 0x94, 0xe6, 0x66, 0x1f, 0xab, 0xda, 0xe1, 0x41, 0xb4, 0x57, 0xac, 0x78, 0xb5, 0xa5, 0xaa, + 0x57, 0xf5, 0x96, 0xb8, 0xd5, 0xbd, 0x68, 0x9f, 0x3f, 0xf1, 0x07, 0xfe, 0x4b, 0xa0, 0xcd, 0x3e, + 0x99, 0x38, 0x5f, 0xc3, 0x95, 0x03, 0xdb, 0x25, 0xc1, 0x49, 0xe2, 0x71, 0x8d, 0x84, 0x78, 0xa9, + 0xde, 0xe6, 0xe3, 0xec, 0x52, 0x37, 0xad, 0x20, 0x72, 0x43, 0xbb, 0x46, 0xda, 0x14, 0x3e, 0x7f, + 0x27, 0x05, 0x5a, 0x39, 0x20, 0x35, 0x9c, 0xd6, 0x2b, 0x9c, 0x1a, 0x60, 0x6a, 0xd5, 0x73, 0xeb, + 0x24, 0x60, 0x6f, 0x89, 0xc8, 0xad, 0x88, 0xd0, 0x10, 0x96, 0xc0, 0x40, 0x64, 0x5b, 0xa6, 0x31, + 0x6f, 0x2c, 0x8e, 0x94, 0x1e, 0xff, 0xb0, 0x91, 0x3f, 0xd7, 0x6c, 0xe4, 0x07, 0x6e, 0x6e, 0xac, + 0x9d, 0x36, 0xf2, 0x0b, 0xdd, 0x90, 0xc2, 0x13, 0x9f, 0xd0, 0xe2, 0xcd, 0x8d, 0x35, 0xc4, 0x94, + 0xe1, 0xf3, 0x60, 0xca, 0x22, 0xd4, 0x0e, 0x88, 0xb5, 0xb2, 0xb3, 0xf1, 0x92, 0xb0, 0x6f, 0x66, + 0xb8, 0xc5, 0x4b, 0xd2, 0xe2, 0xd4, 0x5a, 0x5a, 0x00, 0xb5, 0xeb, 0xc0, 0x5d, 0x30, 0xec, 0xed, + 0xbd, 0x45, 0x2a, 0x21, 0x35, 0x07, 0xe6, 0x07, 0x16, 0x47, 0x97, 0x1f, 0x2b, 0x26, 0x2b, 0xa8, + 0x5c, 0xe0, 0xcb, 0x26, 0x5f, 0xb6, 0x88, 0xf0, 0xd1, 0x7a, 0xbc, 0x72, 0xa5, 0x09, 0x89, 0x36, + 0xbc, 0x2d, 0xac, 0xa0, 0xd8, 0x5c, 0xe1, 0xd7, 0x19, 0x00, 0xf5, 0x97, 0xa7, 0xbe, 0xe7, 0x52, + 0xd2, 0x93, 0xb7, 0xa7, 0x60, 0xb2, 0xc2, 0x2d, 0x87, 0xc4, 0x92, 0xb8, 0x66, 0xe6, 0xd3, 
0x78, + 0x6f, 0x4a, 0xfc, 0xc9, 0xd5, 0x94, 0x39, 0xd4, 0x06, 0x00, 0x6f, 0x80, 0xa1, 0x80, 0xd0, 0xc8, + 0x09, 0xcd, 0x81, 0x79, 0x63, 0x71, 0x74, 0xf9, 0x6a, 0x57, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, 0x8a, + 0xf5, 0x27, 0x8a, 0xe5, 0x10, 0x87, 0x11, 0x2d, 0x9d, 0x97, 0x48, 0x43, 0x88, 0xdb, 0x40, 0xd2, + 0x56, 0xe1, 0xfb, 0x19, 0x30, 0xa9, 0x47, 0xa9, 0x6e, 0x93, 0x23, 0x78, 0x04, 0x86, 0x03, 0x91, + 0x2c, 0x3c, 0x4e, 0xa3, 0xcb, 0x3b, 0xc5, 0x33, 0x6d, 0xab, 0x62, 0x5b, 0x12, 0x96, 0x46, 0xd9, + 0x9a, 0xc9, 0x07, 0x14, 0xa3, 0xc1, 0xb7, 0x41, 0x2e, 0x90, 0x0b, 0xc5, 0xb3, 0x69, 0x74, 0xf9, + 0x2b, 0x3d, 0x44, 0x16, 0x86, 0x4b, 0x63, 0xcd, 0x46, 0x3e, 0x17, 0x3f, 0x21, 0x05, 0x58, 0x78, + 0x2f, 0x03, 0xe6, 0x56, 0x23, 0x1a, 0x7a, 0x35, 0x44, 0xa8, 0x17, 0x05, 0x15, 0xb2, 0xea, 0x39, + 0x51, 0xcd, 0x5d, 0x23, 0xfb, 0xb6, 0x6b, 0x87, 0x2c, 0x5b, 0xe7, 0xc1, 0xa0, 0x8b, 0x6b, 0x44, + 0x66, 0xcf, 0x98, 0x8c, 0xe9, 0xe0, 0x16, 0xae, 0x11, 0xc4, 0x39, 0x4c, 0x82, 0x25, 0x8b, 0xdc, + 0x0b, 0x4a, 0xe2, 0xc6, 0x89, 0x4f, 0x10, 0xe7, 0xc0, 0x2b, 0x60, 0x68, 0xdf, 0x0b, 0x6a, 0x58, + 0xac, 0xe3, 0x48, 0xb2, 0x32, 0xd7, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0a, 0x8c, 0x5a, 0x84, 0x56, + 0x02, 0xdb, 0x67, 0xd0, 0xe6, 0x20, 0x17, 0xbe, 0x20, 0x85, 0x47, 0xd7, 0x12, 0x16, 0xd2, 0xe5, + 0xe0, 0x55, 0x90, 0xf3, 0x03, 0xdb, 0x0b, 0xec, 0xf0, 0xc4, 0xcc, 0xce, 0x1b, 0x8b, 0xd9, 0xd2, + 0xa4, 0xd4, 0xc9, 0xed, 0x48, 0x3a, 0x52, 0x12, 0x70, 0x1e, 0xe4, 0x5e, 0x28, 0x6f, 0x6f, 0xed, + 0xe0, 0xf0, 0xc0, 0x1c, 0xe2, 0x08, 0x83, 0x4c, 0x1a, 0x29, 0x6a, 0xe1, 0x6f, 0x19, 0x60, 0xa6, + 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x39, 0x1a, 0xb2, 0x8a, 0x53, 0x3d, 0x91, 0x31, 0x79, 0x24, + 0x06, 0x2b, 0x4b, 0xfa, 0x69, 0x23, 0x3f, 0x93, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, 0x7f, + 0x69, 0x80, 0x0b, 0x47, 0x64, 0xef, 0xc0, 0xf3, 0x0e, 0x57, 0x1d, 0x9b, 0xb8, 0xe1, 0xaa, 0xe7, + 0xee, 0xdb, 0x55, 0x99, 0x03, 0xe8, 0x8c, 0x39, 0xf0, 0x72, 0xbb, 0xe5, 0xd2, 0x7d, 0xcd, 0x46, + 0xfe, 0x42, 0x07, 0x06, 0xea, 0xe4, 0x07, 0xdc, 0x05, 0x66, 0x25, 0xb5, 0x49, 0x64, 0x01, 0x13, + 0x65, 0x6b, 0xa4, 0x74, 0xb9, 0xd9, 0xc8, 0x9b, 0xab, 0x5d, 0x64, 0x50, 0x57, 0xed, 0xc2, 0x77, + 0x07, 0xd2, 0xe1, 0xd5, 0xd2, 0xed, 0x4d, 0x90, 0x63, 0xdb, 0xd8, 0xc2, 0x21, 0x96, 0x1b, 0xf1, + 0xf1, 0xbb, 0xdb, 0xf4, 0xa2, 0x66, 0x6c, 0x92, 0x10, 0x97, 0xa0, 0x5c, 0x10, 0x90, 0xd0, 0x90, + 0xb2, 0x0a, 0xbf, 0x01, 0x06, 0xa9, 0x4f, 0x2a, 0x32, 0xd0, 0xaf, 0x9c, 0x75, 0xb3, 0x75, 0x79, + 0x91, 0xb2, 0x4f, 0x2a, 0xc9, 0x5e, 0x60, 0x4f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, 0x30, 0x44, 0x79, + 0x81, 0x92, 0x45, 0xed, 0xb5, 0x7e, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x19, 0x49, 0xf0, 0xc2, 0xbf, + 0x33, 0x60, 0xa1, 0x9b, 0xea, 0xaa, 0xe7, 0x5a, 0x62, 0x39, 0x36, 0xe4, 0xde, 0x16, 0x99, 0xfe, + 0x94, 0xbe, 0xb7, 0x4f, 0x1b, 0xf9, 0x87, 0xee, 0x68, 0x40, 0x2b, 0x02, 0x5f, 0x50, 0xef, 0x2d, + 0x0a, 0xc5, 0x42, 0xab, 0x63, 0xa7, 0x8d, 0xfc, 0x84, 0x52, 0x6b, 0xf5, 0x15, 0xd6, 0x01, 0x74, + 0x30, 0x0d, 0x6f, 0x04, 0xd8, 0xa5, 0xc2, 0xac, 0x5d, 0x23, 0x32, 0x7c, 0x8f, 0xdc, 0x5d, 0x7a, + 0x30, 0x8d, 0xd2, 0xac, 0x84, 0x84, 0xd7, 0xdb, 0xac, 0xa1, 0x0e, 0x08, 0xac, 0x6e, 0x05, 0x04, + 0x53, 0x55, 0x8a, 0xb4, 0x13, 0x85, 0x51, 0x91, 0xe4, 0xc2, 0x87, 0xc1, 0x70, 0x8d, 0x50, 0x8a, + 0xab, 0x84, 0xd7, 0x9f, 0x91, 0xe4, 0x88, 0xde, 0x14, 0x64, 0x14, 0xf3, 0x59, 0x7f, 0x72, 0xb9, + 0x5b, 0xd4, 0xae, 0xdb, 0x34, 0x84, 0xaf, 0xb6, 0x6d, 0x80, 0xe2, 0xdd, 0xbd, 0x21, 0xd3, 0xe6, + 0xe9, 0xaf, 0x8a, 0x5f, 0x4c, 0xd1, 0x92, 0xff, 0xeb, 0x20, 0x6b, 0x87, 0xa4, 0x16, 0x9f, 0xdd, + 0x2f, 0xf7, 0x29, 
0xf7, 0x4a, 0xe3, 0xd2, 0x87, 0xec, 0x06, 0x43, 0x43, 0x02, 0xb4, 0xf0, 0x9b, + 0x0c, 0x78, 0xa0, 0x9b, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0xb8, 0xef, 0x44, 0x01, 0x76, 0x64, 0xc6, + 0xa9, 0x88, 0xef, 0x70, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xed, 0x56, 0x23, 0x07, 0x07, 0x32, + 0x9d, 0xd4, 0x5b, 0x97, 0x25, 0x1d, 0x29, 0x09, 0x58, 0x04, 0x80, 0x1e, 0x78, 0x41, 0xc8, 0x31, + 0x64, 0xf5, 0x3a, 0xcf, 0x0a, 0x44, 0x59, 0x51, 0x91, 0x26, 0xc1, 0x4e, 0xb4, 0x43, 0xdb, 0xb5, + 0xe4, 0xaa, 0xab, 0x5d, 0xfc, 0xa2, 0xed, 0x5a, 0x88, 0x73, 0x18, 0xbe, 0x63, 0xd3, 0x90, 0x51, + 0xe4, 0x92, 0xb7, 0x44, 0x9d, 0x4b, 0x2a, 0x09, 0x86, 0x5f, 0x61, 0x55, 0xdf, 0x0b, 0x6c, 0x42, + 0xcd, 0xa1, 0x04, 0x7f, 0x55, 0x51, 0x91, 0x26, 0x51, 0xf8, 0x57, 0xae, 0x7b, 0x92, 0xb0, 0x52, + 0x02, 0x1f, 0x04, 0xd9, 0x6a, 0xe0, 0x45, 0xbe, 0x8c, 0x92, 0x8a, 0xf6, 0xf3, 0x8c, 0x88, 0x04, + 0x8f, 0x65, 0x65, 0xbd, 0xa5, 0x4d, 0x55, 0x59, 0x19, 0x37, 0xa7, 0x31, 0x1f, 0x7e, 0xdb, 0x00, + 0x59, 0x57, 0x06, 0x87, 0xa5, 0xdc, 0xab, 0x7d, 0xca, 0x0b, 0x1e, 0xde, 0xc4, 0x5d, 0x11, 0x79, + 0x81, 0x0c, 0x9f, 0x04, 0x59, 0x5a, 0xf1, 0x7c, 0x22, 0xa3, 0x3e, 0x17, 0x0b, 0x95, 0x19, 0xf1, + 0xb4, 0x91, 0x1f, 0x8f, 0xcd, 0x71, 0x02, 0x12, 0xc2, 0xf0, 0x7b, 0x06, 0x00, 0x75, 0xec, 0xd8, + 0x16, 0xe6, 0x2d, 0x43, 0x96, 0xbb, 0xdf, 0xdb, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, 0x96, 0x3c, + 0x23, 0x0d, 0x1a, 0xbe, 0x6b, 0x80, 0x31, 0x1a, 0xed, 0x05, 0x52, 0x8b, 0xf2, 0xe6, 0x62, 0x74, + 0xf9, 0xab, 0x3d, 0xf5, 0xa5, 0xac, 0x01, 0x94, 0x26, 0x9b, 0x8d, 0xfc, 0x98, 0x4e, 0x41, 0x2d, + 0x0e, 0xc0, 0x1f, 0x1a, 0x20, 0x57, 0x8f, 0xcf, 0xec, 0x61, 0xbe, 0xe1, 0x5f, 0xef, 0xd3, 0xc2, + 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x4f, 0x06, 0x30, 0xb1, 0x25, 0x0a, + 0x3c, 0x76, 0x76, 0x02, 0xdb, 0x0d, 0x49, 0x20, 0xfa, 0x4d, 0x6a, 0xe6, 0xb8, 0x7b, 0xbd, 0x3d, + 0x0b, 0xd3, 0xbd, 0x6c, 0x69, 0x5e, 0x7a, 0x67, 0xae, 0x74, 0x71, 0x03, 0x75, 0x75, 0x90, 0x27, + 0x5a, 0xd2, 0xd2, 0x98, 0x23, 0x7d, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, 0xa0, 0x34, + 0x68, 0xb8, 0x0d, 0xa6, 0xfd, 0x80, 0x70, 0x80, 0x9b, 0xee, 0xa1, 0xeb, 0x1d, 0xb9, 0xd7, 0x6c, + 0xe2, 0x58, 0xd4, 0x04, 0xf3, 0xc6, 0x62, 0xae, 0x74, 0xa9, 0xd9, 0xc8, 0x4f, 0xef, 0x74, 0x12, + 0x40, 0x9d, 0xf5, 0x0a, 0xef, 0x0e, 0xa4, 0x6f, 0x01, 0xe9, 0x2e, 0x02, 0xbe, 0x2f, 0xde, 0x5e, + 0xc4, 0x86, 0x9a, 0x06, 0x5f, 0xad, 0x37, 0xfb, 0x94, 0x4c, 0xaa, 0x0d, 0x48, 0x3a, 0x39, 0x45, + 0xa2, 0x48, 0xf3, 0x03, 0xfe, 0xcc, 0x00, 0xe3, 0xb8, 0x52, 0x21, 0x7e, 0x48, 0x2c, 0x51, 0xdc, + 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x96, 0x5e, 0x8d, 0xaf, 0xe8, 0xd0, 0xa8, 0xd5, 0x13, 0xf8, 0x2c, + 0x38, 0x4f, 0x43, 0x2f, 0x20, 0x56, 0xaa, 0x6d, 0x86, 0xcd, 0x46, 0xfe, 0x7c, 0xb9, 0x85, 0x83, + 0x52, 0x92, 0x85, 0xbf, 0x67, 0x41, 0xfe, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, 0xc0, 0x10, + 0x7f, 0x5d, 0x8b, 0x47, 0x25, 0xa7, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, 0x0c, 0x9f, + 0xb5, 0x2f, 0x03, 0x5c, 0x50, 0x1d, 0x14, 0x65, 0x41, 0x46, 0x31, 0x1f, 0x2e, 0x03, 0x60, 0x11, + 0x3f, 0x20, 0xec, 0xb0, 0xb2, 0xcc, 0x61, 0x2e, 0xad, 0x16, 0x69, 0x4d, 0x71, 0x90, 0x26, 0x05, + 0xaf, 0x01, 0x18, 0x3f, 0xd9, 0x9e, 0xfb, 0x32, 0x0e, 0x5c, 0xdb, 0xad, 0x9a, 0x39, 0xee, 0xf6, + 0x0c, 0xeb, 0xc6, 0xd6, 0xda, 0xb8, 0xa8, 0x83, 0x06, 0x7c, 0x1b, 0x0c, 0x89, 0xa1, 0x0f, 0x3f, + 0x21, 0xfa, 0x58, 0xe5, 0x01, 0x8f, 0x11, 0x87, 0x42, 0x12, 0xb2, 0xbd, 0xba, 0x67, 0xef, 0x75, + 0x75, 0xbf, 0x6d, 0x39, 0x1d, 0xfa, 0x1f, 0x2f, 0xa7, 0x85, 0xff, 0x18, 0xe9, 0x9a, 0xa3, 0xbd, + 0x6a, 0xb9, 0x82, 0x1d, 0x02, 0xd7, 0xc0, 
0x24, 0xbb, 0x31, 0x21, 0xe2, 0x3b, 0x76, 0x05, 0x53, + 0x7e, 0x61, 0x17, 0xc9, 0xae, 0x66, 0x48, 0xe5, 0x14, 0x1f, 0xb5, 0x69, 0xc0, 0x17, 0x00, 0x14, + 0xb7, 0x88, 0x16, 0x3b, 0xa2, 0x21, 0x52, 0xf7, 0x81, 0x72, 0x9b, 0x04, 0xea, 0xa0, 0x05, 0x57, + 0xc1, 0x94, 0x83, 0xf7, 0x88, 0x53, 0x26, 0x0e, 0xa9, 0x84, 0x5e, 0xc0, 0x4d, 0x89, 0x91, 0xc6, + 0x74, 0xb3, 0x91, 0x9f, 0xba, 0x9e, 0x66, 0xa2, 0x76, 0xf9, 0xc2, 0x42, 0x7a, 0x6b, 0xeb, 0x2f, + 0x2e, 0xee, 0x66, 0x1f, 0x64, 0xc0, 0x6c, 0xf7, 0xcc, 0x80, 0xdf, 0x49, 0xae, 0x90, 0xe2, 0x86, + 0xf0, 0x7a, 0xbf, 0xb2, 0x50, 0xde, 0x21, 0x41, 0xfb, 0xfd, 0x11, 0x7e, 0x93, 0xb5, 0x6b, 0xd8, + 0x89, 0x87, 0x56, 0xaf, 0xf5, 0xcd, 0x05, 0x06, 0x52, 0x1a, 0x11, 0x9d, 0x20, 0x76, 0x78, 0xe3, + 0x87, 0x1d, 0x52, 0xf8, 0xad, 0x91, 0x9e, 0x22, 0x24, 0x3b, 0x18, 0xfe, 0xc8, 0x00, 0x13, 0x9e, + 0x4f, 0xdc, 0x95, 0x9d, 0x8d, 0x97, 0x3e, 0x27, 0x76, 0xb2, 0x0c, 0xd5, 0xd6, 0x19, 0xfd, 0x7c, + 0xa1, 0xbc, 0xbd, 0x25, 0x0c, 0xee, 0x04, 0x9e, 0x4f, 0x4b, 0x17, 0x9a, 0x8d, 0xfc, 0xc4, 0x76, + 0x2b, 0x14, 0x4a, 0x63, 0x17, 0x6a, 0x60, 0x7a, 0xfd, 0x38, 0x24, 0x81, 0x8b, 0x9d, 0x35, 0xaf, + 0x12, 0xd5, 0x88, 0x1b, 0x0a, 0x47, 0x53, 0x13, 0x2f, 0xe3, 0x2e, 0x27, 0x5e, 0x0f, 0x80, 0x81, + 0x28, 0x70, 0x64, 0x16, 0x8f, 0xaa, 0x89, 0x2e, 0xba, 0x8e, 0x18, 0xbd, 0xb0, 0x00, 0x06, 0x99, + 0x9f, 0xf0, 0x12, 0x18, 0x08, 0xf0, 0x11, 0xb7, 0x3a, 0x56, 0x1a, 0x66, 0x22, 0x08, 0x1f, 0x21, + 0x46, 0x2b, 0xfc, 0x75, 0x01, 0x4c, 0xa4, 0xde, 0x05, 0xce, 0x82, 0x8c, 0x1a, 0x13, 0x03, 0x69, + 0x34, 0xb3, 0xb1, 0x86, 0x32, 0xb6, 0x05, 0x9f, 0x56, 0xc5, 0x57, 0x80, 0xe6, 0xd5, 0x59, 0xc2, + 0xa9, 0xac, 0x3f, 0x4f, 0xcc, 0x31, 0x47, 0xe2, 0xc2, 0xc9, 0x7c, 0x20, 0xfb, 0x72, 0x97, 0x08, + 0x1f, 0xc8, 0x3e, 0x62, 0xb4, 0x4f, 0x3b, 0xee, 0x8b, 0xe7, 0x8d, 0xd9, 0xbb, 0x98, 0x37, 0x0e, + 0xdd, 0x76, 0xde, 0xf8, 0x20, 0xc8, 0x86, 0x76, 0xe8, 0x10, 0x7e, 0x90, 0x69, 0xd7, 0xa8, 0x1b, + 0x8c, 0x88, 0x04, 0x0f, 0xbe, 0x05, 0x86, 0x2d, 0xb2, 0x8f, 0x23, 0x27, 0xe4, 0x67, 0xd6, 0xe8, + 0xf2, 0x6a, 0x0f, 0x52, 0x48, 0x0c, 0x83, 0xd7, 0x84, 0x5d, 0x14, 0x03, 0xc0, 0x87, 0xc0, 0x70, + 0x0d, 0x1f, 0xdb, 0xb5, 0xa8, 0xc6, 0x1b, 0x4c, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x8a, 0x79, 0xac, + 0x32, 0x92, 0xe3, 0x8a, 0x13, 0x51, 0xbb, 0x4e, 0x24, 0x53, 0x36, 0x7f, 0xaa, 0x32, 0xae, 0xa7, + 0xf8, 0xa8, 0x4d, 0x83, 0x83, 0xd9, 0x2e, 0x57, 0x1e, 0xd5, 0xc0, 0x04, 0x09, 0xc5, 0xbc, 0x56, + 0x30, 0x29, 0x3f, 0xd6, 0x0d, 0x4c, 0x2a, 0xb7, 0x69, 0xc0, 0x47, 0xc1, 0x48, 0x0d, 0x1f, 0x5f, + 0x27, 0x6e, 0x35, 0x3c, 0x30, 0xc7, 0xe7, 0x8d, 0xc5, 0x81, 0xd2, 0x78, 0xb3, 0x91, 0x1f, 0xd9, + 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xdb, 0x95, 0xc2, 0xe7, 0x35, 0xe1, 0x98, 0x88, 0x12, 0x3e, + 0xeb, 0x5e, 0x7c, 0x1c, 0xb2, 0xcd, 0x65, 0x4e, 0xb4, 0x5e, 0x73, 0x77, 0x04, 0x19, 0xc5, 0x7c, + 0xb8, 0x08, 0x72, 0x35, 0x7c, 0xcc, 0x47, 0x12, 0xe6, 0x24, 0x37, 0xcb, 0x07, 0xe3, 0x9b, 0x92, + 0x86, 0x14, 0x97, 0x4b, 0xda, 0xae, 0x90, 0x9c, 0xd2, 0x24, 0x25, 0x0d, 0x29, 0x2e, 0x4b, 0xe2, + 0xc8, 0xb5, 0x6f, 0x45, 0x44, 0x08, 0x43, 0x1e, 0x19, 0x95, 0xc4, 0x37, 0x13, 0x16, 0xd2, 0xe5, + 0x60, 0x11, 0x80, 0x5a, 0xe4, 0x84, 0xb6, 0xef, 0x90, 0xed, 0x7d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, + 0xfa, 0x37, 0x15, 0x15, 0x69, 0x12, 0x90, 0x80, 0x41, 0xe2, 0x46, 0x35, 0xf3, 0x22, 0x3f, 0xd8, + 0x7b, 0x92, 0x82, 0x6a, 0xe7, 0xac, 0xbb, 0x51, 0x0d, 0x71, 0xf3, 0xf0, 0x69, 0x30, 0x5e, 0xc3, + 0xc7, 0xac, 0x1c, 0x90, 0x20, 0xb4, 0x09, 0x35, 0xa7, 0xf9, 0xcb, 0x4f, 0xb1, 0x6e, 0x77, 0x53, + 0x67, 0xa0, 0x56, 0x39, 0xae, 0x68, 0xbb, 0x9a, 0xe2, 0x8c, 0xa6, 
0xa8, 0x33, 0x50, 0xab, 0x1c, + 0x8b, 0x74, 0x40, 0x6e, 0x45, 0x76, 0x40, 0x2c, 0xf3, 0x3e, 0xde, 0x20, 0xcb, 0x8f, 0x15, 0x82, + 0x86, 0x14, 0x17, 0xd6, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0xcd, 0xde, 0x56, 0xf2, 0xed, 0x60, + 0x25, 0x08, 0xf0, 0x89, 0x38, 0x69, 0xf4, 0xa9, 0x15, 0xa4, 0x20, 0x8b, 0x1d, 0x67, 0x7b, 0xdf, + 0xbc, 0xc4, 0x63, 0xdf, 0xeb, 0x13, 0x44, 0x55, 0x9d, 0x15, 0x06, 0x82, 0x04, 0x16, 0x03, 0xf5, + 0x5c, 0x96, 0x1a, 0xb3, 0xfd, 0x05, 0xdd, 0x66, 0x20, 0x48, 0x60, 0xf1, 0x37, 0x75, 0x4f, 0xb6, + 0xf7, 0xcd, 0xfb, 0xfb, 0xfc, 0xa6, 0x0c, 0x04, 0x09, 0x2c, 0x68, 0x83, 0x01, 0xd7, 0x0b, 0xcd, + 0xcb, 0x7d, 0x39, 0x9e, 0xf9, 0x81, 0xb3, 0xe5, 0x85, 0x88, 0x61, 0xc0, 0x9f, 0x1a, 0x00, 0xf8, + 0x49, 0x8a, 0x3e, 0xd0, 0x93, 0x91, 0x48, 0x0a, 0xb2, 0x98, 0xe4, 0xf6, 0xba, 0x1b, 0x06, 0x27, + 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x57, 0x06, 0xb8, 0xa8, 0xb7, 0xc9, 0xca, 0xbd, + 0x39, 0x1e, 0x91, 0x1b, 0xbd, 0x4e, 0xf3, 0x92, 0xe7, 0x39, 0x25, 0xb3, 0xd9, 0xc8, 0x5f, 0x5c, + 0xe9, 0x80, 0x8a, 0x3a, 0xfa, 0x02, 0x7f, 0x67, 0x80, 0x29, 0x59, 0x45, 0x35, 0x0f, 0xf3, 0x3c, + 0x80, 0xa4, 0xd7, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x8f, 0xec, 0x6d, 0x7c, 0xd4, 0xee, 0x1a, + 0xfc, 0xa3, 0x01, 0xc6, 0x2c, 0xe2, 0x13, 0xd7, 0x22, 0x6e, 0x85, 0xf9, 0x3a, 0xdf, 0x93, 0x91, + 0x45, 0xda, 0xd7, 0x35, 0x0d, 0x42, 0xb8, 0x59, 0x94, 0x6e, 0x8e, 0xe9, 0xac, 0xd3, 0x46, 0x7e, + 0x26, 0x51, 0xd5, 0x39, 0xa8, 0xc5, 0x4b, 0xf8, 0x9e, 0x01, 0x26, 0x92, 0x05, 0x10, 0x47, 0xca, + 0x42, 0x1f, 0xf3, 0x80, 0xb7, 0xaf, 0x2b, 0xad, 0x80, 0x28, 0xed, 0x01, 0xfc, 0xbd, 0xc1, 0x3a, + 0xb5, 0xf8, 0xde, 0x47, 0xcd, 0x02, 0x8f, 0xe5, 0x1b, 0x3d, 0x8f, 0xa5, 0x42, 0x10, 0xa1, 0xbc, + 0x9a, 0xb4, 0x82, 0x8a, 0x73, 0xda, 0xc8, 0x4f, 0xeb, 0x91, 0x54, 0x0c, 0xa4, 0x7b, 0x08, 0x7f, + 0x60, 0x80, 0x31, 0x92, 0x74, 0xdc, 0xd4, 0x7c, 0xb0, 0x27, 0x41, 0xec, 0xd8, 0xc4, 0x8b, 0x9b, + 0xba, 0xc6, 0xa2, 0xa8, 0x05, 0x9b, 0x75, 0x90, 0xe4, 0x18, 0xd7, 0x7c, 0x87, 0x98, 0xff, 0xd7, + 0xe3, 0x0e, 0x72, 0x5d, 0xd8, 0x45, 0x31, 0x00, 0xbc, 0x0a, 0x72, 0x6e, 0xe4, 0x38, 0x78, 0xcf, + 0x21, 0xe6, 0x43, 0xbc, 0x17, 0x51, 0x23, 0xd9, 0x2d, 0x49, 0x47, 0x4a, 0x02, 0xee, 0x83, 0xf9, + 0xe3, 0x17, 0xd5, 0xdf, 0x93, 0x3a, 0x0e, 0x0d, 0xcd, 0x2b, 0xdc, 0xca, 0x6c, 0xb3, 0x91, 0x9f, + 0xd9, 0xed, 0x3c, 0x56, 0xbc, 0xa3, 0x0d, 0xf8, 0x0a, 0xb8, 0x5f, 0x93, 0x59, 0xaf, 0xed, 0x11, + 0xcb, 0x22, 0x56, 0x7c, 0x71, 0x33, 0xff, 0x5f, 0x0c, 0x2e, 0xe3, 0x0d, 0xbe, 0x9b, 0x16, 0x40, + 0xb7, 0xd3, 0x86, 0xd7, 0xc1, 0x8c, 0xc6, 0xde, 0x70, 0xc3, 0xed, 0xa0, 0x1c, 0x06, 0xb6, 0x5b, + 0x35, 0x17, 0xb9, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd5, 0x78, 0xa8, 0x8b, 0x0e, 0xfc, 0x72, 0x8b, + 0x35, 0xfe, 0x09, 0x0d, 0xfb, 0x2f, 0x92, 0x13, 0x6a, 0x3e, 0xcc, 0xbb, 0x13, 0xbe, 0xd8, 0xbb, + 0x1a, 0x1d, 0x75, 0x91, 0x87, 0x5f, 0x02, 0x17, 0x52, 0x1c, 0x76, 0x45, 0x31, 0x1f, 0x11, 0x77, + 0x0d, 0xd6, 0xcf, 0xee, 0xc6, 0x44, 0xd4, 0x49, 0x12, 0x7e, 0x11, 0x40, 0x8d, 0xbc, 0x89, 0x7d, + 0xae, 0xff, 0xa8, 0xb8, 0xf6, 0xb0, 0x15, 0xdd, 0x95, 0x34, 0xd4, 0x41, 0x0e, 0xfe, 0xdc, 0x68, + 0x79, 0x93, 0xe4, 0x76, 0x4c, 0xcd, 0xab, 0x7c, 0xff, 0x6e, 0x9e, 0x31, 0x0b, 0xb5, 0xef, 0x20, + 0x91, 0x43, 0xb4, 0x30, 0x6b, 0x50, 0xa8, 0x8b, 0x0b, 0xb3, 0xec, 0x86, 0x9e, 0xaa, 0xf0, 0x70, + 0x12, 0x0c, 0x1c, 0x12, 0xf9, 0xaf, 0x0a, 0xc4, 0x7e, 0x42, 0x0b, 0x64, 0xeb, 0xd8, 0x89, 0xe2, + 0x21, 0x43, 0x8f, 0xbb, 0x03, 0x24, 0x8c, 0x3f, 0x9b, 0x79, 0xc6, 0x98, 0x7d, 0xdf, 0x00, 0x33, + 0x9d, 0x0f, 0x9e, 0x7b, 0xea, 0xd6, 0x2f, 0x0c, 0x30, 0xd5, 0x76, 0xc6, 0x74, 0xf0, 0xe8, 
0x56, + 0xab, 0x47, 0xaf, 0xf4, 0xfa, 0xb0, 0x10, 0x9b, 0x83, 0x77, 0xc8, 0xba, 0x7b, 0x3f, 0x36, 0xc0, + 0x64, 0xba, 0x6c, 0xdf, 0xcb, 0x78, 0x15, 0xde, 0xcf, 0x80, 0x99, 0xce, 0x8d, 0x3d, 0x0c, 0xd4, + 0x04, 0xa3, 0x3f, 0x93, 0xa0, 0x4e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xfa, 0x96, 0x92, 0x8b, 0xbf, + 0xba, 0xf7, 0x7c, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf0, 0x07, 0x03, 0x4c, + 0x77, 0x6c, 0x00, 0xe0, 0x15, 0x30, 0x84, 0x1d, 0xc7, 0x3b, 0x12, 0xa3, 0x44, 0xed, 0x1b, 0xc1, + 0x0a, 0xa7, 0x22, 0xc9, 0xd5, 0xa2, 0x97, 0xf9, 0xac, 0xa2, 0x57, 0xf8, 0xb3, 0x01, 0x2e, 0xdf, + 0x2e, 0x13, 0xef, 0xc9, 0x92, 0x2e, 0x82, 0x9c, 0x6c, 0xde, 0x4f, 0xf8, 0x72, 0xca, 0x52, 0x2c, + 0x8b, 0x06, 0xff, 0xa3, 0x99, 0xf8, 0x55, 0xf8, 0xc0, 0x00, 0x93, 0x65, 0x12, 0xd4, 0xed, 0x0a, + 0x41, 0x64, 0x9f, 0x04, 0xc4, 0xad, 0x10, 0xb8, 0x04, 0x46, 0xf8, 0xe7, 0x6e, 0x1f, 0x57, 0xe2, + 0x4f, 0x37, 0x53, 0x32, 0xe4, 0x23, 0x5b, 0x31, 0x03, 0x25, 0x32, 0xea, 0x33, 0x4f, 0xa6, 0xeb, + 0x67, 0x9e, 0xcb, 0x60, 0xd0, 0x4f, 0x06, 0xd1, 0x39, 0xc6, 0xe5, 0xb3, 0x67, 0x4e, 0xe5, 0x5c, + 0x2f, 0x08, 0xf9, 0x74, 0x2d, 0x2b, 0xb9, 0x5e, 0x10, 0x22, 0x4e, 0x2d, 0xbc, 0x06, 0xce, 0xb7, + 0x96, 0x71, 0x86, 0x17, 0x44, 0x4e, 0xdb, 0x67, 0x25, 0xc6, 0x43, 0x9c, 0xa3, 0xff, 0xdb, 0x25, + 0x73, 0x87, 0x7f, 0xbb, 0xfc, 0xc5, 0x00, 0x9d, 0xfe, 0x71, 0x06, 0x2f, 0x89, 0xf9, 0xa5, 0x36, + 0x14, 0x8c, 0x67, 0x97, 0xb0, 0x0e, 0x86, 0xa9, 0x08, 0x9a, 0x5c, 0xd4, 0xed, 0x33, 0x2e, 0x6a, + 0x7a, 0x09, 0x44, 0xe3, 0x14, 0x53, 0x63, 0x30, 0xb6, 0xae, 0x15, 0x5c, 0x8a, 0x5c, 0x4b, 0x8e, + 0xb4, 0xc7, 0xc4, 0xba, 0xae, 0xae, 0x08, 0x1a, 0x52, 0xdc, 0x52, 0xe5, 0xc3, 0x4f, 0xe6, 0xce, + 0x7d, 0xf4, 0xc9, 0xdc, 0xb9, 0x8f, 0x3f, 0x99, 0x3b, 0xf7, 0xad, 0xe6, 0x9c, 0xf1, 0x61, 0x73, + 0xce, 0xf8, 0xa8, 0x39, 0x67, 0x7c, 0xdc, 0x9c, 0x33, 0xfe, 0xd1, 0x9c, 0x33, 0x7e, 0xf2, 0xcf, + 0xb9, 0x73, 0x5f, 0x7b, 0xee, 0x4c, 0x7f, 0xf2, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, + 0x02, 0xc9, 0x93, 0x3d, 0x2e, 0x00, 0x00, +} + +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConversionReviewVersions) > 0 { + for iNdEx := len(m.ConversionReviewVersions) - 1; 
iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.WebhookClientConfig != nil { + { + size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ 
= l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreserveUnknownFields != nil { + i-- + if *m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Validation != nil { + { + size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeprecationWarning != nil { + i -= len(*m.DeprecationWarning) + copy(dAtA[i:], *m.DeprecationWarning) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeprecationWarning))) + i-- + dAtA[i] = 0x42 + } + i-- + if m.Deprecated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelectorPath != nil { + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.XValidations) > 0 { + for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + } + if m.XMapType != nil { + i -= len(*m.XMapType) + copy(dAtA[i:], *m.XMapType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
[machine-generated code, continued: the remainder of this hunk of vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go. For each v1beta1 apiextensions type — ConversionRequest, ConversionResponse, ConversionReview, the CustomResourceDefinition family, JSONSchemaProps and its JSONSchemaPropsOrArray/JSONSchemaPropsOrBool/JSONSchemaPropsOrStringArray wrappers, ServiceReference, ValidationRule, and WebhookClientConfig — it carries the protoc-gen-gogo Marshal/MarshalTo/MarshalToSizedBuffer, Size, and String methods, the shared helpers encodeVarintGenerated, sovGenerated, sozGenerated, and valueToStringGenerated, and the first Unmarshal methods (ConversionRequest through the start of CustomResourceDefinitionList). None of it is hand-written; protoc-gen-gogo regenerates it from the adjacent generated.proto, and the file continues in the same pattern.]
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plural = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Singular = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validation == nil { + m.Validation = &CustomResourceValidation{} + } + if err := 
m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conversion == nil { + m.Conversion = &CustomResourceConversion{} + } + if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PreserveUnknownFields = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Served = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Storage = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &CustomResourceValidation{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Deprecated = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecationWarning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DeprecationWarning = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.LabelSelectorPath = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CustomResourceSubresourceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scale == nil { + m.Scale = &CustomResourceSubresourceScale{} + } + if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OpenAPIV3Schema == nil { + m.OpenAPIV3Schema = &JSONSchemaProps{} + } + if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSON) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSON: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Ref = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &JSON{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Maximum = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMaximum = bool(v != 0) + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Minimum = &v2 + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMinimum = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxLength = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinLength = &v + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxItems = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinItems = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UniqueItems = bool(v != 0) + case 19: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.MultipleOf = &v2 + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, JSON{}) + if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxProperties = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinProperties = &v + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 24: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Items == nil { + m.Items = &JSONSchemaPropsOrArray{} + } + if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, JSONSchemaProps{}) + if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneOf = append(m.OneOf, JSONSchemaProps{}) + if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) + if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Not == nil { + m.Not = &JSONSchemaProps{} + } + if err := 
m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = *mapvalue + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalProperties == nil { + m.AdditionalProperties = &JSONSchemaPropsOrBool{} + } + if err := 
m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PatternProperties[mapkey] = *mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Dependencies == nil { + m.Dependencies = make(JSONSchemaDependencies) + } + var mapkey string + 
mapvalue := &JSONSchemaPropsOrStringArray{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Dependencies[mapkey] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(JSONSchemaDefinitions) + } + var mapkey string + mapvalue := 
&JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = *mapvalue + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} + } + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: 
+ if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex + case 43: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XMapType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XMapType = &s + iNdEx = postIndex + case 44: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XValidations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XValidations = append(m.XValidations, ValidationRule{}) + if err := m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto new file mode 100644 index 0000000..3f8ba6c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -0,0 +1,766 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"; + +// ConversionRequest describes the conversion request parameters. +message ConversionRequest { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + optional string desiredAPIVersion = 2; + + // objects is the list of custom resource objects to be converted. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; +} + +// ConversionResponse describes a conversion response. +message ConversionResponse { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + optional string uid = 1; + + // convertedObjects is the list of converted versions of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + + // result contains the result of conversion with extra details if the conversion failed. The required `result.status` + // field indicates whether the conversion succeeded or failed. A successful conversion must set `result.status` to + // `Success`. A failed conversion must set `result.status` to `Failure`, provide more details in `result.message`, + // and still return HTTP status 200. The `result.message` + // will be used to construct an error message for the end user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; +} + +// ConversionReview describes a conversion request/response. +message ConversionReview { + // request describes the attributes for the conversion request. + // +optional + optional ConversionRequest request = 1; + + // response describes the attributes for the conversion response. + // +optional + optional ConversionResponse response = 2; + }
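The round trip these comments describe (echo `request.uid`, convert each object to `request.desiredAPIVersion`, answer HTTP 200 with `result.status` set) is easiest to see end to end in code. Below is a minimal, hypothetical Go sketch of a conversion webhook handler, not part of this patch: the name serveConvert and the port are invented, it assumes the vendored v1beta1 and apimachinery types added here, it only rewrites apiVersion where a real converter would also migrate fields, and a real server would terminate TLS.

// serveConvert is a hypothetical handler implementing the ConversionReview contract sketched above.
package main

import (
	"encoding/json"
	"log"
	"net/http"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func serveConvert(w http.ResponseWriter, r *http.Request) {
	review := apiextensionsv1beta1.ConversionReview{}
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
		http.Error(w, "malformed ConversionReview", http.StatusBadRequest)
		return
	}
	resp := &apiextensionsv1beta1.ConversionResponse{
		UID:    review.Request.UID, // must be copied over from request.uid
		Result: metav1.Status{Status: metav1.StatusSuccess},
	}
	for _, obj := range review.Request.Objects {
		var u map[string]interface{}
		if err := json.Unmarshal(obj.Raw, &u); err != nil {
			// Failed conversions report Failure in result but still answer HTTP 200.
			resp.Result = metav1.Status{Status: metav1.StatusFailure, Message: err.Error()}
			resp.ConvertedObjects = nil // "otherwise empty", per the comment above
			break
		}
		// A real converter would migrate fields here; this sketch only switches apiVersion.
		u["apiVersion"] = review.Request.DesiredAPIVersion
		raw, _ := json.Marshal(u)
		resp.ConvertedObjects = append(resp.ConvertedObjects, runtime.RawExtension{Raw: raw})
	}
	review.Response = resp
	review.Request = nil
	_ = json.NewEncoder(w).Encode(review)
}

func main() {
	// Placeholder port; conversion webhooks must actually be served over TLS.
	log.Fatal(http.ListenAndServe(":8443", http.HandlerFunc(serveConvert)))
}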
+ +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human-readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist clients in identifying the column that is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + optional string format = 3; + + // description is a human-readable description of this column. + // +optional + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + optional int32 priority = 5; + + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + optional string JSONPath = 6; +} + +// CustomResourceConversion describes how to convert different versions of a CR. +message CustomResourceConversion { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only changes the apiVersion and does not touch any other field in the custom resource. + // - `Webhook`: The API server will call an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. + optional string strategy = 1; + + // webhookClientConfig contains the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. + // +optional + optional WebhookClientConfig webhookClientConfig = 2; + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by the API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // Defaults to `["v1beta1"]`. + // +optional + repeated string conversionReviewVersions = 3; +}
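To make the relationship between `strategy`, `webhookClientConfig`, and `conversionReviewVersions` concrete, here is a hedged sketch building this stanza with the vendored v1beta1 Go types; the namespace, Service name, path, and CA bundle are placeholders, not values from this patch.

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	path, port := "/convert", int32(443)
	conversion := &apiextensionsv1beta1.CustomResourceConversion{
		// "Webhook" also requires spec.preserveUnknownFields to be false (see above).
		Strategy: apiextensionsv1beta1.WebhookConverter,
		WebhookClientConfig: &apiextensionsv1beta1.WebhookClientConfig{
			Service: &apiextensionsv1beta1.ServiceReference{
				Namespace: "operator-system", // placeholder namespace
				Name:      "webhook-service", // placeholder Service name
				Path:      &path,
				Port:      &port,
			},
			// Placeholder: the CA that signed the webhook's serving certificate.
			CABundle: []byte("-----BEGIN CERTIFICATE-----\n..."),
		},
		// The API server picks the first version here that it supports.
		ConversionReviewVersions: []string{"v1beta1"},
	}
	fmt.Printf("%+v\n", conversion)
}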
+ +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. +message CustomResourceDefinition { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes how the user wants the resources to appear + optional CustomResourceDefinitionSpec spec = 2; + + // status indicates the actual state of the CustomResourceDefinition + // +optional + optional CustomResourceDefinitionStatus status = 3; +} + +// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition. +message CustomResourceDefinitionCondition { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable message indicating details about the last transition. + // +optional + optional string message = 5; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +message CustomResourceDefinitionList { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items lists the individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +message CustomResourceDefinitionNames { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<spec.group>`). + // Must be all lowercase. + optional string plural = 1; + + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + optional string singular = 2; + + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. + // +optional + repeated string shortNames = 3; + + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + optional string kind = 4; + + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + optional string listKind = 5; + + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + repeated string categories = 6; +} + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +message CustomResourceDefinitionSpec { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<plural>.<group>`).
+ optional string group = 1; + + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis/<group>/<version>/...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. + // +optional + optional string version = 2; + + // names specify the resource and kind names for the custom resource. + optional CustomResourceDefinitionNames names = 3; + + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + optional string scope = 4; + + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional + optional CustomResourceValidation validation = 5; + + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional + optional CustomResourceSubresources subresources = 6; + + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // +optional + repeated CustomResourceDefinitionVersion versions = 7; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; + + // conversion defines conversion settings for the CRD. + // +optional + optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details. + // +optional + optional bool preserveUnknownFields = 10; +}
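The "kube-like" ordering described under `versions` can be reproduced with apimachinery's version.CompareKubeAwareVersionStrings helper, which implements this comparison. A small sketch follows; the slice contents come from the example in the comment above, and the printed order is what that comment predicts (treat the exact output as an assumption to verify, not a guarantee).

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	vs := []string{"v1", "foo10", "v11alpha2", "v3beta1", "v12alpha1", "v10beta3", "foo1", "v11beta2", "v2", "v10"}
	// Higher kube-aware priority first: GA > beta > alpha, then major, then minor.
	sort.Slice(vs, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0
	})
	fmt.Println(vs)
	// Expected, per the comment above:
	// [v10 v2 v1 v11beta2 v10beta3 v3beta1 v12alpha1 v11alpha2 foo1 foo10]
}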
+ +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +message CustomResourceDefinitionStatus { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + // +listType=map + // +listMapKey=type + repeated CustomResourceDefinitionCondition conditions = 1; + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different from the names in spec. + // +optional + optional CustomResourceDefinitionNames acceptedNames = 2; + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + // +optional + repeated string storedVersions = 3; +} + +// CustomResourceDefinitionVersion describes a version of a CRD. +message CustomResourceDefinitionVersion { + // name is the version name, e.g. "v1", "v2beta1", etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. + optional string name = 1; + + // served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + optional bool storage = 3; + + // deprecated indicates this version of the custom resource API is deprecated. + // When set to true, API requests to this version receive a warning header in the server response. + // Defaults to false. + // +optional + optional bool deprecated = 7; + + // deprecationWarning overrides the default warning returned to API clients. + // May only be set when `deprecated` is true. + // The default warning indicates this version is deprecated and recommends use + // of the newest served version of equal or greater stability, if one exists. + // +optional + optional string deprecationWarning = 8; + + // schema describes the schema used for validation and pruning of this version of the custom resource. + // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). + // +optional + optional CustomResourceValidation schema = 4; + + // subresources specify what subresources this version of the defined custom resource has. + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). + // +optional + optional CustomResourceSubresources subresources = 5; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details.
+ // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; +} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +message CustomResourceSubresourceScale { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + optional string specReplicasPath = 1; + + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + optional string statusReplicasPath = 2; + + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed to by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + optional string labelSelectorPath = 3; +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +message CustomResourceSubresourceStatus { +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +message CustomResourceSubresources { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + optional CustomResourceSubresourceStatus status = 1; + + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + optional CustomResourceSubresourceScale scale = 2; +}
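As a concrete illustration of the two subresources, a short sketch building this message's Go counterpart from the vendored v1beta1 types; the three JSON paths are placeholders and must actually exist in the served schema for `/scale` to behave as described above.

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	selectorPath := ".status.labelSelector" // placeholder path under .status
	sub := &apiextensionsv1beta1.CustomResourceSubresources{
		// Enables GET/PUT on .../status that only touches the status stanza.
		Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{},
		// Enables .../scale so `kubectl scale` and the HPA can drive replicas.
		Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
			SpecReplicasPath:   ".spec.replicas",   // placeholder path under .spec
			StatusReplicasPath: ".status.replicas", // placeholder path under .status
			LabelSelectorPath:  &selectorPath,
		},
	}
	fmt.Printf("%+v\n", sub)
}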
+message CustomResourceValidation { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + optional JSONSchemaProps openAPIV3Schema = 1; +} + +// ExternalDocumentation allows referencing an external resource for extended documentation. +message ExternalDocumentation { + optional string description = 1; + + optional string url = 2; +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +message JSON { + optional bytes raw = 1; +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +message JSONSchemaProps { + optional string id = 1; + + optional string schema = 2; + + optional string ref = 3; + + optional string description = 4; + + optional string type = 5; + + // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + // + // - bsonobjectid: a bson object ID, i.e. a 24-character hex string + // - uri: a URI as parsed by Golang net/url.ParseRequestURI + // - email: an email address as parsed by Golang net/mail.ParseAddress + // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP + // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP + // - cidr: a CIDR as parsed by Golang net.ParseCIDR + // - mac: a MAC address as parsed by Golang net.ParseMAC + // - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" + // - isbn10: an ISBN10 number string like "0321751043" + // - isbn13: an ISBN13 number string like "978-0321751041" + // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in + // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ + // - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ + // - rgbcolor: an RGB color code like "rgb(255,255,255)" + // - byte: base64 encoded binary data + // - password: any kind of string + // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 + // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format + // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. + optional string format = 6; + + optional string title = 7; + + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API.
+ optional JSON default = 8; + + optional double maximum = 9; + + optional bool exclusiveMaximum = 10; + + optional double minimum = 11; + + optional bool exclusiveMinimum = 12; + + optional int64 maxLength = 13; + + optional int64 minLength = 14; + + optional string pattern = 15; + + optional int64 maxItems = 16; + + optional int64 minItems = 17; + + optional bool uniqueItems = 18; + + optional double multipleOf = 19; + + repeated JSON enum = 20; + + optional int64 maxProperties = 21; + + optional int64 minProperties = 22; + + repeated string required = 23; + + optional JSONSchemaPropsOrArray items = 24; + + repeated JSONSchemaProps allOf = 25; + + repeated JSONSchemaProps oneOf = 26; + + repeated JSONSchemaProps anyOf = 27; + + optional JSONSchemaProps not = 28; + + map<string, JSONSchemaProps> properties = 29; + + optional JSONSchemaPropsOrBool additionalProperties = 30; + + map<string, JSONSchemaProps> patternProperties = 31; + + map<string, JSONSchemaPropsOrStringArray> dependencies = 32; + + optional JSONSchemaPropsOrBool additionalItems = 33; + + map<string, JSONSchemaProps> definitions = 34; + + optional ExternalDocumentation externalDocs = 35; + + optional JSON example = 36; + + optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if it follows one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // The properties specified must either be required or have a default value, + // to ensure those properties are present for all list items. + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...).
+ // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; + + // x-kubernetes-map-type annotates an object to further describe its topology. + // This extension must only be used when type is object and may have 2 possible values: + // + // 1) `granular`: + // These maps are actual maps (key-value pairs) and each field is independent + // of the others (they can each be manipulated by separate actors). This is + // the default behaviour for all maps. + // 2) `atomic`: the map is treated as a single entity, like a scalar. + // Atomic maps will be entirely replaced when updated. + // +optional + optional string xKubernetesMapType = 43; + + // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. + // This field is alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. + // +patchMergeKey=rule + // +patchStrategy=merge + // +listType=map + // +listMapKey=rule + repeated ValidationRule xKubernetesValidations = 44; +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +message JSONSchemaPropsOrArray { + optional JSONSchemaProps schema = 1; + + repeated JSONSchemaProps jSONSchemas = 2; +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +message JSONSchemaPropsOrBool { + optional bool allows = 1; + + optional JSONSchemaProps schema = 2; +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +message JSONSchemaPropsOrStringArray { + optional JSONSchemaProps schema = 1; + + repeated string property = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // namespace is the namespace of the service. + // Required + optional string namespace = 1; + + // name is the name of the service. + // Required + optional string name = 2; + + // path is an optional URL path at which the webhook will be contacted. + // +optional + optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; +} + +// ValidationRule describes a validation rule written in the CEL expression language. +message ValidationRule { + // Rule represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. + // The `self` variable in the CEL expression is bound to the scoped value.
+ // Example: + // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} + // + // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable + // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as + // absent fields in CEL expressions. + // If the Rule is scoped to an object with additionalProperties (i.e. a map) the values of the map + // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map + // are accessible via CEL macros and functions such as `self.all(...)`. + // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and + // functions. + // If the Rule is scoped to a scalar, `self` is bound to the scalar value. + // Examples: + // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} + // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} + // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. + // + // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL + // expressions. This includes: + // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. + // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: + // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true + // - An array where the items schema is of an "unknown type" + // - An object where the additionalProperties schema is of an "unknown type" + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} + // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} + // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} + // + // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect.
Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + optional string rule = 1; + + // Message represents the message displayed when validation fails. The message is required if the Rule contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + optional string message = 2; +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +message WebhookClientConfig { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go new file mode 100644 index 0000000..44941d8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "bytes" + "errors" + + "k8s.io/apimachinery/pkg/util/json" +) + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrBool + switch { + case len(data) == 0: + case data[0] == '{': + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Allows = true + nw.Schema = &sch + case len(data) == 4 && string(data) == "true": + nw.Allows = true + case len(data) == 5 && string(data) == "false": + nw.Allows = false + default: + return errors.New("boolean or JSON schema expected") + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw JSONSchemaPropsOrStringArray + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return json.Marshal(s.JSONSchemas) + } + return json.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSON) MarshalJSON() ([]byte, error) { + if len(s.Raw) > 0 { + return s.Raw, nil + } + return []byte("null"), nil + +} + +func (s *JSON) UnmarshalJSON(data []byte) error { + if len(data) > 0 && !bytes.Equal(data, nullLiteral) { + s.Raw = data + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go new file mode 100644 index 0000000..ac80721 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
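For readers tracing the marshal helpers above, the round-trip behaviour is easiest to see exercised. Below is a minimal sketch, assuming the vendored package is importable under the illustrative alias apiextensionsv1beta1; it is not part of the patch.

// roundtrip_sketch.go: illustrative only, not part of this patch.
// Shows the JSONSchemaPropsOrBool encoding implemented in marshal.go:
// a bare boolean decodes into Allows, an object decodes into Schema
// (and forces Allows to true), and marshalling prefers Schema.
package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	var b apiextensionsv1beta1.JSONSchemaPropsOrBool

	if err := json.Unmarshal([]byte(`false`), &b); err != nil {
		panic(err)
	}
	fmt.Println(b.Allows, b.Schema == nil) // false true

	if err := json.Unmarshal([]byte(`{"type":"string"}`), &b); err != nil {
		panic(err)
	}
	fmt.Println(b.Allows, b.Schema.Type) // true string

	out, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"type":"string"}
}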
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + &ConversionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go new file mode 100644 index 0000000..db445b1 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -0,0 +1,531 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + + // NoneConverter is a converter that only sets the apiVersion of the CR and leaves everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls an external webhook to convert the CR.
+ WebhookConverter ConversionStrategyType = "Webhook" +) + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis/<group>/<version>/...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + // names specify the resource and kind names for the custom resource. + Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional + Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // (An illustrative sketch of this ordering in code follows below.) + // +optional + Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used.
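The "kube-like" ordering described in the versions comment above is fiddly to reimplement by hand; apimachinery ships a comparator for it. A small sketch follows, under the assumption that the vendored apimachinery includes k8s.io/apimachinery/pkg/version at this revision; it is not part of the patch.

// versionsort_sketch.go: illustrative only, not part of this patch.
// Sorts version names in discovery order: GA > beta > alpha, then by
// major/minor version, with non kube-like names sorted last.
package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	names := []string{"v1", "v11alpha2", "foo1", "v3beta1", "v2", "v10", "v11beta2", "foo10", "v12alpha1", "v10beta3"}
	sort.Slice(names, func(i, j int) bool {
		// A positive result means names[i] sorts ahead in discovery order.
		return version.CompareKubeAwareVersionStrings(names[i], names[j]) > 0
	})
	fmt.Println(names)
	// Expected, per the doc comment above:
	// [v10 v2 v1 v11beta2 v10beta3 v3beta1 v12alpha1 v11alpha2 foo1 foo10]
}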
+ // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` + + // conversion defines conversion settings for the CRD. + // +optional + Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details. + // +optional + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only changes the apiVersion and does not touch any other field in the custom resource. + // - `Webhook`: The API server will call an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. + Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` + + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. + // +optional + WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by the API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API server, calls to the webhook will fail. + // Defaults to `["v1beta1"]`. + // +optional + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +type WebhookClientConfig struct { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address.
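A filled-in CustomResourceConversion is the quickest way to see how the strategy, client config and review versions above fit together. A hedged sketch using the types from this file; the namespace, service name, path and CA bundle are placeholders, not values from this patch.

// conversion_sketch.go: illustrative only, not part of this patch.
// A webhook-based conversion stanza wired through a cluster Service.
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	path := "/convert" // placeholder path
	conversion := &apiextensionsv1beta1.CustomResourceConversion{
		Strategy: apiextensionsv1beta1.WebhookConverter,
		// Required when Strategy is "Webhook"; exactly one of Service or URL.
		WebhookClientConfig: &apiextensionsv1beta1.WebhookClientConfig{
			Service: &apiextensionsv1beta1.ServiceReference{
				Namespace: "default",        // placeholder
				Name:      "crd-conversion", // placeholder
				Path:      &path,
				// Port omitted: defaults to 443.
			},
			CABundle: []byte("<PEM-encoded CA bundle>"), // placeholder
		},
		ConversionReviewVersions: []string{"v1beta1"},
	}
	fmt.Printf("%+v\n", conversion)
}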
+ // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // namespace is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // name is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // path is an optional URL path at which the webhook will be contacted. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} + +// CustomResourceDefinitionVersion describes a version for a CRD. +type CustomResourceDefinitionVersion struct { + // name is the version name, e.g. "v1", "v2beta1", etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // served is a flag enabling/disabling this version from being served via REST APIs + Served bool `json:"served" protobuf:"varint,2,opt,name=served"` + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` + // deprecated indicates this version of the custom resource API is deprecated. + // When set to true, API requests to this version receive a warning header in the server response. + // Defaults to false. + // +optional + Deprecated bool `json:"deprecated,omitempty" protobuf:"varint,7,opt,name=deprecated"` + // deprecationWarning overrides the default warning returned to API clients. + // May only be set when `deprecated` is true. + // The default warning indicates this version is deprecated and recommends use + // of the newest served version of equal or greater stability, if one exists.
+ // +optional + DeprecationWarning *string `json:"deprecationWarning,omitempty" protobuf:"bytes,8,opt,name=deprecationWarning"` + // schema describes the schema used for validation and pruning of this version of the custom resource. + // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). + // +optional + Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` + // subresources specify what subresources this version of the defined custom resource has. + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist clients in identifying that the column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` + // description is a human readable description of this column. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<plural>.<group>`).
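A short sketch of the printer-column type just defined, since the Priority semantics are easy to misread: columns with Priority greater than 0 are dropped in space-constrained output. Field values here are illustrative only, not part of the patch.

// columns_sketch.go: illustrative only, not part of this patch.
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	columns := []apiextensionsv1beta1.CustomResourceColumnDefinition{
		{
			Name:     "Replicas",
			Type:     "integer",
			JSONPath: ".spec.replicas", // evaluated against each custom resource
		},
		{
			Name:     "Age",
			Type:     "date",
			Priority: 1, // > 0, so omitted when terminal space is limited
			JSONPath: ".metadata.creationTimestamp",
		},
	}
	fmt.Printf("%+v\n", columns)
}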
+ // Must be all lowercase. + Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. + // +optional + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` +} + +// ResourceScope is an enum defining the different scopes available to a custom resource +type ResourceScope string + +const ( + ClusterScoped ResourceScope = "Cluster" + NamespaceScoped ResourceScope = "Namespaced" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type +type CustomResourceDefinitionConditionType string + +const ( + // Established means that the resource has become active. A resource is established when all names are + // accepted without a conflict for the first time. A resource stays established until deleted, even during + // a later NamesAccepted due to changed names. Note that not all names can be changed. + Established CustomResourceDefinitionConditionType = "Established" + // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in + // the group and are therefore accepted. + NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. + // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extensions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups.
Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" + // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. + Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" +) + +// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition. +type CustomResourceDefinitionCondition struct { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` + // status is the status of the condition. + // Can be True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +type CustomResourceDefinitionStatus struct { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + // +listType=map + // +listMapKey=type + Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + // +optional + AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list.
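The conditions list above is what controllers poll when they create CRDs programmatically. A minimal sketch of the usual "is it Established yet?" check follows, using only types from this file; the helper name is illustrative, not an API of this package.

// established_sketch.go: illustrative only, not part of this patch.
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// isEstablished reports whether the Established condition is True.
// (Hypothetical helper; not part of the vendored package.)
func isEstablished(status apiextensionsv1beta1.CustomResourceDefinitionStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == apiextensionsv1beta1.Established {
			return cond.Status == apiextensionsv1beta1.ConditionTrue
		}
	}
	return false
}

func main() {
	status := apiextensionsv1beta1.CustomResourceDefinitionStatus{
		Conditions: []apiextensionsv1beta1.CustomResourceDefinitionCondition{
			{Type: apiextensionsv1beta1.Established, Status: apiextensionsv1beta1.ConditionTrue},
		},
	}
	fmt.Println(isEstablished(status)) // true
}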
+ // +optional + StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` +} + +// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of +// a CustomResourceDefinition +const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 +// +k8s:prerelease-lifecycle-gen:deprecated=1.16 +// +k8s:prerelease-lifecycle-gen:removed=1.22 +// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinition + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. +type CustomResourceDefinition struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes how the user wants the resources to appear + Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status indicates the actual state of the CustomResourceDefinition + // +optional + Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 +// +k8s:prerelease-lifecycle-gen:deprecated=1.16 +// +k8s:prerelease-lifecycle-gen:removed=1.22 +// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinitionList + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +type CustomResourceDefinitionList struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items list individual CustomResourceDefinition objects + Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +type CustomResourceValidation struct { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +type CustomResourceSubresources struct { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. 
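A sketch of the subresources wiring described above: presence of the (empty) status struct is what enables /status, and the scale struct maps the autoscaling/v1 Scale object onto JSON paths in the custom resource (the scale type itself follows just below). Paths here are illustrative, not part of the patch.

// subresources_sketch.go: illustrative only, not part of this patch.
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	selectorPath := ".status.labelSelector" // must point at a serialized label selector string
	sub := &apiextensionsv1beta1.CustomResourceSubresources{
		// Setting Status (an empty struct) enables the /status endpoint.
		Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{},
		Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
			SpecReplicasPath:   ".spec.replicas",
			StatusReplicasPath: ".status.replicas",
			LabelSelectorPath:  &selectorPath, // required for HorizontalPodAutoscaler support
		},
	}
	fmt.Printf("%+v\n", sub)
}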
+ // +optional + Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +type CustomResourceSubresourceStatus struct{} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +type CustomResourceSubresourceScale struct { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 +// +k8s:prerelease-lifecycle-gen:deprecated=1.19 +// This API is never served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally +// and having the generator against this group will protect future APIs which may be served. +// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,ConversionReview + +// ConversionReview describes a conversion request/response. +type ConversionReview struct { + metav1.TypeMeta `json:",inline"` + // request describes the attributes for the conversion request. + // +optional + Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // response describes the attributes for the conversion response. 
+ // +optional + Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// ConversionRequest describes the conversion request parameters. +type ConversionRequest struct { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` + // objects is the list of custom resource objects to be converted. + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` +} + +// ConversionResponse describes a conversion response. +type ConversionResponse struct { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. + Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go new file mode 100644 index 0000000..c9d943c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -0,0 +1,331 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
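The ConversionResponse contract above translates directly into the response-shaping step of a conversion webhook. A hedged sketch follows; respond and convert are hypothetical helpers, not part of the vendored package or this patch.

// convertreply_sketch.go: illustrative only, not part of this patch.
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// respond fills in Response per the rules above: echo the request UID,
// keep converted objects in the same order, and report Success.
func respond(review *apiextensionsv1beta1.ConversionReview, convert func(runtime.RawExtension) runtime.RawExtension) {
	converted := make([]runtime.RawExtension, 0, len(review.Request.Objects))
	for _, obj := range review.Request.Objects {
		// convert must rewrite apiVersion to Request.DesiredAPIVersion.
		converted = append(converted, convert(obj))
	}
	review.Response = &apiextensionsv1beta1.ConversionResponse{
		UID:              review.Request.UID, // copied from the request
		ConvertedObjects: converted,          // same size and order as the input
		Result:           metav1.Status{Status: metav1.StatusSuccess},
	}
}

func main() {
	review := &apiextensionsv1beta1.ConversionReview{Request: &apiextensionsv1beta1.ConversionRequest{}}
	respond(review, func(o runtime.RawExtension) runtime.RawExtension { return o })
	fmt.Printf("%+v\n", review.Response)
}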
+*/ + +package v1beta1 + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + + // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + // + // - bsonobjectid: a bson object ID, i.e. a 24-character hex string + // - uri: a URI as parsed by Golang net/url.ParseRequestURI + // - email: an email address as parsed by Golang net/mail.ParseAddress + // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP + // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP + // - cidr: a CIDR as parsed by Golang net.ParseCIDR + // - mac: a MAC address as parsed by Golang net.ParseMAC + // - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" + // - isbn10: an ISBN10 number string like "0321751043" + // - isbn13: an ISBN13 number string like "978-0321751041" + // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in + // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ + // - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ + // - rgbcolor: an RGB color code like "rgb(255,255,255)" + // - byte: base64 encoded binary data + // - password: any kind of string + // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 + // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format + // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. + Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` + + Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API.
+ Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` + Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` + Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` + AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` + PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` + Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` + AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` + Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` + Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` + Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. 
It is allowed to further
+	// restrict the embedded object. kind, apiVersion and metadata
+	// are validated automatically. x-kubernetes-preserve-unknown-fields
+	// is allowed to be true, but does not have to be if the object
+	// is fully specified (up to kind, apiVersion, metadata).
+	XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"`
+
+	// x-kubernetes-int-or-string specifies that this value is
+	// either an integer or a string. If this is true, an empty
+	// type is allowed and type as child of anyOf is permitted
+	// if it follows one of the following patterns:
+	//
+	// 1) anyOf:
+	//    - type: integer
+	//    - type: string
+	// 2) allOf:
+	//    - anyOf:
+	//      - type: integer
+	//      - type: string
+	//    - ... zero or more
+	XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"`
+
+	// x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used
+	// as the index of the map.
+	//
+	// This tag MUST only be used on lists that have the "x-kubernetes-list-type"
+	// extension set to "map". Also, each value specified for this attribute must
+	// be a scalar typed field of the child structure (no nesting is supported).
+	//
+	// The properties specified must either be required or have a default value,
+	// to ensure those properties are present for all list items.
+	//
+	// +optional
+	XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"`
+
+	// x-kubernetes-list-type annotates an array to further describe its topology.
+	// This extension must only be used on lists and may have 3 possible values:
+	//
+	// 1) `atomic`: the list is treated as a single entity, like a scalar.
+	//      Atomic lists will be entirely replaced when updated. This extension
+	//      may be used on any type of list (struct, scalar, ...).
+	// 2) `set`:
+	//      Sets are lists that must not have multiple items with the same value. Each
+	//      value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
+	//      array with x-kubernetes-list-type `atomic`.
+	// 3) `map`:
+	//      These lists are like maps in that their elements have a non-index key
+	//      used to identify them. Order is preserved upon merge. The map tag
+	//      must only be used on a list with elements of type object.
+	// Defaults to atomic for arrays.
+	// +optional
+	XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"`
+
+	// x-kubernetes-map-type annotates an object to further describe its topology.
+	// This extension must only be used when type is object and may have 2 possible values:
+	//
+	// 1) `granular`:
+	//      These maps are actual maps (key-value pairs) and each field is independent
+	//      of the others (they can each be manipulated by separate actors). This is
+	//      the default behaviour for all maps.
+	// 2) `atomic`: the map is treated as a single entity, like a scalar.
+	//      Atomic maps will be entirely replaced when updated.
+	// +optional
+	XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"`
+
+	// x-kubernetes-validations describes a list of validation rules written in the CEL expression language.
+	// This field is alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
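+	// A minimal illustrative schema fragment (hypothetical field names):
+	//
+	//	x-kubernetes-validations:
+	//	- rule: "self.minReplicas <= self.maxReplicas"
+	//	  message: "minReplicas must not exceed maxReplicas"
+	//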
+	// +patchMergeKey=rule
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=rule
+	XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"`
+}
+
+// ValidationRules describes a list of validation rules written in the CEL expression language.
+type ValidationRules []ValidationRule
+
+// ValidationRule describes a validation rule written in the CEL expression language.
+type ValidationRule struct {
+	// Rule represents the expression which will be evaluated by CEL.
+	// ref: https://github.com/google/cel-spec
+	// The Rule is scoped to the location of the x-kubernetes-validations extension in the schema.
+	// The `self` variable in the CEL expression is bound to the scoped value.
+	// Example:
+	// - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
+	//
+	// If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable
+	// via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as
+	// absent fields in CEL expressions.
+	// If the Rule is scoped to an object with additionalProperties (i.e. a map), the values of the map
+	// are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map
+	// are accessible via CEL macros and functions such as `self.all(...)`.
+	// If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and
+	// functions.
+	// If the Rule is scoped to a scalar, `self` is bound to the scalar value.
+	// Examples:
+	// - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"}
+	// - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"}
+	// - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
+	//
+	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+	// object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
+	//
+	// Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL
+	// expressions. This includes:
+	// - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields.
+	// - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
+	//   - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
+	//   - An array where the items schema is of an "unknown type"
+	//   - An object where the additionalProperties schema is of an "unknown type"
+	//
+	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+	// Accessible property names are escaped according to the following rules when accessed in the expression:
+	// - '__' escapes to '__underscores__'
+	// - '.' escapes to '__dot__'
+	// - '-' escapes to '__dash__'
+	// - '/' escapes to '__slash__'
+	// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+	//	  "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+	//	  "import", "let", "loop", "package", "namespace", "return".
+	// Examples:
+	// - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
+	// - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
+	// - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
+	//
+	// Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+	// Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:
+	// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+	//   non-intersecting elements in `Y` are appended, retaining their partial order.
+	// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+	//   are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+	//   non-intersecting keys are appended, retaining their partial order.
+	Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"`
+	// Message represents the message displayed when validation fails. The message is required if the Rule contains
+	// line breaks. The message must not contain line breaks.
+	// If unset, the message is "failed rule: {Rule}".
+	// e.g. "must be a URL with the host matching spec.host"
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
+
+// JSON represents any valid JSON value.
+// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.
+type JSON struct {
+	Raw []byte `protobuf:"bytes,1,opt,name=raw"`
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ JSON) OpenAPISchemaType() []string {
+	// TODO: return actual types when anyOf is supported
+	return nil
+}
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ JSON) OpenAPISchemaFormat() string { return "" }
+
+// JSONSchemaURL represents a schema URL.
+type JSONSchemaURL string
+
+// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps
+// or an array of JSONSchemaProps. Mainly here for serialization purposes.
+type JSONSchemaPropsOrArray struct {
+	Schema      *JSONSchemaProps  `protobuf:"bytes,1,opt,name=schema"`
+	JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"`
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string {
+	// TODO: return actual types when anyOf is supported
+	return nil
+}
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" }
+
+// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value.
+// Defaults to true for the boolean property.
+type JSONSchemaPropsOrBool struct {
+	Allows bool             `protobuf:"varint,1,opt,name=allows"`
+	Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"`
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string {
+	// TODO: return actual types when anyOf is supported
+	return nil
+}
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" }
+
+// JSONSchemaDependencies represents a dependencies property.
+type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray
+
+// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.
+type JSONSchemaPropsOrStringArray struct {
+	Schema   *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"`
+	Property []string         `protobuf:"bytes,2,rep,name=property"`
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string {
+	// TODO: return actual types when anyOf is supported
+	return nil
+}
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" }
+
+// JSONSchemaDefinitions contains the models explicitly defined in this spec.
+type JSONSchemaDefinitions map[string]JSONSchemaProps
+
+// ExternalDocumentation allows referencing an external resource for extended documentation.
+type ExternalDocumentation struct {
+	Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"`
+	URL         string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"`
+}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000..54cae3c
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,1366 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
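+//
+// As a rough orientation for reading the generated functions below, a scheme
+// would typically be wired up like this (hypothetical caller, not part of
+// this file):
+//
+//	scheme := runtime.NewScheme()
+//	if err := RegisterConversions(scheme); err != nil {
+//		// handle the registration error
+//	}
+//
+// after which scheme.Convert can translate between the v1beta1 types and the
+// internal apiextensions types registered here.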
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) + }); err != 
nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), 
b.(*JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ValidationRule)(nil), (*apiextensions.ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(a.(*ValidationRule), b.(*apiextensions.ValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ValidationRule)(nil), (*ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(a.(*apiextensions.ValidationRule), b.(*ValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1beta1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. 
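+//
+// The generated Convert_* helpers can also be invoked directly; a minimal
+// sketch (hypothetical values):
+//
+//	in := &CustomResourceColumnDefinition{Name: "Age", Type: "date", JSONPath: ".metadata.creationTimestamp"}
+//	out := &apiextensions.CustomResourceColumnDefinition{}
+//	_ = Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, nil)
+//
+// Passing a nil conversion.Scope is acceptable here only because this
+// generated function never consults it.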
+func Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(apiextensions.WebhookClientConfig) + if err := Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } + out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) + return nil +} + +// Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + out.Strategy = ConversionStrategyType(in.Strategy) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + if err := Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } + out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) + return nil +} + +// Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) + out.Status = apiextensions.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. 
+func Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = CustomResourceDefinitionConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiextensions.CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + out.Version = in.Version + if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = apiextensions.ResourceScope(in.Scope) + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Validation = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec is an autogenerated conversion function. 
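+//
+// Note: the unsafe.Pointer casts in the generated bodies above and below are
+// emitted by conversion-gen only where the source and destination field types
+// share an identical memory layout, so each cast is a zero-copy
+// reinterpretation rather than a deep copy; structurally different fields go
+// through explicit Convert_* calls instead.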
+func Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + out.Version = in.Version + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = ResourceScope(in.Scope) + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Validation = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. 
+func Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + out.Deprecated = in.Deprecated + out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. 
+func Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + out.Deprecated = in.Deprecated + out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in, out, s) +} + +func autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in, out, s) +} + +func autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. +func Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + return autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) +} + +func autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation is an autogenerated conversion function. 
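The JSON autoConvert functions just below are the one place the generator gives up: v1beta1.JSON stores raw bytes while the internal apiextensions.JSON is an arbitrary Go value, hence the WARNING/FIXME comments and the empty bodies. The hand-written bridge lives in this package's conversion.go (also added by this patch, per the file list). A rough sketch of that idea, paraphrased rather than copied from the patch, with toRawJSON being a hypothetical helper name:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// toRawJSON approximates the manual internal-to-v1beta1 JSON conversion:
// serialize the arbitrary Go value into the Raw byte form v1beta1 carries.
func toRawJSON(in interface{}) (v1beta1.JSON, error) {
	raw, err := json.Marshal(in)
	if err != nil {
		return v1beta1.JSON{}, err
	}
	return v1beta1.JSON{Raw: raw}, nil
}

func main() {
	out, err := toRawJSON(map[string]interface{}{"replicas": 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out.Raw)) // {"replicas":3}
}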
+func Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + return autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in, out, s) +} + +func autoConvert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + // WARNING: in.Raw requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + // FIXME: Type apiextensions.JSON is unsupported. + return nil +} + +func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = apiextensions.JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(apiextensions.JSON) + if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]apiextensions.JSON, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(apiextensions.JSONSchemaPropsOrArray) + if err := Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, 
&out.Not + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(apiextensions.JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaPropsOrStringArray) + if err := Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(apiextensions.JSON) + if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) + out.XValidations = *(*apiextensions.ValidationRules)(unsafe.Pointer(&in.XValidations)) + return nil +} + +// Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. 
+func Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + return autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Nullable = in.Nullable + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]JSON, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSON_To_v1beta1_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(JSONSchemaPropsOrArray) + if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + 
} + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaPropsOrStringArray) + if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) + out.XValidations = *(*ValidationRules)(unsafe.Pointer(&in.XValidations)) + return nil +} + +func autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion 
function. +func Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. +func Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
+func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. +func Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + return autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) +} + +func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. 
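The ServiceReference pair above is the one spot in this file that delegates to the apimachinery meta/v1 helpers, because Port is *int32 in v1beta1 but a plain int32 internally. As best I recall those helpers, a nil pointer converts to the zero value and the reverse direction wraps the value in a freshly allocated pointer; an illustrative restatement of that contract (a paraphrase, not the real helpers):

package main

import "fmt"

// pointerToInt32 mirrors the contract of v1.Convert_Pointer_int32_To_int32:
// a nil port pointer becomes the zero value. See
// k8s.io/apimachinery/pkg/apis/meta/v1 for the actual implementation.
func pointerToInt32(in *int32) int32 {
	if in == nil {
		return 0
	}
	return *in
}

// int32ToPointer mirrors v1.Convert_int32_To_Pointer_int32: the value is
// copied into a new allocation so the output never aliases the input.
func int32ToPointer(in int32) *int32 {
	out := in
	return &out
}

func main() {
	fmt.Println(pointerToInt32(nil))   // 0
	fmt.Println(*int32ToPointer(8443)) // 8443
}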
+func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in, out, s) +} + +func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { + out.Rule = in.Rule + out.Message = in.Message + return nil +} + +// Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule is an autogenerated conversion function. +func Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { + return autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in, out, s) +} + +func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { + out.Rule = in.Rule + out.Message = in.Message + return nil +} + +// Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule is an autogenerated conversion function. +func Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { + return autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in, out, s) +} + +func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) +} + +func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function. 
+func Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..26a9d4d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,704 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. +func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. 
+func (in *ConversionReview) DeepCopy() *ConversionReview { + if in == nil { + return nil + } + out := new(ConversionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConversionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. +func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. +func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { + if in == nil { + return nil + } + out := new(CustomResourceDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. +func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
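DeepCopyObject, generated above for ConversionReview and CustomResourceDefinition, is what makes these types satisfy runtime.Object, so schemes, caches, and informers can clone them without knowing the concrete type. The practical rule these functions support: deep-copy before mutating anything that might be shared. A small sketch, not part of the patch:

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	orig := &v1beta1.CustomResourceDefinition{}
	orig.Name = "widgets.example.com" // via the embedded ObjectMeta

	cp := orig.DeepCopy() // generated above; clones metadata, spec, and status
	cp.Name = "gadgets.example.com"

	fmt.Println(orig.Name, cp.Name) // the original is left untouched
}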
+func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. +func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { + *out = *in + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. +func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { + *out = *in + in.Names.DeepCopyInto(&out.Names) + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } + if in.PreserveUnknownFields != nil { + in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. +func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CustomResourceDefinitionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. +func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + if in.DeprecationWarning != nil { + in, out := &in.DeprecationWarning, &out.DeprecationWarning + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { + *out = *in + if in.LabelSelectorPath != nil { + in, out := &in.LabelSelectorPath, &out.LabelSelectorPath + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. +func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceScale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. +func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CustomResourceSubresourceStatus) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(CustomResourceSubresourceScale) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. +func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { + if in == nil { + return nil + } + out := new(CustomResourceSubresources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { + *out = *in + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. +func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { + if in == nil { + return nil + } + out := new(CustomResourceValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. +func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { + if in == nil { + return nil + } + out := new(ExternalDocumentation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSON) DeepCopyInto(out *JSON) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. +func (in *JSON) DeepCopy() *JSON { + if in == nil { + return nil + } + out := new(JSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { + { + in := &in + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. +func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { + if in == nil { + return nil + } + out := new(JSONSchemaDefinitions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { + { + in := &in + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. 
+func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { + if in == nil { + return nil + } + out := new(JSONSchemaDependencies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. +func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. +func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrBool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. +func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrStringArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule. +func (in *ValidationRule) DeepCopy() *ValidationRule { + if in == nil { + return nil + } + out := new(ValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ValidationRules) DeepCopyInto(out *ValidationRules) { + { + in := &in + *out = make(ValidationRules, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules. +func (in ValidationRules) DeepCopy() ValidationRules { + if in == nil { + return nil + } + out := new(ValidationRules) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000..225c6ff --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go @@ -0,0 +1,56 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
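The RegisterDefaults function that follows is the generated entry point for defaulting: it maps each top-level type to a SetObjectDefaults_* walker, which in turn fans out to the hand-written SetDefaults_* functions in this package's defaults.go. A sketch of driving it through a scheme (illustrative; the assumption that AddToScheme already wires in the defaulters is based on this package's register.go, so the explicit RegisterDefaults call is redundant but harmless):

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := v1beta1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}

	crd := &v1beta1.CustomResourceDefinition{}
	scheme.Default(crd) // dispatches to SetObjectDefaults_CustomResourceDefinition below
	// Fields such as Spec.Conversion are populated by defaults.go (assumption).
	fmt.Printf("%+v\n", crd.Spec.Conversion)
}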
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) + scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { + SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) + }) + return nil +} + +func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinition(in) + SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.WebhookClientConfig != nil { + if in.Spec.Conversion.WebhookClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.WebhookClientConfig.Service) + } + } + } +} + +func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CustomResourceDefinition(a) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000..9c22ae5 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,98 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConversionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ConversionReview) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *ConversionReview) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "ConversionReview"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ConversionReview) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CustomResourceDefinition) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *CustomResourceDefinition) APILifecycleDeprecated() (major, minor int) { + return 1, 16 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *CustomResourceDefinition) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *CustomResourceDefinition) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CustomResourceDefinitionList) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *CustomResourceDefinitionList) APILifecycleDeprecated() (major, minor int) { + return 1, 16 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *CustomResourceDefinitionList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinitionList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *CustomResourceDefinitionList) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go new file mode 100644 index 0000000..13fc815 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package clientset + +import ( + "fmt" + "net/http" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface + ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client + apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client +} + +// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client +func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { + return c.apiextensionsV1beta1 +} + +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return c.apiextensionsV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
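+//
+// For example (sketch): with c.QPS = 50, c.Burst = 100 and no RateLimiter set,
+// the shallow copy is given flowcontrol.NewTokenBucketRateLimiter(50, 100),
+// as done in NewForConfigAndClient below.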
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.apiextensionsV1beta1, err = apiextensionsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.apiextensionsV1, err = apiextensionsv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.apiextensionsV1beta1 = apiextensionsv1beta1.New(c) + cs.apiextensionsV1 = apiextensionsv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go new file mode 100644 index 0000000..ee865e5 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
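+//
+// A typical usage sketch (the kubeconfig path is illustrative, and error
+// handling is omitted for brevity):
+//
+//	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
+//	cs, err := clientset.NewForConfig(cfg)
+//	crds, err := cs.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{})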
+package clientset diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go new file mode 100644 index 0000000..7dc3756 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go new file mode 100644 index 0000000..25fca99 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + apiextensionsv1beta1.AddToScheme, + apiextensionsv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go new file mode 100644 index 0000000..0bdc44c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiextensionsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiextensionsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1Client for the given RESTClient. 
+func New(c rest.Interface) *ApiextensionsV1Client { + return &ApiextensionsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 0000000..5569b12 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
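+//
+// A short usage sketch (the clientset variable and CRD name are illustrative):
+//
+//	crd, err := cs.ApiextensionsV1().CustomResourceDefinitions().
+//		Get(ctx, "widgets.example.com", metav1.GetOptions{})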
+type CustomResourceDefinitionInterface interface { + Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. 
+func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go new file mode 100644 index 0000000..e594636 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go new file mode 100644 index 0000000..657ce2c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "net/http" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1beta1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1beta1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1beta1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiextensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiextensionsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiextensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ApiextensionsV1beta1Client { + return &ApiextensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 0000000..2d16ca7 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. +type CustomResourceDefinitionInterface interface { + Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. 
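+//
+// For example (sketch): v1.ListOptions{LabelSelector: "app=example"} limits
+// the result to CRDs carrying that label.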
+func (c *customResourceDefinitions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go new file mode 100644 index 0000000..2a989d4 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 0000000..75736b5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go new file mode 100644 index 0000000..0c6e504 --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. +func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error { + var lastErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case retriable(err): + lastErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastErr + } + return err +} + +// RetryOnConflict is used to make an update to a resource when you have to worry about +// conflicts caused by other code making unrelated updates to the resource at the same +// time. fn should fetch the resource to be modified, make appropriate changes to it, try +// to update it, and return (unmodified) the error from the update function. On a +// successful update, RetryOnConflict will return nil. If the update function returns a +// "Conflict" error, RetryOnConflict will wait some amount of time as described by +// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times +// and gives up, RetryOnConflict will return an error to the caller. +// +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. 
+// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err != nil { +// return err +// } +// +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed +// +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... +// +// TODO: Make Backoff an interface? +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c842132..1c3f583 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,6 +20,7 @@ github.com/fsnotify/fsnotify # github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr +github.com/go-logr/logr/funcr # github.com/go-logr/zapr v1.2.3 ## explicit; go 1.16 github.com/go-logr/zapr @@ -140,6 +141,18 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types +# github.com/onsi/ginkgo/v2 v2.6.0 +## explicit; go 1.18 +github.com/onsi/ginkgo/v2 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/internal +github.com/onsi/ginkgo/v2/internal/global +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/internal/testingtproxy +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types # github.com/onsi/gomega v1.24.1 ## explicit; go 1.18 github.com/onsi/gomega @@ -393,6 +406,11 @@ k8s.io/api/storage/v1beta1 ## explicit; go 1.19 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 # k8s.io/apimachinery v0.26.0 ## explicit; go 1.19 k8s.io/apimachinery/pkg/api/equality @@ -577,6 +595,7 @@ k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue # k8s.io/component-base v0.26.0 ## explicit; go 1.19 @@ -632,16 +651,22 @@ sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 sigs.k8s.io/controller-runtime/pkg/controller sigs.k8s.io/controller-runtime/pkg/controller/controllerutil sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/envtest sigs.k8s.io/controller-runtime/pkg/event sigs.k8s.io/controller-runtime/pkg/handler sigs.k8s.io/controller-runtime/pkg/healthz sigs.k8s.io/controller-runtime/pkg/internal/controller sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/field/selector +sigs.k8s.io/controller-runtime/pkg/internal/flock sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log 
sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/internal/testing/addr +sigs.k8s.io/controller-runtime/pkg/internal/testing/certs +sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane +sigs.k8s.io/controller-runtime/pkg/internal/testing/process sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/log/zap diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go new file mode 100644 index 0000000..dc38b79 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -0,0 +1,458 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// CRDInstallOptions are the options for installing CRDs. +type CRDInstallOptions struct { + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Paths is a list of paths to the directories or files containing CRDs + Paths []string + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1.CustomResourceDefinition + + // ErrorIfPathMissing will cause an error if a Path does not exist + ErrorIfPathMissing bool + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration + + // CleanUpAfterUse will cause the CRDs listed for installation to be + // uninstalled when terminating the test environment. + // Defaults to false. + CleanUpAfterUse bool + + // WebhookOptions contains the conversion webhook information to install + // on the CRDs. This field is usually inherited by the EnvTest options. + // + // If you're passing this field manually, you need to make sure that + // the CA information and host port is filled in properly. 
+ WebhookOptions WebhookInstallOptions +} + +const ( + defaultPollInterval = 100 * time.Millisecond + defaultMaxWait = 10 * time.Second +) + +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory. +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + defaultCRDOptions(&options) + + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return nil, fmt.Errorf("unable to read CRD files: %w", err) + } + + if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil { + return nil, err + } + + // Create the CRDs in the apiserver + if err := CreateCRDs(config, options.CRDs); err != nil { + return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err) + } + + // Wait for the CRDs to appear as Resources in the apiserver + if err := WaitForCRDs(config, options.CRDs, options); err != nil { + return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err) + } + + return options.CRDs, nil +} + +// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs. +func readCRDFiles(options *CRDInstallOptions) error { + if len(options.Paths) > 0 { + crdList, err := renderCRDs(options) + if err != nil { + return err + } + + options.CRDs = append(options.CRDs, crdList...) + } + return nil +} + +// defaultCRDOptions sets the default values for CRDs. +func defaultCRDOptions(o *CRDInstallOptions) { + if o.Scheme == nil { + o.Scheme = scheme.Scheme + } + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + +// WaitForCRDs waits for the CRDs to appear in discovery. +func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error { + // Add each CRD to a map of GroupVersion to Resource + waitingFor := map[schema.GroupVersion]*sets.Set[string]{} + for _, crd := range crds { + gvs := []schema.GroupVersion{} + for _, version := range crd.Spec.Versions { + if version.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) + } + } + + for _, gv := range gvs { + log.V(1).Info("adding API in waitlist", "GV", gv) + if _, found := waitingFor[gv]; !found { + // Initialize the set + waitingFor[gv] = &sets.Set[string]{} + } + // Add the Resource + waitingFor[gv].Insert(crd.Spec.Names.Plural) + } + } + + // Poll until all resources are found in discovery + p := &poller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type poller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersion]*sets.Set[string] +} + +// poll checks if all the resources have been found in discovery, and returns false if not. 
+func (p *poller) poll() (done bool, err error) {
+	// Create a new clientset to avoid any client caching of discovery
+	cs, err := clientset.NewForConfig(p.config)
+	if err != nil {
+		return false, err
+	}
+
+	allFound := true
+	for gv, resources := range p.waitingFor {
+		// All resources found, do nothing
+		if resources.Len() == 0 {
+			delete(p.waitingFor, gv)
+			continue
+		}
+
+		// Get the Resources for this GroupVersion
+		// TODO: Maybe the controller-runtime client should be able to do this...
+		resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version)
+		if err != nil {
+			return false, nil //nolint:nilerr
+		}
+
+		// Remove each found resource from the resources set that we are waiting for
+		for _, resource := range resourceList.APIResources {
+			resources.Delete(resource.Name)
+		}
+
+		// Still waiting on some resources in this group version
+		if resources.Len() != 0 {
+			allFound = false
+		}
+	}
+	return allFound, nil
+}
+
+// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory.
+func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error {
+	// Read the CRD yamls into options.CRDs
+	if err := readCRDFiles(&options); err != nil {
+		return err
+	}
+
+	// Delete the CRDs from the apiserver
+	cs, err := client.New(config, client.Options{})
+	if err != nil {
+		return err
+	}
+
+	// Uninstall each CRD
+	for _, crd := range options.CRDs {
+		crd := crd
+		log.V(1).Info("uninstalling CRD", "crd", crd.GetName())
+		if err := cs.Delete(context.TODO(), crd); err != nil {
+			// If the CRD is not found, we can consider the deletion a success
+			if !apierrors.IsNotFound(err) {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// CreateCRDs creates the CRDs.
+func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error {
+	cs, err := client.New(config, client.Options{})
+	if err != nil {
+		return fmt.Errorf("unable to create client: %w", err)
+	}
+
+	// Create each CRD
+	for _, crd := range crds {
+		crd := crd
+		log.V(1).Info("installing CRD", "crd", crd.GetName())
+		existingCrd := crd.DeepCopy()
+		err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd)
+		switch {
+		case apierrors.IsNotFound(err):
+			if err := cs.Create(context.TODO(), crd); err != nil {
+				return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err)
+			}
+		case err != nil:
+			return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err)
+		default:
+			log.V(1).Info("CRD already exists, updating", "crd", crd.GetName())
+			if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+				if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil {
+					return err
+				}
+				crd.SetResourceVersion(existingCrd.GetResourceVersion())
+				return cs.Update(context.TODO(), crd)
+			}); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// renderCRDs iterates through options.Paths and extracts all CRD files.
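+//
+// When the same GVK and name occur more than once across the given paths,
+// duplicates are only logged and the definition read last wins.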
+func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) {
+	type GVKN struct {
+		GVK  schema.GroupVersionKind
+		Name string
+	}
+
+	crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{}
+
+	for _, path := range options.Paths {
+		var (
+			err      error
+			info     os.FileInfo
+			files    []string
+			filePath = path
+		)
+
+		// Return the error if ErrorIfPathMissing is set
+		if info, err = os.Stat(path); os.IsNotExist(err) {
+			if options.ErrorIfPathMissing {
+				return nil, err
+			}
+			continue
+		}
+
+		if !info.IsDir() {
+			filePath, files = filepath.Dir(path), []string{info.Name()}
+		} else {
+			entries, err := os.ReadDir(path)
+			if err != nil {
+				return nil, err
+			}
+			for _, e := range entries {
+				files = append(files, e.Name())
+			}
+		}
+
+		log.V(1).Info("reading CRDs from path", "path", path)
+		crdList, err := readCRDs(filePath, files)
+		if err != nil {
+			return nil, err
+		}
+
+		for i, crd := range crdList {
+			gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()}
+			if _, found := crds[gvkn]; found {
+				// Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense.
+				log.Info("there is more than one CRD definition with the same GVKN", "GVKN", gvkn)
+			}
+			// We always use the CRD definition that we found last.
+			crds[gvkn] = crdList[i]
+		}
+	}
+
+	// Convert the map to a list to return
+	res := []*apiextensionsv1.CustomResourceDefinition{}
+	for _, obj := range crds {
+		res = append(res, obj)
+	}
+	return res, nil
+}
+
+// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications
+// to conditionally enable webhooks if the type is registered within the scheme.
+func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error {
+	if len(webhookOptions.LocalServingCAData) == 0 {
+		return nil
+	}
+
+	// Determine all registered convertible types.
+	convertibles := map[schema.GroupKind]struct{}{}
+	for gvk := range scheme.AllKnownTypes() {
+		obj, err := scheme.New(gvk)
+		if err != nil {
+			return err
+		}
+		if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil {
+			convertibles[gvk.GroupKind()] = struct{}{}
+		}
+	}
+
+	// generate host port.
+	hostPort, err := webhookOptions.generateHostPort()
+	if err != nil {
+		return err
+	}
+	url := pointer.String(fmt.Sprintf("https://%s/convert", hostPort))
+
+	for i := range crds {
+		// Continue if we're preserving unknown fields.
+		if crds[i].Spec.PreserveUnknownFields {
+			continue
+		}
+		// Continue if the GroupKind isn't registered as being convertible.
+		if _, ok := convertibles[schema.GroupKind{
+			Group: crds[i].Spec.Group,
+			Kind:  crds[i].Spec.Names.Kind,
+		}]; !ok {
+			continue
+		}
+		if crds[i].Spec.Conversion == nil {
+			crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
+				Webhook: &apiextensionsv1.WebhookConversion{},
+			}
+		}
+		crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter
+		crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"}
+		crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{
+			Service:  nil,
+			URL:      url,
+			CABundle: webhookOptions.LocalServingCAData,
+		}
+	}
+
+	return nil
+}
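To make the patch above concrete, this is (roughly) the conversion stanza a matching CRD ends up with: strategy Webhook, a direct URL instead of a Service reference, and the test CA bundle. A small sketch constructing the equivalent value; hostPort and caData are placeholders:

package mytest // hypothetical sketch of the conversion stanza written above

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/utils/pointer"
)

func conversionStanza(hostPort string, caData []byte) *apiextensionsv1.CustomResourceConversion {
	return &apiextensionsv1.CustomResourceConversion{
		Strategy: apiextensionsv1.WebhookConverter,
		Webhook: &apiextensionsv1.WebhookConversion{
			ConversionReviewVersions: []string{"v1", "v1beta1"},
			ClientConfig: &apiextensionsv1.WebhookClientConfig{
				// Service stays nil: the test server is reached directly by URL.
				URL:      pointer.String("https://" + hostPort + "/convert"),
				CABundle: caData,
			},
		},
	}
}

+// readCRDs reads the CRDs from files and unmarshals them into structs.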
+func readCRDs(basePath string, files []string) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var crds []*apiextensionsv1.CustomResourceDefinition + + // White list the file extensions that may contain CRDs + crdExts := sets.NewString(".json", ".yaml", ".yml") + + for _, file := range files { + // Only parse allowlisted file types + if !crdExts.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal CRDs from file into structs + docs, err := readDocuments(filepath.Join(basePath, file)) + if err != nil { + return nil, err + } + + for _, doc := range docs { + crd := &apiextensionsv1.CustomResourceDefinition{} + if err = yaml.Unmarshal(doc, crd); err != nil { + return nil, err + } + + if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + continue + } + crds = append(crds, crd) + } + + log.V(1).Info("read CRDs from file", "file", file) + } + return crds, nil +} + +// readDocuments reads documents from file. +func readDocuments(fp string) ([][]byte, error) { + b, err := os.ReadFile(fp) + if err != nil { + return nil, err + } + + docs := [][]byte{} + reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go new file mode 100644 index 0000000..412e794 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envtest provides libraries for integration testing by starting a local control plane +// +// Control plane binaries (etcd and kube-apiserver) are loaded by default from +// /usr/local/kubebuilder/bin. This can be overridden by setting the +// KUBEBUILDER_ASSETS environment variable, or by directly creating a +// ControlPlane for the Environment to use. +// +// Environment can also be configured to work with an existing cluster, and +// simply load CRDs and provide client configuration. +package envtest diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go new file mode 100644 index 0000000..d3b5201 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + crdScheme = scheme.Scheme +) + +// init is required to correctly initialize the crdScheme package variable. +func init() { + _ = apiextensionsv1.AddToScheme(crdScheme) +} + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. +// This function makes no guarantees about order of the merged slice. +func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, obj := range s1 { + m[obj.GetName()] = obj + } + for _, obj := range s2 { + m[obj.GetName()] = obj + } + merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m)) + i := 0 + for _, obj := range m { + merged[i] = obj.DeepCopy() + i++ + } + return merged +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go new file mode 100644 index 0000000..f9e0bb8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -0,0 +1,374 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "fmt" + "os" + "strings" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var log = logf.RuntimeLog.WithName("test-env") + +/* +It's possible to override some defaults, by setting the following environment variables: +* USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster +* TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use +* TEST_ASSET_ETCD (string): path to the etcd binary to use +* TEST_ASSET_KUBECTL (string): path to the kubectl binary to use +* KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. +* KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. 
+* KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to stop. Defaults to 20s.
+* KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr
+*/
+const (
+	envUseExistingCluster = "USE_EXISTING_CLUSTER"
+	envStartTimeout       = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT"
+	envStopTimeout        = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT"
+	envAttachOutput       = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT"
+	StartTimeout          = 60
+	StopTimeout           = 60
+
+	defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second
+	defaultKubebuilderControlPlaneStopTimeout  = 20 * time.Second
+)
+
+// internal types we expose as part of our public API.
+type (
+	// ControlPlane is the re-exported ControlPlane type from the internal testing package.
+	ControlPlane = controlplane.ControlPlane
+
+	// APIServer is the re-exported APIServer from the internal testing package.
+	APIServer = controlplane.APIServer
+
+	// Etcd is the re-exported Etcd from the internal testing package.
+	Etcd = controlplane.Etcd
+
+	// User represents a Kubernetes user to provision for auth purposes.
+	User = controlplane.User
+
+	// AuthenticatedUser represents a Kubernetes user that's been provisioned.
+	AuthenticatedUser = controlplane.AuthenticatedUser
+
+	// ListenAddr indicates the address and port that the API server should listen on.
+	ListenAddr = process.ListenAddr
+
+	// SecureServing contains details describing how the API server should serve
+	// its secure endpoint.
+	SecureServing = controlplane.SecureServing
+
+	// Authn is an authentication method that can be used with the control plane to
+	// provision users.
+	Authn = controlplane.Authn
+
+	// Arguments allows configuring a process's flags.
+	Arguments = process.Arguments
+
+	// Arg is a single flag with one or more values.
+	Arg = process.Arg
+)
+
+var (
+	// EmptyArguments constructs a new set of flags with nothing set.
+	//
+	// This is mostly useful for testing helper methods -- you'll want to call
+	// Configure on the APIServer (or etcd) to configure their arguments.
+	EmptyArguments = process.EmptyArguments
+)
+
+// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and
+// install extension APIs.
+type Environment struct {
+	// ControlPlane is the ControlPlane including the apiserver and etcd
+	ControlPlane controlplane.ControlPlane
+
+	// Scheme is used to determine if conversion webhooks should be enabled
+	// for a particular CRD / object.
+	//
+	// Conversion webhooks are going to be enabled if an object in the scheme
+	// implements Hub and Spoke conversions.
+	//
+	// If nil, scheme.Scheme is used.
+	Scheme *runtime.Scheme
+
+	// Config can be used to talk to the apiserver. It's automatically
+	// populated if not set, using the standard controller-runtime config
+	// loading.
+	Config *rest.Config
+
+	// CRDInstallOptions are the options for installing CRDs.
+	CRDInstallOptions CRDInstallOptions
+
+	// WebhookInstallOptions are the options for installing webhooks.
+	WebhookInstallOptions WebhookInstallOptions
+
+	// ErrorIfCRDPathMissing provides an interface for the underlying
+	// CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures
+	// for missing CRD paths.
+	ErrorIfCRDPathMissing bool
+
+	// CRDs is a list of CRDs to install.
+	// If both this field and CRDs field in CRDInstallOptions are specified, the
+	// values are merged.
+	CRDs []*apiextensionsv1.CustomResourceDefinition
+
+	// CRDDirectoryPaths is a list of paths containing CRD yaml or json configs.
+	// If both this field and Paths field in CRDInstallOptions are specified, the
+	// values are merged.
+	CRDDirectoryPaths []string
+
+	// BinaryAssetsDirectory is the path where the binaries required for the envtest are
+	// located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS.
+	BinaryAssetsDirectory string
+
+	// UseExistingCluster indicates that this environment should use an
+	// existing kubeconfig, instead of trying to stand up a new control plane.
+	// This is useful in cases that need aggregated API servers and the like.
+	UseExistingCluster *bool
+
+	// ControlPlaneStartTimeout is the maximum duration each controlplane component
+	// may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT
+	// environment variable or 20 seconds if unspecified.
+	ControlPlaneStartTimeout time.Duration
+
+	// ControlPlaneStopTimeout is the maximum duration each controlplane component
+	// may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT
+	// environment variable or 20 seconds if unspecified.
+	ControlPlaneStopTimeout time.Duration
+
+	// KubeAPIServerFlags is the set of flags passed while starting the api server.
+	//
+	// Deprecated: use ControlPlane.GetAPIServer().Configure() instead.
+	KubeAPIServerFlags []string
+
+	// AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr.
+	// Enable this to get more visibility of the testing control plane.
+	// It respects the KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable.
+	AttachControlPlaneOutput bool
+}
+
+// Stop stops a running server.
+// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled
+// if CRDInstallOptions.CleanUpAfterUse is set to true.
+func (te *Environment) Stop() error {
+	if te.CRDInstallOptions.CleanUpAfterUse {
+		if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil {
+			return err
+		}
+	}
+
+	if err := te.WebhookInstallOptions.Cleanup(); err != nil {
+		return err
+	}
+
+	if te.useExistingCluster() {
+		return nil
+	}
+
+	return te.ControlPlane.Stop()
+}
+
+// Start starts a local Kubernetes server and updates te.Config with the
+// connection details for it.
+func (te *Environment) Start() (*rest.Config, error) {
+	if te.useExistingCluster() {
+		log.V(1).Info("using existing cluster")
+		if te.Config == nil {
+			// we want to allow people to pass in their own config, so
+			// only load a config if it hasn't already been set.
+			log.V(1).Info("automatically acquiring client configuration")
+
+			var err error
+			te.Config, err = config.GetConfig()
+			if err != nil {
+				return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err)
+			}
+		}
+	} else {
+		apiServer := te.ControlPlane.GetAPIServer()
+		if len(apiServer.Args) == 0 { //nolint:staticcheck
+			// pass these through separately from above in case something like
+			// AddUser defaults APIServer.
+			//
+			// TODO(directxman12): if/when we feel like making a bigger
+			// breaking change here, just make APIServer and Etcd non-pointers
+			// in ControlPlane.
+
+			// NB(directxman12): we still pass these in so that things work if the
+			// user manually specifies them, but in most cases we expect them to
+			// be nil so that we use the new .Configure() logic.
+			apiServer.Args = te.KubeAPIServerFlags //nolint:staticcheck
+		}
+		if te.ControlPlane.Etcd == nil {
+			te.ControlPlane.Etcd = &controlplane.Etcd{}
+		}
+
+		if os.Getenv(envAttachOutput) == "true" {
+			te.AttachControlPlaneOutput = true
+		}
+		if apiServer.Out == nil && te.AttachControlPlaneOutput {
+			apiServer.Out = os.Stdout
+		}
+		if apiServer.Err == nil && te.AttachControlPlaneOutput {
+			apiServer.Err = os.Stderr
+		}
+		if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput {
+			te.ControlPlane.Etcd.Out = os.Stdout
+		}
+		if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput {
+			te.ControlPlane.Etcd.Err = os.Stderr
+		}
+
+		apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory)
+		te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory)
+		te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory)
+
+		if err := te.defaultTimeouts(); err != nil {
+			return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err)
+		}
+		te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout
+		te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout
+		apiServer.StartTimeout = te.ControlPlaneStartTimeout
+		apiServer.StopTimeout = te.ControlPlaneStopTimeout
+
+		log.V(1).Info("starting control plane")
+		if err := te.startControlPlane(); err != nil {
+			return nil, fmt.Errorf("unable to start control plane itself: %w", err)
+		}
+
+		// Create the *rest.Config for creating new clients
+		baseConfig := &rest.Config{
+			// gotta go fast during tests -- we don't really care about overwhelming our test API server
+			QPS:   1000.0,
+			Burst: 2000.0,
+		}
+
+		adminInfo := User{Name: "admin", Groups: []string{"system:masters"}}
+		adminUser, err := te.ControlPlane.AddUser(adminInfo, baseConfig)
+		if err != nil {
+			return te.Config, fmt.Errorf("unable to provision admin user: %w", err)
+		}
+		te.Config = adminUser.Config()
+	}
+
+	// Set the default scheme if nil.
+	if te.Scheme == nil {
+		te.Scheme = scheme.Scheme
+	}
+
+	// Call PrepWithoutInstalling to set up certificates first
+	// and have them available to patch the CRD conversion webhooks as well.
+	if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil {
+		return nil, err
+	}
+
+	log.V(1).Info("installing CRDs")
+	te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs)
+	te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths)
+	te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing
+	te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions
+	crds, err := InstallCRDs(te.Config, te.CRDInstallOptions)
+	if err != nil {
+		return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err)
+	}
+	te.CRDs = crds
+
+	log.V(1).Info("installing webhooks")
+	if err := te.WebhookInstallOptions.Install(te.Config); err != nil {
+		return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err)
+	}
+	return te.Config, nil
+}
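A condensed sketch of the lifecycle above as it usually appears in suite setup; the manifest paths are illustrative:

package mytest // hypothetical suite wiring for Environment.Start / Stop

import (
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func startTestEnv() (*envtest.Environment, *rest.Config, error) {
	testEnv := &envtest.Environment{
		CRDDirectoryPaths:     []string{"config/crd/bases"},
		ErrorIfCRDPathMissing: true,
		WebhookInstallOptions: envtest.WebhookInstallOptions{
			Paths: []string{"config/webhook"},
		},
	}
	// Start boots etcd and kube-apiserver, installs CRDs and webhook
	// configurations, and returns an admin rest.Config.
	cfg, err := testEnv.Start()
	if err != nil {
		return nil, nil, err
	}
	// Callers should defer testEnv.Stop() to tear the control plane down.
	return testEnv, cfg, nil
}

+// AddUser provisions a new user for connecting to this Environment. The user will
+// have the specified name & belong to the specified groups.
+//
+// If you specify a "base" config, the returned REST Config will contain those
+// settings as well as any required by the authentication method. You can use
+// this to easily specify options like QPS.
+//
+// This is effectively a convenience alias for ControlPlane.AddUser -- see that
+// for more low-level details.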
+func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + return te.ControlPlane.AddUser(user, baseConfig) +} + +func (te *Environment) startControlPlane() error { + numTries, maxRetries := 0, 5 + var err error + for ; numTries < maxRetries; numTries++ { + // Start the control plane - retry if it fails + err = te.ControlPlane.Start() + if err == nil { + break + } + log.Error(err, "unable to start the controlplane", "tries", numTries) + } + if numTries == maxRetries { + return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err) + } + return nil +} + +func (te *Environment) defaultTimeouts() error { + var err error + if te.ControlPlaneStartTimeout == 0 { + if envVal := os.Getenv(envStartTimeout); envVal != "" { + te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout + } + } + + if te.ControlPlaneStopTimeout == 0 { + if envVal := os.Getenv(envStopTimeout); envVal != "" { + te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout + } + } + return nil +} + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} + +// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that +// you can use those to append your own additional arguments. +// +// Deprecated: use APIServer.Configure() instead. +var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go new file mode 100644 index 0000000..49d8773 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -0,0 +1,433 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "time" + + admissionv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// WebhookInstallOptions are the options for installing mutating or validating webhooks. 
+type WebhookInstallOptions struct {
+	// Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs.
+	Paths []string
+
+	// MutatingWebhooks is a list of MutatingWebhookConfigurations to install
+	MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration
+
+	// ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install
+	ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration
+
+	// IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true
+	IgnoreErrorIfPathMissing bool
+
+	// LocalServingHost is the host for serving webhooks on.
+	// It will be automatically populated.
+	LocalServingHost string
+
+	// LocalServingPort is the allocated port for serving webhooks on.
+	// It will be automatically populated by a random available local port.
+	LocalServingPort int
+
+	// LocalServingCertDir is the allocated directory for serving certificates.
+	// It will be automatically populated by the local temp dir.
+	LocalServingCertDir string
+
+	// LocalServingCAData is the CA that can be used to trust the serving certificates in LocalServingCertDir.
+	LocalServingCAData []byte
+
+	// LocalServingHostExternalName is the hostname to use to reach the webhook server.
+	LocalServingHostExternalName string
+
+	// MaxTime is the max time to wait for the webhooks to become available.
+	MaxTime time.Duration
+
+	// PollInterval is the interval at which to poll.
+	PollInterval time.Duration
+}
+
+// ModifyWebhookDefinitions modifies webhook definitions by:
+// - applying CABundle based on the provided tinyca
+// - if a webhook's client config uses a service spec, it's removed and replaced with a direct url.
+func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error {
+	caData := o.LocalServingCAData
+
+	// generate host port.
+	hostPort, err := o.generateHostPort()
+	if err != nil {
+		return err
+	}
+
+	for i := range o.MutatingWebhooks {
+		for j := range o.MutatingWebhooks[i].Webhooks {
+			updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData)
+		}
+	}
+
+	for i := range o.ValidatingWebhooks {
+		for j := range o.ValidatingWebhooks[i].Webhooks {
+			updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData)
+		}
+	}
+	return nil
+}
+
+func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) {
+	cc.CABundle = caData
+	if cc.Service != nil && cc.Service.Path != nil {
+		url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path)
+		cc.URL = &url
+		cc.Service = nil
+	}
+}
+
+func (o *WebhookInstallOptions) generateHostPort() (string, error) {
+	if o.LocalServingPort == 0 {
+		port, host, err := addr.Suggest(o.LocalServingHost)
+		if err != nil {
+			return "", fmt.Errorf("unable to grab random port for serving webhooks on: %w", err)
+		}
+		o.LocalServingPort = port
+		o.LocalServingHost = host
+	}
+	host := o.LocalServingHostExternalName
+	if host == "" {
+		host = o.LocalServingHost
+	}
+	return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil
+}
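To see what updateClientConfig does, here is a small self-contained demo of the rewrite: a service-based client config is redirected at the locally served endpoint. The host, port, path, and CA bytes are illustrative:

package mytest // hypothetical demo of the client-config rewrite above

import (
	"fmt"

	admissionv1 "k8s.io/api/admissionregistration/v1"
)

func demoClientConfigRewrite() {
	path := "validate-widgets"
	cc := admissionv1.WebhookClientConfig{
		Service: &admissionv1.ServiceReference{
			Namespace: "system", Name: "webhook-service", Path: &path,
		},
	}

	// Mirror updateClientConfig: trust the test CA, then swap the Service
	// reference for a direct URL at the local serving address.
	hostPort, caData := "127.0.0.1:9443", []byte("-----BEGIN CERTIFICATE-----...")
	cc.CABundle = caData
	if cc.Service != nil && cc.Service.Path != nil {
		url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path)
		cc.URL = &url
		cc.Service = nil
	}
	fmt.Println(*cc.URL) // https://127.0.0.1:9443/validate-widgets
}

+// PrepWithoutInstalling does the setup parts of Install (populating host-port,
+// setting up CAs, etc), without actually trying to do anything with webhook
+// definitions. This is largely useful for internal testing of
+// controller-runtime, where we need a random host-port & caData for webhook
+// tests, but may be useful in similar scenarios.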
+func (o *WebhookInstallOptions) PrepWithoutInstalling() error {
+	if err := o.setupCA(); err != nil {
+		return err
+	}
+
+	if err := parseWebhook(o); err != nil {
+		return err
+	}
+
+	return o.ModifyWebhookDefinitions()
+}
+
+// Install installs specified webhooks to the API server.
+func (o *WebhookInstallOptions) Install(config *rest.Config) error {
+	if len(o.LocalServingCAData) == 0 {
+		if err := o.PrepWithoutInstalling(); err != nil {
+			return err
+		}
+	}
+
+	if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil {
+		return err
+	}
+
+	return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o)
+}
+
+// Cleanup cleans up cert directories.
+func (o *WebhookInstallOptions) Cleanup() error {
+	if o.LocalServingCertDir != "" {
+		return os.RemoveAll(o.LocalServingCertDir)
+	}
+	return nil
+}
+
+// WaitForWebhooks waits for the Webhooks to be available through the API server.
+func WaitForWebhooks(config *rest.Config,
+	mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration,
+	validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration,
+	options WebhookInstallOptions) error {
+	waitingFor := map[schema.GroupVersionKind]*sets.Set[string]{}
+
+	for _, hook := range mutatingWebhooks {
+		h := hook
+		gvk, err := apiutil.GVKForObject(h, scheme.Scheme)
+		if err != nil {
+			return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %w", hook.GetName(), err)
+		}
+
+		if _, ok := waitingFor[gvk]; !ok {
+			waitingFor[gvk] = &sets.Set[string]{}
+		}
+		waitingFor[gvk].Insert(h.GetName())
+	}
+
+	for _, hook := range validatingWebhooks {
+		h := hook
+		gvk, err := apiutil.GVKForObject(h, scheme.Scheme)
+		if err != nil {
+			return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %w", hook.GetName(), err)
+		}
+
+		if _, ok := waitingFor[gvk]; !ok {
+			waitingFor[gvk] = &sets.Set[string]{}
+		}
+		waitingFor[gvk].Insert(hook.GetName())
+	}
+
+	// Poll until all the webhook configurations exist
+	p := &webhookPoller{config: config, waitingFor: waitingFor}
+	return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll)
+}
+
+// webhookPoller checks if all the webhook configurations have been created, and returns false if not.
+type webhookPoller struct {
+	// config is used to talk to the apiserver
+	config *rest.Config
+
+	// waitingFor is the map of configuration names, keyed by group-version-kind, that have not yet been found
+	waitingFor map[schema.GroupVersionKind]*sets.Set[string]
+}
+
+// poll checks if all the watched webhook configurations exist yet, and returns false if not.
+func (p *webhookPoller) poll() (done bool, err error) {
+	// Create a new client to avoid any caching
+	c, err := client.New(p.config, client.Options{})
+	if err != nil {
+		return false, err
+	}
+
+	allFound := true
+	for gvk, names := range p.waitingFor {
+		if names.Len() == 0 {
+			delete(p.waitingFor, gvk)
+			continue
+		}
+		for _, name := range names.UnsortedList() {
+			var obj = &unstructured.Unstructured{}
+			obj.SetGroupVersionKind(gvk)
+			err := c.Get(context.Background(), client.ObjectKey{
+				Namespace: "",
+				Name:      name,
+			}, obj)
+
+			if err == nil {
+				names.Delete(name)
+			}
+
+			if apierrors.IsNotFound(err) {
+				// Not created yet: keep polling rather than failing.
+				allFound = false
+			} else if err != nil {
+				return false, err
+			}
+		}
+	}
+	return allFound, nil
+}
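WaitForWebhooks leans on the wait helper's contract: the condition runs immediately and then once per interval until it reports done, returns an error, or the timeout lapses. A minimal sketch of that contract with illustrative durations:

package mytest // hypothetical demo of the wait.PollImmediate contract

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func demoPoll() error {
	attempts := 0
	// (true, nil) stops successfully; (false, nil) polls again after the
	// interval; a non-nil error aborts immediately.
	return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		attempts++
		fmt.Println("attempt", attempts)
		return attempts >= 3, nil
	})
}

+// setupCA creates a CA for testing and writes the generated certificates to disk.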
+func (o *WebhookInstallOptions) setupCA() error {
+	hookCA, err := certs.NewTinyCA()
+	if err != nil {
+		return fmt.Errorf("unable to set up webhook CA: %w", err)
+	}
+
+	names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName}
+	hookCert, err := hookCA.NewServingCert(names...)
+	if err != nil {
+		return fmt.Errorf("unable to set up webhook serving certs: %w", err)
+	}
+
+	localServingCertsDir, err := os.MkdirTemp("", "envtest-serving-certs-")
+	o.LocalServingCertDir = localServingCertsDir
+	if err != nil {
+		return fmt.Errorf("unable to create directory for webhook serving certs: %w", err)
+	}
+
+	certData, keyData, err := hookCert.AsBytes()
+	if err != nil {
+		return fmt.Errorf("unable to marshal webhook serving certs: %w", err)
+	}
+
+	if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec
+		return fmt.Errorf("unable to write webhook serving cert to disk: %w", err)
+	}
+	if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec
+		return fmt.Errorf("unable to write webhook serving key to disk: %w", err)
+	}
+
+	o.LocalServingCAData = certData
+	return nil
+}
+
+func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error {
+	cs, err := client.New(config, client.Options{})
+	if err != nil {
+		return err
+	}
+
+	// Create each webhook
+	for _, hook := range mutHooks {
+		hook := hook
+		log.V(1).Info("installing mutating webhook", "webhook", hook.GetName())
+		if err := ensureCreated(cs, hook); err != nil {
+			return err
+		}
+	}
+	for _, hook := range valHooks {
+		hook := hook
+		log.V(1).Info("installing validating webhook", "webhook", hook.GetName())
+		if err := ensureCreated(cs, hook); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureCreated creates the object, or updates it if it already exists in the cluster.
+func ensureCreated(cs client.Client, obj client.Object) error {
+	existing := obj.DeepCopyObject().(client.Object)
+	err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing)
+	switch {
+	case apierrors.IsNotFound(err):
+		if err := cs.Create(context.Background(), obj); err != nil {
+			return err
+		}
+	case err != nil:
+		return err
+	default:
+		log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName())
+		obj.SetResourceVersion(existing.GetResourceVersion())
+		if err := cs.Update(context.Background(), obj); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options.
+func parseWebhook(options *WebhookInstallOptions) error {
+	if len(options.Paths) > 0 {
+		for _, path := range options.Paths {
+			_, err := os.Stat(path)
+			if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) {
+				continue // skip this path
+			}
+			if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) {
+				return err // treat missing path as error
+			}
+			mutHooks, valHooks, err := readWebhooks(path)
+			if err != nil {
+				return err
+			}
+			options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...)
+			options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...)
+		}
+	}
+	return nil
+}
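readWebhooks below sniffs each YAML document's kind before committing to a concrete type. A compact sketch of that two-pass pattern against an in-memory document; the manifest content is up to the caller:

package mytest // hypothetical demo of kind-sniffing a YAML document

import (
	"fmt"

	admissionv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func sniffAndDecode(doc []byte) error {
	// First pass: decode just enough metadata to learn the kind.
	var generic metav1.PartialObjectMetadata
	if err := yaml.Unmarshal(doc, &generic); err != nil {
		return err
	}

	// Second pass: decode into the concrete type that kind calls for.
	if generic.Kind == "ValidatingWebhookConfiguration" {
		hook := &admissionv1.ValidatingWebhookConfiguration{}
		if err := yaml.Unmarshal(doc, hook); err != nil {
			return err
		}
		fmt.Println("decoded validating webhook configuration:", hook.GetName())
	}
	return nil
}

+// readWebhooks reads the Webhooks from files and unmarshals them into structs,
+// returning slices of mutating and validating webhook configurations.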
+func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) { + // Get the webhook files + var files []string + var err error + log.V(1).Info("reading Webhooks from path", "path", path) + info, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + if !info.IsDir() { + path, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.NewString(".json", ".yaml", ".yml") + + var mutHooks []*admissionv1.MutatingWebhookConfiguration + var valHooks []*admissionv1.ValidatingWebhookConfiguration + for _, file := range files { + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file)) + if err != nil { + return nil, nil, err + } + + for _, doc := range docs { + var generic metav1.PartialObjectMetadata + if err = yaml.Unmarshal(doc, &generic); err != nil { + return nil, nil, err + } + + const ( + admissionregv1 = "admissionregistration.k8s.io/v1" + ) + switch { + case generic.Kind == "MutatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.MutatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + mutHooks = append(mutHooks, hook) + case generic.Kind == "ValidatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.ValidatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + valHooks = append(valHooks, hook) + default: + continue + } + } + + log.V(1).Info("read webhooks from file", "file", file) + } + return mutHooks, valHooks, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go new file mode 100644 index 0000000..11e3982 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid +// importing k8s.io/kubernetes as a dependency. +// +// Provides file locking functionalities on unix systems. 
+package flock diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go new file mode 100644 index 0000000..ee7a434 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import "errors" + +var ( + // ErrAlreadyLocked is returned when the file is already locked. + ErrAlreadyLocked = errors.New("the file is already locked") +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go new file mode 100644 index 0000000..069a5b3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +// Acquire is not implemented on non-unix systems. +func Acquire(path string) error { + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go new file mode 100644 index 0000000..71ec576 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go @@ -0,0 +1,48 @@ +//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// Acquire acquires a lock on a file for the duration of the process. This method +// is reentrant. 
+func Acquire(path string) error { + fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) + if err != nil { + if errors.Is(err, os.ErrExist) { + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err + } + + // We don't need to close the fd since we should hold + // it until the process exits. + err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX) + if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB. + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go new file mode 100644 index 0000000..ffa33a8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr + +import ( + "errors" + "fmt" + "io/fs" + "net" + "os" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/flock" +) + +// TODO(directxman12): interface / release functionality for external port managers + +const ( + portReserveTime = 2 * time.Minute + portConflictRetry = 100 + portFilePrefix = "port-" +) + +var ( + cacheDir string +) + +func init() { + baseDir, err := os.UserCacheDir() + if err == nil { + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + // Either we didn't get a cache directory, or we can't use it + baseDir = os.TempDir() + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + panic(err) + } +} + +type portCache struct{} + +func (c *portCache) add(port int) (bool, error) { + // Remove outdated ports. + if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) { + return nil + } + info, err := d.Info() + if err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + if time.Since(info.ModTime()) > portReserveTime { + if err := os.Remove(filepath.Join(cacheDir, path)); err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. + if os.IsNotExist(err) { + return nil + } + return err + } + } + return nil + }); err != nil { + return false, err + } + // Try allocating new port, by acquiring a file. 
+	path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port)
+	if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+var cache = &portCache{}
+
+func suggest(listenHost string) (*net.TCPListener, int, string, error) {
+	if listenHost == "" {
+		listenHost = "localhost"
+	}
+	addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0"))
+	if err != nil {
+		return nil, -1, "", err
+	}
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return l, l.Addr().(*net.TCPAddr).Port,
+		addr.IP.String(),
+		nil
+}
+
+// Suggest suggests an address a process can listen on. It returns
+// a tuple consisting of a free port and the hostname resolved to its IP.
+// It makes sure that a newly allocated port does not conflict with ports
+// allocated within the reservation window (see portReserveTime).
+func Suggest(listenHost string) (int, string, error) {
+	for i := 0; i < portConflictRetry; i++ {
+		listener, port, resolvedHost, err := suggest(listenHost)
+		if err != nil {
+			return -1, "", err
+		}
+		defer listener.Close()
+		if ok, err := cache.add(port); ok {
+			return port, resolvedHost, nil
+		} else if err != nil {
+			return -1, "", err
+		}
+	}
+	return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go
new file mode 100644
index 0000000..b418823
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certs
+
+// NB(directxman12): nothing has verified that this has good settings. In fact,
+// the settings generated here are probably terrible, but they're fine for integration
+// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're
+// ONLY for use with envtest's ability to configure webhook testing.
+// If I didn't otherwise want to avoid adding a dependency on cfssl, I'd just use that.
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	crand "crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"net"
+	"time"
+
+	certutil "k8s.io/client-go/util/cert"
+)
+
+var (
+	ellipticCurve = elliptic.P256()
+	bigOne        = big.NewInt(1)
+)
+
+// CertPair is a private key and certificate for use for client auth, as a CA, or serving.
+type CertPair struct {
+	Key  crypto.Signer
+	Cert *x509.Certificate
+}
+
+// CertBytes returns the PEM-encoded version of the certificate for this pair.
+func (k CertPair) CertBytes() []byte {
+	return pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: k.Cert.Raw,
+	})
+}
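Since CertPair hands certificates back as PEM, the inverse operation is occasionally handy in tests; a standard-library-only sketch:

package mytest // hypothetical round-trip for the PEM bytes produced above

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// parseCertPEM decodes the first PEM block and parses it as an X.509
// certificate, the inverse of CertPair.CertBytes.
func parseCertPEM(certPEM []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil || block.Type != "CERTIFICATE" {
		return nil, fmt.Errorf("no CERTIFICATE block found")
	}
	return x509.ParseCertificate(block.Bytes)
}

+// AsBytes encodes the keypair in the appropriate formats for on-disk storage (PEM and
+// PKCS8, respectively).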
+func (k CertPair) AsBytes() (cert []byte, key []byte, err error) {
+	cert = k.CertBytes()
+
+	rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to encode private key: %w", err)
+	}
+
+	key = pem.EncodeToMemory(&pem.Block{
+		Type:  "PRIVATE KEY",
+		Bytes: rawKeyData,
+	})
+
+	return cert, key, nil
+}
+
+// TinyCA supports signing serving certs and client-certs,
+// and can be used as an auth mechanism with envtest.
+type TinyCA struct {
+	CA      CertPair
+	orgName string
+
+	nextSerial *big.Int
+}
+
+// newPrivateKey generates a new ECDSA private key on the P-256 curve.
+func newPrivateKey() (crypto.Signer, error) {
+	return ecdsa.GenerateKey(ellipticCurve, crand.Reader)
+}
+
+// NewTinyCA creates a new tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY.
+// Don't use this for anything else!
+func NewTinyCA() (*TinyCA, error) {
+	caPrivateKey, err := newPrivateKey()
+	if err != nil {
+		return nil, fmt.Errorf("unable to generate private key for CA: %w", err)
+	}
+	caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}}
+	caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to generate certificate for CA: %w", err)
+	}
+
+	return &TinyCA{
+		CA:         CertPair{Key: caPrivateKey, Cert: caCert},
+		orgName:    "envtest",
+		nextSerial: big.NewInt(1),
+	}, nil
+}
+
+func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) {
+	now := time.Now()
+
+	key, err := newPrivateKey()
+	if err != nil {
+		return CertPair{}, fmt.Errorf("unable to create private key: %w", err)
+	}
+
+	serial := new(big.Int).Set(c.nextSerial)
+	c.nextSerial.Add(c.nextSerial, bigOne)
+
+	template := x509.Certificate{
+		Subject:      pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization},
+		DNSNames:     cfg.AltNames.DNSNames,
+		IPAddresses:  cfg.AltNames.IPs,
+		SerialNumber: serial,
+
+		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage: cfg.Usages,
+
+		// technically not necessary for testing, but let's set it anyway just in case.
+		NotBefore: now.UTC(),
+		// 1 week -- the default for cfssl, and just long enough for a
+		// long-term test, but not too long that anyone would try to use this
+		// seriously.
+		NotAfter: now.Add(168 * time.Hour).UTC(),
+	}
+
+	certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key)
+	if err != nil {
+		return CertPair{}, fmt.Errorf("unable to create certificate: %w", err)
+	}
+
+	cert, err := x509.ParseCertificate(certRaw)
+	if err != nil {
+		return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %w", err)
+	}
+
+	return CertPair{
+		Key:  key,
+		Cert: cert,
+	}, nil
+}
+
+// NewServingCert returns a new CertPair for serving HTTPS on localhost (or other specified names).
+func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) {
+	if len(names) == 0 {
+		names = []string{"localhost"}
+	}
+	dnsNames, ips, err := resolveNames(names)
+	if err != nil {
+		return CertPair{}, err
+	}
+
+	return c.makeCert(certutil.Config{
+		CommonName:   "localhost",
+		Organization: []string{c.orgName},
+		AltNames: certutil.AltNames{
+			DNSNames: dnsNames,
+			IPs:      ips,
+		},
+		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+	})
+}
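Putting the pieces together, a sketch of provisioning serving certs as it could appear inside controller-runtime's own tests (the certs package is internal, so this is not importable from user code):

package mytest // illustrative only: certs is internal to controller-runtime

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
)

func demoServingCert() error {
	ca, err := certs.NewTinyCA()
	if err != nil {
		return err
	}
	// Issue a short-lived serving cert for localhost.
	pair, err := ca.NewServingCert("localhost")
	if err != nil {
		return err
	}
	certPEM, keyPEM, err := pair.AsBytes()
	if err != nil {
		return err
	}
	fmt.Println(len(certPEM), len(keyPEM)) // PEM blobs, ready to write to disk
	return nil
}

+// ClientInfo describes some Kubernetes user for the purposes of creating
+// client certificates.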
+type ClientInfo struct {
+	// Name is the user name (embedded as the cert's CommonName)
+	Name string
+	// Groups are the groups to which this user belongs (embedded as the cert's
+	// Organization)
+	Groups []string
+}
+
+// NewClientCert produces a new CertPair suitable for use with Kubernetes
+// client cert auth with an API server validating based on this CA.
+func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) {
+	return c.makeCert(certutil.Config{
+		CommonName:   user.Name,
+		Organization: user.Groups,
+		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+	})
+}
+
+func resolveNames(names []string) ([]string, []net.IP, error) {
+	dnsNames := []string{}
+	ips := []net.IP{}
+	for _, name := range names {
+		if name == "" {
+			continue
+		}
+		ip := net.ParseIP(name)
+		if ip == nil {
+			dnsNames = append(dnsNames, name)
+			// Also resolve to IPs.
+			nameIPs, err := net.LookupHost(name)
+			if err != nil {
+				return nil, nil, err
+			}
+			for _, nameIP := range nameIPs {
+				ip = net.ParseIP(nameIP)
+				if ip != nil {
+					ips = append(ips, ip)
+				}
+			}
+		} else {
+			ips = append(ips, ip)
+		}
+	}
+	return dnsNames, ips, nil
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go
new file mode 100644
index 0000000..c9a1a23
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go
@@ -0,0 +1,468 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+const (
+	// saKeyFile is the name of the service account signing private key file.
+	saKeyFile = "sa-signer.key"
+	// saCertFile is the name of the service account signing public key (cert) file.
+	saCertFile = "sa-signer.crt"
+)
+
+// SecureServing provides/configures how the API server serves on the secure port.
+type SecureServing struct {
+	// ListenAddr contains the host & port to serve on.
+	//
+	// Configurable. If unset, it will be defaulted.
+	process.ListenAddr
+	// CA contains the CA that signed the API server's serving certificates.
+	//
+	// Read-only.
+	CA []byte
+	// Authn can be used to provision users, and override what type of
+	// authentication is used to provision users.
+	//
+	// Configurable. If unset, it will be defaulted.
+	Authn
+}
+
+// APIServer knows how to run a kubernetes apiserver.
+type APIServer struct {
+	// URL is the address the ApiServer should listen on for client
+	// connections.
+	//
+	// If set, this will configure the *insecure* serving details.
+	// If unset, it will contain the insecure port if insecure serving is enabled,
+	// and otherwise will contain the secure port.
+	//
+	// If this is not specified, we default to a random free port on localhost.
+	//
+	// Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally.
+	URL *url.URL
+
+	// SecurePort is the additional secure port that the APIServer should listen on.
+	//
+	// If set, this will override SecureServing.Port.
+	//
+	// Deprecated: use SecureServing.
+	SecurePort int
+
+	// SecureServing indicates how the API server will serve on the secure port.
+	//
+	// Some parts are configurable. Will be defaulted if unset.
+	SecureServing
+
+	// InsecureServing indicates how the API server will serve on the insecure port.
+	//
+	// If unset, the insecure port will be disabled. Set to an empty struct to get
+	// default values.
+	//
+	// Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure
+	// serving instead.
+	InsecureServing *process.ListenAddr
+
+	// Path is the path to the apiserver binary.
+	//
+	// If this is left as the empty string, we will attempt to locate a binary,
+	// by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and
+	// the default test assets directory. See the "Binaries" section above (in
+	// doc.go) for details.
+	Path string
+
+	// Args is a list of arguments which will be passed to the APIServer binary.
+	// Before they are passed on, they will be evaluated as go-template strings.
+	// This means you can use fields which are defined and exported on this
+	// APIServer struct (e.g. "--cert-dir={{ .Dir }}").
+	// Those templates will be evaluated after the defaulting of the APIServer's
+	// fields has already happened and just before the binary actually gets
+	// started. Thus you have access to calculated fields like `URL` and others.
+	//
+	// If not specified, the minimal set of arguments to run the APIServer will
+	// be used.
+	//
+	// They will be loaded into the same argument set as Configure. Each flag
+	// will be Append-ed to the configured arguments just before launch.
+	//
+	// Deprecated: use Configure instead.
+	Args []string
+
+	// CertDir is a path to a directory containing whatever certificates the
+	// APIServer will need.
+	//
+	// If left unspecified, then the Start() method will create a fresh temporary
+	// directory, and the Stop() method will clean it up.
+	CertDir string
+
+	// EtcdURL is the URL of the Etcd the APIServer should use.
+	//
+	// If this is not specified, the Start() method will return an error.
+	EtcdURL *url.URL
+
+	// StartTimeout, StopTimeout specify the time the APIServer is allowed to
+	// take when starting and stopping before an error is emitted.
+	//
+	// If not specified, these default to 20 seconds.
+	StartTimeout time.Duration
+	StopTimeout  time.Duration
+
+	// Out, Err specify where APIServer should write its StdOut, StdErr to.
+	//
+	// If not specified, the output will be discarded.
+	Out io.Writer
+	Err io.Writer
+
+	processState *process.State
+
+	// args contains the structured arguments to use for running the API server.
+	// Lazily initialized by .Configure(), defaulted eventually with .defaultArgs()
+	args *process.Arguments
+}
+
+// Configure returns Arguments that may be used to customize the
+// flags used to launch the API server. A set of defaults will
+// be applied underneath.
+func (s *APIServer) Configure() *process.Arguments {
+	if s.args == nil {
+		s.args = process.EmptyArguments()
+	}
+	return s.args
+}
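Configure exposes the structured flag set; a brief sketch of customizing flags before Start. Disable is used elsewhere in this file; Set is assumed from the process.Arguments API, and the flag values are illustrative:

package mytest // hypothetical flag customization via Configure

import "sigs.k8s.io/controller-runtime/pkg/envtest"

func tweakAPIServerFlags(env *envtest.Environment) {
	args := env.ControlPlane.GetAPIServer().Configure()
	// Override one default and drop another flag entirely; both changes land
	// in the structured argument set merged just before launch.
	args.Set("service-cluster-ip-range", "10.96.0.0/16")
	args.Disable("insecure-port")
}

+// Start starts the apiserver, waits for it to come up, and returns an error
+// if one occurred.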
+func (s *APIServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) prepare() error { + if err := s.setProcessState(); err != nil { + return err + } + return s.Authn.Start() +} + +// configurePorts configures the serving ports for this API server. +// +// Most of this method currently deals with making the deprecated fields +// take precedence over the new fields. +func (s *APIServer) configurePorts() error { + // prefer the old fields to the new fields if a user set one, + // otherwise, default the new fields and populate the old ones. + + // Insecure: URL, InsecureServing + if s.URL != nil { + s.InsecureServing = &process.ListenAddr{ + Address: s.URL.Hostname(), + Port: s.URL.Port(), + } + } else if insec := s.InsecureServing; insec != nil { + if insec.Port == "" || insec.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused insecure port: %w", err) + } + s.InsecureServing.Port = strconv.Itoa(port) + s.InsecureServing.Address = host + } + s.URL = s.InsecureServing.URL("http", "") + } + + // Secure: SecurePort, SecureServing + if s.SecurePort != 0 { + s.SecureServing.Port = strconv.Itoa(s.SecurePort) + // if we don't have an address, try the insecure address, and otherwise + // default to loopback. + if s.SecureServing.Address == "" { + if s.InsecureServing != nil { + s.SecureServing.Address = s.InsecureServing.Address + } else { + s.SecureServing.Address = "127.0.0.1" + } + } + } else if s.SecureServing.Port == "" || s.SecureServing.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused secure port: %w", err) + } + s.SecureServing.Port = strconv.Itoa(port) + s.SecureServing.Address = host + s.SecurePort = port + } + + return nil +} + +func (s *APIServer) setProcessState() error { + if s.EtcdURL == nil { + return fmt.Errorf("expected EtcdURL to be configured") + } + + var err error + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + s.processState = &process.State{ + Dir: s.CertDir, + Path: s.Path, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + } + if err := s.processState.Init("kube-apiserver"); err != nil { + return err + } + + if err := s.configurePorts(); err != nil { + return err + } + + // the secure port will always be on, so use that + s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz") + + s.CertDir = s.processState.Dir + s.Path = s.processState.Path + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + if err := s.populateAPIServerCerts(); err != nil { + return err + } + + if s.SecureServing.Authn == nil { + authn, err := NewCertAuthn() + if err != nil { + return err + } + s.SecureServing.Authn = authn + } + + if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil { + return err + } + + // NB(directxman12): insecure port is a mess: + // - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to + // disable it, otherwise the default will be used and we'll conflict. + // - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it + // - 1.24 won't have the flag at all... 
+	//
+	// In an effort to automatically do the right thing during this mess, we do feature discovery
+	// on the flags, and hope that we've "parsed" them properly.
+	//
+	// TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out,
+	// might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's
+	// default args.
+	if err := s.discoverFlags(); err != nil {
+		return err
+	}
+
+	s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck
+		Data:     s,
+		Defaults: s.defaultArgs(),
+		MinimalDefaults: map[string][]string{
+			// as per kubernetes-sigs/controller-runtime#641, we need this (we
+			// probably need other stuff too, but this is the only thing that was
+			// previously considered a "minimal default")
+			"service-cluster-ip-range": {"10.0.0.0/24"},
+
+			// we need *some* authorization mode for health checks on the secure port,
+			// so default to RBAC unless the user set something else (in which case
+			// this'll be ignored due to SliceToArguments using AppendNoDefaults).
+			"authorization-mode": {"RBAC"},
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// discoverFlags checks for certain flags that *must* be set in certain
+// versions, and *must not* be set in others.
+func (s *APIServer) discoverFlags() error {
+	// Present: <1.24, Absent: >= 1.24
+	present, err := s.processState.CheckFlag("insecure-port")
+	if err != nil {
+		return err
+	}
+
+	if !present {
+		s.Configure().Disable("insecure-port")
+	}
+
+	return nil
+}
+
+func (s *APIServer) defaultArgs() map[string][]string {
+	args := map[string][]string{
+		"service-cluster-ip-range": {"10.0.0.0/24"},
+		"allow-privileged":         {"true"},
+		// we're keeping this disabled because if enabled, the default SA is
+		// missing, which would force all tests to create one. In normal
+		// apiserver operation this SA is created by a controller, but that
+		// controller is not run in the integration environment.
+		"disable-admission-plugins": {"ServiceAccount"},
+		"cert-dir":                  {s.CertDir},
+		"authorization-mode":        {"RBAC"},
+		"secure-port":               {s.SecureServing.Port},
+		// NB(directxman12): previously we didn't set the bind address for the secure
+		// port. It *shouldn't* make a difference unless people are doing something really
+		// funky, but if you start to get bug reports look here ;-)
+		"bind-address": {s.SecureServing.Address},
+
+		// required on 1.20+, fine to leave on for <1.20
+		"service-account-issuer":           {s.SecureServing.URL("https", "/").String()},
+		"service-account-key-file":         {filepath.Join(s.CertDir, saCertFile)},
+		"service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)},
+	}
+	if s.EtcdURL != nil {
+		args["etcd-servers"] = []string{s.EtcdURL.String()}
+	}
+	if s.URL != nil {
+		args["insecure-port"] = []string{s.URL.Port()}
+		args["insecure-bind-address"] = []string{s.URL.Hostname()}
+	} else {
+		// TODO(directxman12): remove this once 1.21 is the lowest version we support
+		// (this might be a while, but this line'll break as of 1.24, so see the comment
+		// in Start)
+		args["insecure-port"] = []string{"0"}
+	}
+	return args
+}
+
+func (s *APIServer) populateAPIServerCerts() error {
+	_, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt"))
+	if !os.IsNotExist(statErr) {
+		return statErr
+	}
+
+	ca, err := certs.NewTinyCA()
+	if err != nil {
+		return err
+	}
+
+	servingCerts, err := ca.NewServingCert()
+	if err != nil {
+		return err
+	}
+
+	certData, keyData, err := servingCerts.AsBytes()
+	if err != nil {
+		return err
+	}
+
+	if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec
+		return err
+	}
+	if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec
+		return err
+	}
+
+	s.SecureServing.CA = ca.CA.CertBytes()
+
+	// service account signing files too
+	saCA, err := certs.NewTinyCA()
+	if err != nil {
+		return err
+	}
+
+	saCert, saKey, err := saCA.CA.AsBytes()
+	if err != nil {
+		return err
+	}
+
+	if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec
+		return err
+	}
+	return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) //nolint:gosec
+}
+
+// Stop stops this process gracefully, waits for its termination, and cleans up
+// the CertDir if necessary.
+func (s *APIServer) Stop() error {
+	if s.processState != nil {
+		if s.processState.DirNeedsCleaning {
+			s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart
+		}
+		if err := s.processState.Stop(); err != nil {
+			return err
+		}
+	}
+	return s.Authn.Stop()
+}
+
+// APIServerDefaultArgs exposes the default args for the APIServer so that you
+// can use those to append your own additional arguments.
+//
+// Note that these arguments don't handle newer API servers well due to the more
+// complex feature detection needed. It's recommended that you switch to .Configure
+// as you upgrade API server versions.
+//
+// Deprecated: use APIServer.Configure().
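+//
+// For example, instead of appending to these args you might write the
+// equivalent with Configure (a sketch; the admission plugin named here is
+// purely illustrative):
+//
+//	server.Configure().Append("enable-admission-plugins", "NamespaceLifecycle")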
+var APIServerDefaultArgs = []string{
+	"--advertise-address=127.0.0.1",
+	"--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}",
+	"--cert-dir={{ .CertDir }}",
+	"--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}",
+	"{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}",
+	"--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}",
+	// we're keeping this disabled because if enabled, the default SA is missing, which would force all tests to create one.
+	// In normal apiserver operation this SA is created by a controller, but that controller is not run in the integration environment.
+	"--disable-admission-plugins=ServiceAccount",
+	"--service-cluster-ip-range=10.0.0.0/24",
+	"--allow-privileged=true",
+	// NB(directxman12): we also enable RBAC if nothing else was enabled
+}
+
+// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED)
+// function that sets up the API server just before starting it,
+// without actually starting it. This saves time on tests.
+//
+// NB(directxman12): do not expose this outside of internal -- it's unsafe to
+// use, because things like port allocation could race even more than they
+// currently do if you later call start!
+func PrepareAPIServer(s *APIServer) error {
+	return s.prepare()
+}
+
+// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED)
+// function that returns the arguments computed for launching the API server
+// during preparation, without actually starting it. It's public to make
+// testing easier.
+//
+// NB(directxman12): do not expose this outside of internal.
+func APIServerArguments(s *APIServer) []string {
+	return s.processState.Args
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go
new file mode 100644
index 0000000..16c86a7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+// User represents a Kubernetes user.
+type User struct {
+	// Name is the user's Name.
+	Name string
+	// Groups are the groups to which the user belongs.
+	Groups []string
+}
+
+// Authn knows how to configure an API server for a particular type of authentication,
+// and provision users under that authentication scheme.
+//
+// The methods must be called in the following order (as presented below in the interface
+// for a mnemonic):
+//
+// 1. Configure
+// 2. Start
+// 3. AddUser (0+ calls)
+// 4. Stop.
+type Authn interface {
+	// Configure provides the working directory to this authenticator,
+	// and configures the given API server arguments to make use of this authenticator.
+	//
+	// Should be called first.
+	Configure(workDir string, args *process.Arguments) error
+	// Start runs this authenticator. Will be called just before API server start.
+	//
+	// Must be called after Configure.
+	Start() error
+	// AddUser provisions a user, returning a copy of the given base rest.Config
+	// configured to authenticate as that user.
+	//
+	// May only be called while the authenticator is "running".
+	AddUser(user User, baseCfg *rest.Config) (*rest.Config, error)
+	// Stop shuts down this authenticator.
+	Stop() error
+}
+
+// CertAuthn is an authenticator (Authn) that makes use of client certificate authn.
+type CertAuthn struct {
+	// ca is the CA used to sign the client certs
+	ca *certs.TinyCA
+	// certDir is the directory used to write the CA crt file
+	// so that the API server can read it.
+	certDir string
+}
+
+// NewCertAuthn creates a new client-cert-based Authn with a new CA.
+func NewCertAuthn() (*CertAuthn, error) {
+	ca, err := certs.NewTinyCA()
+	if err != nil {
+		return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err)
+	}
+	return &CertAuthn{
+		ca: ca,
+	}, nil
+}
+
+// AddUser provisions a new user that's authenticated via certificates, with
+// the given username and groups embedded in the certificate as expected by the
+// API server.
+func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) {
+	certs, err := c.ca.NewClientCert(certs.ClientInfo{
+		Name:   user.Name,
+		Groups: user.Groups,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err)
+	}
+
+	crt, key, err := certs.AsBytes()
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err)
+	}
+
+	cfg := rest.CopyConfig(baseCfg)
+	cfg.CertData = crt
+	cfg.KeyData = key
+
+	return cfg, nil
+}
+
+// caCrtPath returns the path to the on-disk client-cert CA crt file.
+func (c *CertAuthn) caCrtPath() string {
+	return filepath.Join(c.certDir, "client-cert-auth-ca.crt")
+}
+
+// Configure provides the working directory to this authenticator,
+// and configures the given API server arguments to make use of this authenticator.
+func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error {
+	c.certDir = workDir
+	args.Set("client-ca-file", c.caCrtPath())
+	return nil
+}
+
+// Start runs this authenticator. Will be called just before API server start.
+//
+// Must be called after Configure.
+func (c *CertAuthn) Start() error {
+	if len(c.certDir) == 0 {
+		return fmt.Errorf("start called before configure")
+	}
+	caCrt := c.ca.CA.CertBytes()
+	if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec
+		return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err)
+	}
+
+	return nil
+}
+
+// Stop shuts down this authenticator.
+func (c *CertAuthn) Stop() error {
+	// no-op -- our workdir is cleaned up for us automatically
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go
new file mode 100644
index 0000000..c30d213
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"io"
+	"net"
+	"net/url"
+	"strconv"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+// Etcd knows how to run an etcd server.
+type Etcd struct {
+	// URL is the address the Etcd should listen on for client connections.
+	//
+	// If this is not specified, we default to a random free port on localhost.
+	URL *url.URL
+
+	// Path is the path to the etcd binary.
+	//
+	// If this is left as the empty string, we will attempt to locate a binary,
+	// by checking for the TEST_ASSET_ETCD environment variable, and the default
+	// test assets directory. See the "Binaries" section above (in doc.go) for
+	// details.
+	Path string
+
+	// Args is a list of arguments which will be passed to the Etcd binary. Before
+	// they are passed on, they will be evaluated as go-template strings. This
+	// means you can use fields which are defined and exported on this Etcd
+	// struct (e.g. "--data-dir={{ .Dir }}").
+	// Those templates will be evaluated after the defaulting of the Etcd's
+	// fields has already happened and just before the binary actually gets
+	// started. Thus you have access to calculated fields like `URL` and others.
+	//
+	// If not specified, the minimal set of arguments to run the Etcd will be
+	// used.
+	//
+	// They will be loaded into the same argument set as Configure. Each flag
+	// will be Append-ed to the configured arguments just before launch.
+	//
+	// Deprecated: use Configure instead.
+	Args []string
+
+	// DataDir is a path to a directory in which etcd can store its state.
+	//
+	// If left unspecified, then the Start() method will create a fresh temporary
+	// directory, and the Stop() method will clean it up.
+	DataDir string
+
+	// StartTimeout, StopTimeout specify the time the Etcd is allowed to
+	// take when starting and stopping before an error is emitted.
+	//
+	// If not specified, these default to 20 seconds.
+	StartTimeout time.Duration
+	StopTimeout  time.Duration
+
+	// Out, Err specify where Etcd should write its StdOut, StdErr to.
+	//
+	// If not specified, the output will be discarded.
+	Out io.Writer
+	Err io.Writer
+
+	// processState contains the actual details about this running process
+	processState *process.State
+
+	// args contains the structured arguments to use for running etcd.
+	// Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs()
+	args *process.Arguments
+
+	// listenPeerURL is the address the Etcd should listen on for peer connections.
+	// It's automatically generated and a random port is picked during execution.
+	listenPeerURL *url.URL
+}
+
+// Start starts the etcd, waits for it to come up, and returns an error, if one
+// occurred.
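+//
+// A minimal sketch -- the zero value is usable, since unset fields (URL,
+// DataDir, timeouts) are defaulted at startup:
+//
+//	etcd := &Etcd{}
+//	if err := etcd.Start(); err != nil {
+//		// handle startup failure
+//	}
+//	defer etcd.Stop() //nolint:errcheck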
+func (e *Etcd) Start() error {
+	if err := e.setProcessState(); err != nil {
+		return err
+	}
+	return e.processState.Start(e.Out, e.Err)
+}
+
+func (e *Etcd) setProcessState() error {
+	e.processState = &process.State{
+		Dir:          e.DataDir,
+		Path:         e.Path,
+		StartTimeout: e.StartTimeout,
+		StopTimeout:  e.StopTimeout,
+	}
+
+	// unconditionally re-set this so we can successfully restart
+	// TODO(directxman12): we supported this in the past, but do we actually
+	// want to support re-using an etcd object to restart? The loss
+	// of provisioned users is surprising to say the least.
+	if err := e.processState.Init("etcd"); err != nil {
+		return err
+	}
+
+	// Set the listen url.
+	if e.URL == nil {
+		port, host, err := addr.Suggest("")
+		if err != nil {
+			return err
+		}
+		e.URL = &url.URL{
+			Scheme: "http",
+			Host:   net.JoinHostPort(host, strconv.Itoa(port)),
+		}
+	}
+
+	// Set the listen peer URL.
+	{
+		port, host, err := addr.Suggest("")
+		if err != nil {
+			return err
+		}
+		e.listenPeerURL = &url.URL{
+			Scheme: "http",
+			Host:   net.JoinHostPort(host, strconv.Itoa(port)),
+		}
+	}
+
+	// can use /health as of etcd 3.3.0
+	e.processState.HealthCheck.URL = *e.URL
+	e.processState.HealthCheck.Path = "/health"
+
+	e.DataDir = e.processState.Dir
+	e.Path = e.processState.Path
+	e.StartTimeout = e.processState.StartTimeout
+	e.StopTimeout = e.processState.StopTimeout
+
+	var err error
+	e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck
+		Data:     e,
+		Defaults: e.defaultArgs(),
+	})
+	return err
+}
+
+// Stop stops this process gracefully, waits for its termination, and cleans up
+// the DataDir if necessary.
+func (e *Etcd) Stop() error {
+	if e.processState.DirNeedsCleaning {
+		e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart
+	}
+	return e.processState.Stop()
+}
+
+func (e *Etcd) defaultArgs() map[string][]string {
+	args := map[string][]string{
+		"listen-peer-urls": {e.listenPeerURL.String()},
+		"data-dir":         {e.DataDir},
+	}
+	if e.URL != nil {
+		args["advertise-client-urls"] = []string{e.URL.String()}
+		args["listen-client-urls"] = []string{e.URL.String()}
+	}
+
+	// Add unsafe no fsync, available from etcd 3.5
+	if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok {
+		args["unsafe-no-fsync"] = []string{"true"}
+	}
+	return args
+}
+
+// Configure returns Arguments that may be used to customize the
+// flags used to launch etcd. A set of defaults will
+// be applied underneath.
+func (e *Etcd) Configure() *process.Arguments {
+	if e.args == nil {
+		e.args = process.EmptyArguments()
+	}
+	return e.args
+}
+
+// EtcdDefaultArgs exposes the default args for Etcd so that you
+// can use those to append your own additional arguments.
+var EtcdDefaultArgs = []string{
+	"--listen-peer-urls=http://localhost:0",
+	"--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}",
+	"--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}",
+	"--data-dir={{ .DataDir }}",
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go
new file mode 100644
index 0000000..a27b7a0
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os/exec" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + kcapi "k8s.io/client-go/tools/clientcmd/api" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + envtestName = "envtest" +) + +// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config. +// The options are tailored towards the rest.Configs we generate, so they're +// not broadly applicable. +// +// This is not intended to be exposed beyond internal for the above reasons. +func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) { + kubeConfig := kcapi.NewConfig() + protocol := "https" + if !rest.IsConfigTransportTLS(*cfg) { + protocol = "http" + } + + // cfg.Host is a URL, so we need to parse it so we can properly append the API path + baseURL, err := url.Parse(cfg.Host) + if err != nil { + return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err) + } + + kubeConfig.Clusters[envtestName] = &kcapi.Cluster{ + // TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config), + // we can just use that. Note that this is not the same as the public DefaultServerURL, + // which requires us to pass a bunch of stuff in manually. + Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(), + CertificateAuthorityData: cfg.CAData, + } + kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{ + // try to cover all auth strategies that aren't plugins + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + Token: cfg.BearerToken, + Username: cfg.Username, + Password: cfg.Password, + } + kcCtx := kcapi.NewContext() + kcCtx.Cluster = envtestName + kcCtx.AuthInfo = envtestName + kubeConfig.Contexts[envtestName] = kcCtx + kubeConfig.CurrentContext = envtestName + + contents, err := clientcmd.Write(*kubeConfig) + if err != nil { + return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err) + } + return contents, nil +} + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. + Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. 
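+//
+// For example (a sketch; assumes Opts already carries connection flags such as
+// --kubeconfig, and that the named resources exist):
+//
+//	stdout, stderr, err := k.Run("get", "pods")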
+func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) {
+	if k.Path == "" {
+		k.Path = process.BinPathFinder("kubectl", "")
+	}
+
+	stdoutBuffer := &bytes.Buffer{}
+	stderrBuffer := &bytes.Buffer{}
+	allArgs := append(k.Opts, args...)
+
+	cmd := exec.Command(k.Path, allArgs...)
+	cmd.Stdout = stdoutBuffer
+	cmd.Stderr = stderrBuffer
+
+	err = cmd.Run()
+
+	return stdoutBuffer, stderrBuffer, err
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go
new file mode 100644
index 0000000..456183a
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
+)
+
+// NewTinyCA creates a new tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY.
+// Don't use this for anything else!
+var NewTinyCA = certs.NewTinyCA
+
+// ControlPlane is a struct that knows how to start your test control plane.
+//
+// Right now, that means Etcd and your APIServer. This is likely to increase in
+// future.
+type ControlPlane struct {
+	APIServer *APIServer
+	Etcd      *Etcd
+
+	// KubectlPath will override the default asset search path for kubectl.
+	KubectlPath string
+
+	// for the deprecated methods (Kubectl, etc)
+	defaultUserCfg     *rest.Config
+	defaultUserKubectl *KubeCtl
+}
+
+// Start will start your control plane processes. To stop them, call Stop().
+func (f *ControlPlane) Start() (retErr error) {
+	if f.Etcd == nil {
+		f.Etcd = &Etcd{}
+	}
+	if err := f.Etcd.Start(); err != nil {
+		return err
+	}
+	defer func() {
+		if retErr != nil {
+			_ = f.Etcd.Stop()
+		}
+	}()
+
+	if f.APIServer == nil {
+		f.APIServer = &APIServer{}
+	}
+	f.APIServer.EtcdURL = f.Etcd.URL
+	if err := f.APIServer.Start(); err != nil {
+		return err
+	}
+	defer func() {
+		if retErr != nil {
+			_ = f.APIServer.Stop()
+		}
+	}()
+
+	// provision the default user -- can be removed when the related
+	// methods are removed. The default user has admin permissions to
+	// mimic legacy no-authz setups.
+	user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{})
+	if err != nil {
+		return fmt.Errorf("unable to provision the default (legacy) user: %w", err)
+	}
+	kubectl, err := user.Kubectl()
+	if err != nil {
+		return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err)
+	}
+	f.defaultUserCfg = user.Config()
+	f.defaultUserKubectl = kubectl
+	return nil
+}
+
+// Stop will stop your control plane processes, and clean up their data.
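+//
+// A typical pairing with Start looks like this sketch:
+//
+//	plane := &ControlPlane{}
+//	if err := plane.Start(); err != nil {
+//		// handle startup failure
+//	}
+//	defer plane.Stop() //nolint:errcheck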
+func (f *ControlPlane) Stop() error {
+	var errList []error
+
+	if f.APIServer != nil {
+		if err := f.APIServer.Stop(); err != nil {
+			errList = append(errList, err)
+		}
+	}
+
+	if f.Etcd != nil {
+		if err := f.Etcd.Stop(); err != nil {
+			errList = append(errList, err)
+		}
+	}
+
+	return kerrors.NewAggregate(errList)
+}
+
+// APIURL returns the URL you should connect to in order to talk to your API server.
+//
+// If insecure serving is configured, this will contain the insecure port.
+// Otherwise, it will contain the secure port.
+//
+// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if
+// you really want just the URL.
+func (f *ControlPlane) APIURL() *url.URL {
+	return f.APIServer.URL
+}
+
+// KubeCtl returns a pre-configured KubeCtl, ready to connect to this
+// ControlPlane.
+//
+// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead.
+func (f *ControlPlane) KubeCtl() *KubeCtl {
+	return f.defaultUserKubectl
+}
+
+// RESTClientConfig returns a pre-configured restconfig, ready to connect to
+// this ControlPlane.
+//
+// Deprecated: use AddUser & AuthenticatedUser.Config instead.
+func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) {
+	return f.defaultUserCfg, nil
+}
+
+// AuthenticatedUser contains access information for a provisioned user,
+// including REST config, kubeconfig contents, and access to a KubeCtl instance.
+//
+// It's not "safe" to use the methods on this until after the API server has been
+// started (due to certificate initialization and such). The various methods will
+// panic if this is done.
+type AuthenticatedUser struct {
+	// cfg is the rest.Config for connecting to the API server. It's lazily initialized.
+	cfg *rest.Config
+	// cfgIsComplete indicates the cfg has had late-initialized fields (e.g.
+	// API server CA data) initialized.
+	cfgIsComplete bool
+
+	// plane is a handle to the ControlPlane that's used when finalizing cfg
+	// and producing the kubectl instance.
+	plane *ControlPlane
+
+	// kubectl is our existing, provisioned kubectl. We don't provision one
+	// till someone actually asks for it.
+	kubectl *KubeCtl
+}
+
+// Config returns the REST config that can be used to connect to the API server
+// as this user.
+//
+// Will panic if used before the API server is started.
+func (u *AuthenticatedUser) Config() *rest.Config {
+	// NB(directxman12): we choose to panic here for ergonomics sake, and because there's
+	// not really much you can do to "handle" this error. This machinery is intended to be
+	// used in tests anyway, so panicking is not a particularly big deal.
+	if u.cfgIsComplete {
+		return u.cfg
+	}
+	if len(u.plane.APIServer.SecureServing.CA) == 0 {
+		panic("the API server has not yet been started, please do that before accessing connection details")
+	}
+
+	u.cfg.CAData = u.plane.APIServer.SecureServing.CA
+	u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String()
+	u.cfgIsComplete = true
+	return u.cfg
+}
+
+// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config.
+//
+// Will panic if used before the API server is started.
+func (u AuthenticatedUser) KubeConfig() ([]byte, error) {
+	// NB(directxman12): we don't return the actual API object to avoid yet another
+	// piece of kubernetes API in our public API, and also because generally the thing
+	// you want to do with this is just write it out to a file for external debugging
+	// purposes, etc.
+	return KubeConfigFromREST(u.Config())
+}
+
+// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses
+// a kubeconfig equivalent to that returned by .KubeConfig.
+//
+// Will panic if used before the API server is started.
+func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) {
+	if u.kubectl != nil {
+		return u.kubectl, nil
+	}
+	if len(u.plane.APIServer.CertDir) == 0 {
+		panic("the API server has not yet been started, please do that before accessing connection details")
+	}
+
+	// cleaning this up is handled when our tmpDir is deleted
+	out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg")
+	if err != nil {
+		return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err)
+	}
+	defer out.Close()
+	contents, err := KubeConfigFromREST(u.Config())
+	if err != nil {
+		return nil, err
+	}
+	if _, err := out.Write(contents); err != nil {
+		return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err)
+	}
+	k := &KubeCtl{
+		Path: u.plane.KubectlPath,
+	}
+	k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name()))
+	u.kubectl = k
+	return k, nil
+}
+
+// AddUser provisions a new user in the cluster. It uses the APIServer's authentication
+// strategy -- see APIServer.SecureServing.Authn.
+//
+// Unlike Authn.AddUser, it's safe to pass a nil rest.Config here if you have no
+// particular opinions about the config.
+//
+// The default authentication strategy is not guaranteed to be any specific strategy,
+// but it is guaranteed to be callable both before and after Start has been called (but,
+// as noted in the AuthenticatedUser docs, the given user objects are only valid after
+// Start has been called).
+func (f *ControlPlane) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) {
+	if f.GetAPIServer().SecureServing.Authn == nil {
+		return nil, fmt.Errorf("no API server authentication is configured yet; the API server defaults one when Start is called -- did you mean to use that?")
+	}
+
+	if baseConfig == nil {
+		baseConfig = &rest.Config{}
+	}
+	cfg, err := f.GetAPIServer().SecureServing.AddUser(user, baseConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return &AuthenticatedUser{
+		cfg:   cfg,
+		plane: f,
+	}, nil
+}
+
+// GetAPIServer returns this ControlPlane's APIServer, initializing it if necessary.
+func (f *ControlPlane) GetAPIServer() *APIServer {
+	if f.APIServer == nil {
+		f.APIServer = &APIServer{}
+	}
+	return f.APIServer
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go
new file mode 100644
index 0000000..391eec1
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go
@@ -0,0 +1,340 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package process
+
+import (
+	"bytes"
+	"html/template"
+	"sort"
+	"strings"
+)
+
+// RenderTemplates renders the given argument templates against data, returning
+// the resulting argument strings.
+//
+// Deprecated: will be removed in favor of Arguments.
+func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) {
+	var t *template.Template
+
+	for _, arg := range argTemplates {
+		t, err = template.New(arg).Parse(arg)
+		if err != nil {
+			args = nil
+			return
+		}
+
+		buf := &bytes.Buffer{}
+		err = t.Execute(buf, data)
+		if err != nil {
+			args = nil
+			return
+		}
+		args = append(args, buf.String())
+	}
+
+	return
+}
+
+// SliceToArguments converts a slice of arguments to structured arguments,
+// appending each argument that starts with `--` and contains an `=` to the
+// argument set (ignoring defaults), returning the rest.
+//
+// Deprecated: will be removed when RenderTemplates is removed.
+func SliceToArguments(sliceArgs []string, args *Arguments) []string {
+	var rest []string
+	for i, arg := range sliceArgs {
+		if arg == "--" {
+			rest = append(rest, sliceArgs[i:]...)
+			return rest
+		}
+		// skip non-flag arguments, skip arguments w/o equals because we
+		// can't tell if the next argument should take a value
+		if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") {
+			rest = append(rest, arg)
+			continue
+		}
+
+		parts := strings.SplitN(arg[2:], "=", 2)
+		name := parts[0]
+		val := parts[1]
+
+		args.AppendNoDefaults(name, val)
+	}
+
+	return rest
+}
+
+// TemplateDefaults specifies defaults to be used for joining structured arguments with templates.
+//
+// Deprecated: will be removed when RenderTemplates is removed.
+type TemplateDefaults struct {
+	// Data will be used to render the template.
+	Data interface{}
+	// Defaults will be used to default structured arguments if no template is passed.
+	Defaults map[string][]string
+	// MinimalDefaults will be used to default structured arguments if a template is passed.
+	// Use this for flags which *must* be present.
+	MinimalDefaults map[string][]string // for api server service-cluster-ip-range
+}
+
+// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing
+// behavior. Namely:
+//
+// 1. if templ has len > 0, it will be rendered against data
+// 2. the rendered template values that look like `--foo=bar` will be split
+//    and appended to args, the rest will be kept around
+// 3. the given args will be rendered in string form. If a template is given,
+//    no defaults will be used, otherwise defaults will be used
+// 4. a result of [args..., rest...] will be returned
+//
+// It returns the resulting rendered arguments, plus the arguments that were
+// not transferred to `args` during rendering.
+//
+// Deprecated: will be removed when RenderTemplates is removed.
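+//
+// For example, in the template case (values are illustrative only):
+//
+//	args := EmptyArguments()
+//	all, rest, err := TemplateAndArguments(
+//		[]string{"--data-dir={{ .Dir }}", "verbose"},
+//		args,
+//		TemplateDefaults{Data: struct{ Dir string }{Dir: "/tmp/x"}},
+//	)
+//	// all == []string{"--data-dir=/tmp/x", "verbose"}, rest == []string{"verbose"}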
+func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) {
+	if len(templ) == 0 { // 3 & 4 (no template case)
+		return args.AsStrings(data.Defaults), nil, nil
+	}
+
+	// 1: render the template
+	rendered, err := RenderTemplates(templ, data.Data)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// 2: filter out structured args and add them to args
+	rest := SliceToArguments(rendered, args)
+
+	// 3 (template case): render structured args, no defaults (matching the
+	// legacy case where if Args was specified, no defaults were used)
+	res := args.AsStrings(data.MinimalDefaults)
+
+	// 4: return the rendered structured args + all non-structured args
+	return append(res, rest...), rest, nil
+}
+
+// EmptyArguments constructs an empty set of flags with no defaults.
+func EmptyArguments() *Arguments {
+	return &Arguments{
+		values: make(map[string]Arg),
+	}
+}
+
+// Arguments are structured, overridable arguments.
+// Each Arguments object contains some set of default arguments, which may
+// be appended to, or overridden.
+//
+// When ready, you can serialize them to pass to exec.Command and friends using
+// AsStrings.
+//
+// All flag-setting methods return the *same* instance of Arguments so that you
+// can chain calls.
+type Arguments struct {
+	// values contains the user-set values for the arguments.
+	// `values[key] = dontPass` means "don't pass this flag"
+	// `values[key] = passAsName` means "pass this flag without args, like `--key`"
+	// `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c"
+	// any values not explicitly set here will be copied from defaults on final rendering.
+	values map[string]Arg
+}
+
+// Arg is an argument that has one or more values,
+// and optionally falls back to default values.
+type Arg interface {
+	// Append adds new values to this argument, returning
+	// a new instance containing the new values. The intermediate
+	// argument should generally be assumed to be consumed.
+	Append(vals ...string) Arg
+	// Get returns the full set of values, optionally including
+	// the passed in defaults. If it returns nil, this will be
+	// skipped. If it returns a non-nil empty slice, it'll be
+	// assumed that the argument should be passed as name-only.
+	Get(defaults []string) []string
+}
+
+type userArg []string
+
+func (a userArg) Append(vals ...string) Arg {
+	return userArg(append(a, vals...)) //nolint:unconvert
+}
+func (a userArg) Get(_ []string) []string {
+	return []string(a)
+}
+
+type defaultedArg []string
+
+func (a defaultedArg) Append(vals ...string) Arg {
+	return defaultedArg(append(a, vals...)) //nolint:unconvert
+}
+func (a defaultedArg) Get(defaults []string) []string {
+	res := append([]string(nil), defaults...)
+	return append(res, a...)
+}
+
+type dontPassArg struct{}
+
+func (a dontPassArg) Append(vals ...string) Arg {
+	return userArg(vals)
+}
+func (dontPassArg) Get(_ []string) []string {
+	return nil
+}
+
+type passAsNameArg struct{}
+
+func (a passAsNameArg) Append(_ ...string) Arg {
+	return passAsNameArg{}
+}
+func (passAsNameArg) Get(_ []string) []string {
+	return []string{}
+}
+
+var (
+	// DontPass indicates that the given argument will not actually be
+	// rendered.
+	DontPass Arg = dontPassArg{}
+	// PassAsName indicates that the given flag will be passed as `--key`
+	// without any value.
+	PassAsName Arg = passAsNameArg{}
+)
+
+// AsStrings serializes this set of arguments to a slice of strings appropriate
+// for passing to exec.Command and friends, making use of the given defaults
+// as indicated for each particular argument.
+//
+// - Any flag in defaults that's not in Arguments will be present in the output
+// - Any flag that's present in Arguments will be passed the corresponding
+//   defaults to do with as it will (ignore, append-to, suppress, etc).
+func (a *Arguments) AsStrings(defaults map[string][]string) []string {
+	// sort for deterministic ordering
+	keysInOrder := make([]string, 0, len(defaults)+len(a.values))
+	for key := range defaults {
+		if _, userSet := a.values[key]; userSet {
+			continue
+		}
+		keysInOrder = append(keysInOrder, key)
+	}
+	for key := range a.values {
+		keysInOrder = append(keysInOrder, key)
+	}
+	sort.Strings(keysInOrder)
+
+	var res []string
+	for _, key := range keysInOrder {
+		vals := a.Get(key).Get(defaults[key])
+		switch {
+		case vals == nil: // don't pass
+			continue
+		case len(vals) == 0: // pass as name
+			res = append(res, "--"+key)
+		default:
+			for _, val := range vals {
+				res = append(res, "--"+key+"="+val)
+			}
+		}
+	}
+
+	return res
+}
+
+// Get returns the value of the given flag. If nil,
+// it will not be passed in AsStrings, otherwise:
+//
+// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`.
+func (a *Arguments) Get(key string) Arg {
+	if vals, ok := a.values[key]; ok {
+		return vals
+	}
+	return defaultedArg(nil)
+}
+
+// Enable configures the given key to be passed as a "name-only" flag,
+// like, `--key`.
+func (a *Arguments) Enable(key string) *Arguments {
+	a.values[key] = PassAsName
+	return a
+}
+
+// Disable prevents this flag from being passed.
+func (a *Arguments) Disable(key string) *Arguments {
+	a.values[key] = DontPass
+	return a
+}
+
+// Append adds additional values to this flag. If this flag has
+// yet to be set, initial values will include defaults. If you want
+// to intentionally ignore defaults/start from scratch, call AppendNoDefaults.
+//
+// Multiple values will look like `--key=value1 --key=value2 ...`.
+func (a *Arguments) Append(key string, values ...string) *Arguments {
+	vals, present := a.values[key]
+	if !present {
+		vals = defaultedArg{}
+	}
+	a.values[key] = vals.Append(values...)
+	return a
+}
+
+// AppendNoDefaults adds additional values to this flag. However,
+// unlike Append, it will *not* copy values from defaults.
+func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments {
+	vals, present := a.values[key]
+	if !present {
+		vals = userArg{}
+	}
+	a.values[key] = vals.Append(values...)
+	return a
+}
+
+// Set resets the given flag to the specified values, ignoring any existing
+// values or defaults.
+func (a *Arguments) Set(key string, values ...string) *Arguments {
+	a.values[key] = userArg(values)
+	return a
+}
+
+// SetRaw sets the given flag to the given Arg value directly. Use this if
+// you need to do some complicated deferred logic or something.
+//
+// Otherwise behaves like Set.
+func (a *Arguments) SetRaw(key string, val Arg) *Arguments {
+	a.values[key] = val
+	return a
+}
+
+// FuncArg is a basic implementation of Arg that can be used for custom argument logic,
+// like pulling values out of APIServer, or dynamically calculating values just before
+// launch.
+//
+// The given function will be mapped directly to Arg#Get, and will generally be
+// used in conjunction with SetRaw. For example, to set `--some-flag` to the
+// API server's CertDir, you could do:
+//
+//	server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string {
+//		return []string{server.CertDir}
+//	}))
+//
+// FuncArg ignores Appends; if you need to support appending values too, consider implementing
+// Arg directly.
+type FuncArg func([]string) []string
+
+// Append is a no-op for FuncArg, and just returns itself.
+func (a FuncArg) Append(vals ...string) Arg { return a }
+
+// Get delegates functionality to the FuncArg function itself.
+func (a FuncArg) Get(defaults []string) []string {
+	return a(defaults)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go
new file mode 100644
index 0000000..e1428aa
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package process
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+const (
+	// EnvAssetsPath is the environment variable that stores the global test
+	// binary location override.
+	EnvAssetsPath = "KUBEBUILDER_ASSETS"
+	// EnvAssetOverridePrefix is the environment variable prefix for per-binary
+	// location overrides.
+	EnvAssetOverridePrefix = "TEST_ASSET_"
+	// AssetsDefaultPath is the default location to look for test binaries in,
+	// if no override was provided.
+	AssetsDefaultPath = "/usr/local/kubebuilder/bin"
+)
+
+// BinPathFinder finds the path to the given named binary, using the following locations
+// in order of precedence (highest first). Notice that the various env vars only need
+// to be set -- the asset is not checked for existence on the filesystem.
+//
+// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix)
+// 2. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath)
+// 3. assetDirectory (if set; per-config asset directory)
+// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath).
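+//
+// For example (paths are illustrative): with TEST_ASSET_KUBE_APISERVER=/opt/bin/kube-apiserver
+// in the environment, BinPathFinder("kube-apiserver", "") returns /opt/bin/kube-apiserver;
+// with only KUBEBUILDER_ASSETS=/opt/bin set, it returns /opt/bin/kube-apiserver as well.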
+func BinPathFinder(symbolicName, assetDirectory string) (binPath string) {
+	punctuationPattern := regexp.MustCompile("[^A-Z0-9]+")
+	sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_")
+	leadingNumberPattern := regexp.MustCompile("^[0-9]+")
+	sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "")
+	envVar := EnvAssetOverridePrefix + sanitizedName
+
+	// TEST_ASSET_XYZ
+	if val, ok := os.LookupEnv(envVar); ok {
+		return val
+	}
+
+	// KUBEBUILDER_ASSETS
+	if val, ok := os.LookupEnv(EnvAssetsPath); ok {
+		return filepath.Join(val, symbolicName)
+	}
+
+	// assetDirectory
+	if assetDirectory != "" {
+		return filepath.Join(assetDirectory, symbolicName)
+	}
+
+	// default path
+	return filepath.Join(AssetsDefaultPath, symbolicName)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go
new file mode 100644
index 0000000..af83c70
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go
@@ -0,0 +1,272 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package process
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"regexp"
+	"sync"
+	"syscall"
+	"time"
+)
+
+// ListenAddr represents some listening address and port.
+type ListenAddr struct {
+	Address string
+	Port    string
+}
+
+// URL returns a URL for this address with the given scheme and subpath.
+func (l *ListenAddr) URL(scheme string, path string) *url.URL {
+	return &url.URL{
+		Scheme: scheme,
+		Host:   l.HostPort(),
+		Path:   path,
+	}
+}
+
+// HostPort returns the joined host-port pair for this address.
+func (l *ListenAddr) HostPort() string {
+	return net.JoinHostPort(l.Address, l.Port)
+}
+
+// HealthCheck describes the information needed to health-check a process via
+// some health-check URL.
+type HealthCheck struct {
+	url.URL
+
+	// PollInterval is the interval which will be used for polling the
+	// endpoint described by Host, Port, and Path.
+	//
+	// If left empty it will default to 100 milliseconds.
+	PollInterval time.Duration
+}
+
+// State defines the state of the process.
+type State struct {
+	Cmd *exec.Cmd
+
+	// HealthCheck describes how to check if this process is up. If we get an http.StatusOK,
+	// we assume the process is ready to operate.
+	//
+	// For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd.
+	HealthCheck HealthCheck
+
+	Args []string
+
+	StopTimeout  time.Duration
+	StartTimeout time.Duration
+
+	Dir              string
+	DirNeedsCleaning bool
+	Path             string
+
+	// ready holds whether the process is currently in ready state (hit the ready condition) or not.
+	// It will be set to true on a successful `Start()` and set to false on a successful `Stop()`
+	ready bool
+
+	// waitDone is closed when our call to wait finishes up, and indicates that
+	// our process has terminated.
+	waitDone chan struct{}
+	errMu    sync.Mutex
+	exitErr  error
+	exited   bool
+}
+
+// Init sets up this process, configuring binary paths if missing, initializing
+// temporary directories, etc.
+//
+// This defaults all defaultable fields.
+func (ps *State) Init(name string) error {
+	if ps.Path == "" {
+		if name == "" {
+			return fmt.Errorf("must have at least one of name or path")
+		}
+		ps.Path = BinPathFinder(name, "")
+	}
+
+	if ps.Dir == "" {
+		newDir, err := os.MkdirTemp("", "k8s_test_framework_")
+		if err != nil {
+			return err
+		}
+		ps.Dir = newDir
+		ps.DirNeedsCleaning = true
+	}
+
+	if ps.StartTimeout == 0 {
+		ps.StartTimeout = 20 * time.Second
+	}
+
+	if ps.StopTimeout == 0 {
+		ps.StopTimeout = 20 * time.Second
+	}
+	return nil
+}
+
+type stopChannel chan struct{}
+
+// CheckFlag checks the help output of this command for the presence of the given flag, specified
+// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`),
+// returning true if the flag is present.
+func (ps *State) CheckFlag(flag string) (bool, error) {
+	cmd := exec.Command(ps.Path, "--help")
+	outContents, err := cmd.CombinedOutput()
+	if err != nil {
+		return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err)
+	}
+	pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line)
+	matched, err := regexp.Match(pat, outContents)
+	if err != nil {
+		return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err)
+	}
+	return matched, nil
+}
+
+// Start starts the process, waits for it to come up, and returns an error,
+// if one occurred.
+func (ps *State) Start(stdout, stderr io.Writer) (err error) {
+	if ps.ready {
+		return nil
+	}
+
+	ps.Cmd = exec.Command(ps.Path, ps.Args...)
+	ps.Cmd.Stdout = stdout
+	ps.Cmd.Stderr = stderr
+
+	ready := make(chan bool)
+	timedOut := time.After(ps.StartTimeout)
+	pollerStopCh := make(stopChannel)
+	go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh)
+
+	ps.waitDone = make(chan struct{})
+
+	if err := ps.Cmd.Start(); err != nil {
+		ps.errMu.Lock()
+		defer ps.errMu.Unlock()
+		ps.exited = true
+		return err
+	}
+	go func() {
+		defer close(ps.waitDone)
+		err := ps.Cmd.Wait()
+
+		ps.errMu.Lock()
+		defer ps.errMu.Unlock()
+		ps.exitErr = err
+		ps.exited = true
+	}()
+
+	select {
+	case <-ready:
+		ps.ready = true
+		return nil
+	case <-ps.waitDone:
+		close(pollerStopCh)
+		return fmt.Errorf("timeout waiting for process %s to start successfully "+
+			"(it may have failed to start, or stopped unexpectedly before becoming ready)",
+			path.Base(ps.Path))
+	case <-timedOut:
+		close(pollerStopCh)
+		if ps.Cmd != nil {
+			// intentionally ignore this -- we might've crashed, failed to start, etc
+			ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck
+		}
+		return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path))
+	}
+}
+
+// Exited returns true if the process exited, and may also
+// return an error (as per Cmd.Wait) if the process did not
+// exit with error code 0.
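+//
+// For example (a sketch):
+//
+//	if exited, err := ps.Exited(); exited && err != nil {
+//		// the process is gone and terminated abnormally; err wraps the exit status
+//	}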
+func (ps *State) Exited() (bool, error) {
+	ps.errMu.Lock()
+	defer ps.errMu.Unlock()
+	return ps.exited, ps.exitErr
+}
+
+func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) {
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				// there's probably certs *somewhere*,
+				// but it's fine to just skip validating
+				// them for health checks during testing
+				InsecureSkipVerify: true, //nolint:gosec
+			},
+		},
+	}
+	if interval <= 0 {
+		interval = 100 * time.Millisecond
+	}
+	for {
+		res, err := client.Get(url.String())
+		if err == nil {
+			res.Body.Close()
+			if res.StatusCode == http.StatusOK {
+				ready <- true
+				return
+			}
+		}
+
+		select {
+		case <-stopCh:
+			return
+		default:
+			time.Sleep(interval)
+		}
+	}
+}
+
+// Stop stops this process gracefully, waits for its termination, and cleans up
+// the Dir if necessary.
+func (ps *State) Stop() error {
+	// Always clear the directory if we need to.
+	defer func() {
+		if ps.DirNeedsCleaning {
+			_ = os.RemoveAll(ps.Dir)
+		}
+	}()
+	if ps.Cmd == nil {
+		return nil
+	}
+	if done, _ := ps.Exited(); done {
+		return nil
+	}
+	if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
+		return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err)
+	}
+
+	timedOut := time.After(ps.StopTimeout)
+
+	select {
+	case <-ps.waitDone:
+		break
+	case <-timedOut:
+		return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path))
+	}
+	ps.ready = false
+	return nil
+}
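+
+// Putting the pieces together, a typical State lifecycle looks roughly like the
+// following sketch (the binary name, health-check endpoint, and omitted Args are
+// illustrative; real callers also template in flags via TemplateAndArguments):
+//
+//	ps := &State{}
+//	if err := ps.Init("etcd"); err != nil {
+//		// handle init failure
+//	}
+//	ps.HealthCheck.URL = url.URL{Scheme: "http", Host: "127.0.0.1:2379"}
+//	ps.HealthCheck.Path = "/health"
+//	if err := ps.Start(os.Stdout, os.Stderr); err != nil {
+//		// handle startup failure
+//	}
+//	defer ps.Stop() //nolint:errcheck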